Example #1
0
def test(model, device, test_loader, epoch, writer=None):
    """Evaluate `model` on `test_loader`; print, TensorBoard-log and
    OpenBayes-log the average loss and accuracy.

    Args:
        model: torch.nn.Module producing log-probabilities (NLL-loss ready).
        device: torch device the batches are moved to.
        test_loader: DataLoader over the evaluation set.
        epoch: step index used for the TensorBoard scalars.
        writer: optional SummaryWriter; skipped when None.
    """
    model.eval()
    total_loss = 0.0
    num_correct = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            logits = model(inputs)
            # Accumulate the *summed* (not batch-averaged) NLL so the
            # division by the dataset size below yields a true mean.
            total_loss += F.nll_loss(logits, labels, reduction='sum').item()
            # Predicted class = index of the max log-probability.
            predictions = logits.argmax(dim=1, keepdim=True)
            num_correct += predictions.eq(labels.view_as(predictions)).sum().item()

    n_samples = len(test_loader.dataset)
    avg_loss = total_loss / n_samples
    accuracy = num_correct / n_samples

    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            avg_loss, num_correct, n_samples, 100. * accuracy))
    if writer:
        writer.add_scalar('test loss', avg_loss, epoch)
        writer.add_scalar('test acc', accuracy, epoch)
    openbayestool.log_metric('loss', avg_loss)
    openbayestool.log_metric('acc', accuracy)
 def on_batch_end(self, batch, logs=None):
     """Log training metrics to OpenBayes once every 5000 batches.

     Args:
         batch: index of the batch that just finished.
         logs: Keras metrics dict for this batch ('acc', 'loss').
     """
     if batch % 5000 == 0:
         # Keras names this metric 'accuracy' under TensorFlow 2.x and
         # 'acc' under 1.x; this callback targets TF 1.x.
         # openbayestool.log_metric('acc', float(logs.get('accuracy')))
         # Fix: the accuracy value was previously logged under the
         # misleading metric name 'precision'; log it as 'acc', consistent
         # with the comment above and the sibling examples in this file.
         openbayestool.log_metric('acc', float(logs.get('acc')))
         openbayestool.log_metric('loss', float(logs.get('loss')))
Example #3
0
 def on_batch_end(self, batch, logs=None):
     """Log loss/accuracy to OpenBayes once every 5000 batches.

     Args:
         batch: index of the batch that just finished.
         logs: Keras metrics dict for this batch.
     """
     if batch % 5000 != 0:
         return
     # Keras renamed the accuracy metric from 'acc' (TF 1.x) to
     # 'accuracy' (TF 2.x); pick the key that matches the runtime.
     acc_key = 'accuracy' if tf.__version__.startswith('2') else 'acc'
     openbayestool.log_metric('acc', float(logs.get(acc_key)))
     openbayestool.log_metric('loss', float(logs.get('loss')))
def main(args):
    """Run the WALS job end-to-end: load data, train, save, report RMSE.

    Args:
        args: dict of job parameters; reads 'train_file', 'data_type' and
            'hypertune' here, the rest is consumed by the model/util helpers.
    """
    # Make sure the training file is available on local disk.
    train_path = util.ensure_local_file(args['train_file'])
    user_map, item_map, train_sparse, eval_sparse = model.create_test_and_train_sets(
        args, train_path, args['data_type'])

    # Factorize the ratings matrix into row/column factors.
    row_factors, col_factors = model.train_model(args, train_sparse)

    # Persist the trained factors to the job directory.
    model.save_model(args, user_map, item_map, row_factors, col_factors)

    # Reconstruction error on both splits.
    rmse_train = wals.get_rmse(row_factors, col_factors, train_sparse)
    rmse_test = wals.get_rmse(row_factors, col_factors, eval_sparse)

    if args['hypertune']:
        # Held-out RMSE is the objective reported to the hyperparameter tuner.
        util.write_hptuning_metric(args, rmse_test)

    tf.logging.info('train RMSE = %.2f' % rmse_train)
    tf.logging.info('test RMSE = %.2f' % rmse_test)
    openbayestool.log_metric('rmse', rmse_test)
        optimizer.step()
    # NOTE(review): fragment — the enclosing function and training loop are
    # not visible in this excerpt; `train_loss`, `train_acc`, `train_data`,
    # `optimizer`, `loss_func`, `device` and the loaders are defined above it.
    print(
        "Train Loss: {:.6f}, Acc: {:.6f}".format(
            train_loss / (len(train_data)), train_acc / (len(train_data))
        )
    )

    # evaluation--------------------------------
    model.eval()
    eval_loss = 0.0
    eval_acc = 0.0
    for batch_x, batch_y in test_loader:
        # NOTE(review): Variable(..., volatile=True) is the pre-0.4 PyTorch
        # way of disabling autograd; on modern PyTorch this is a no-op and
        # `with torch.no_grad():` should wrap the loop instead — confirm the
        # targeted torch version before changing.
        batch_x, batch_y = Variable(batch_x, volatile=True), Variable(
            batch_y, volatile=True
        )
        batch_x = batch_x.to(device)
        batch_y = batch_y.to(device)
        out = model(batch_x)
        loss = loss_func(out, batch_y)
        # NOTE(review): loss.item() is one value per *batch*, yet the total is
        # divided by len(test_data) (samples) below — correct only if
        # loss_func uses reduction='sum'; verify where loss_func is built.
        eval_loss += loss.item()
        pred = torch.max(out, 1)[1]  # index of the max logit = predicted class
        num_correct = (pred == batch_y).sum()
        eval_acc += num_correct.item()
    print(
        "Test Loss: {:.6f}, Acc: {:.6f}".format(
            eval_loss / (len(test_data)), eval_acc / (len(test_data))
        )
    )
    # Report the final per-sample test metrics to OpenBayes.
    openbayestool.log_metric("loss", eval_loss / (len(test_data)))
    openbayestool.log_metric("acc", eval_acc / (len(test_data)))
Example #6
0
 def on_batch_end(self, batch, logs=None):
     """Push the current accuracy and loss to OpenBayes every 5000 batches.

     Args:
         batch: index of the batch that just finished.
         logs: Keras metrics dict for this batch ('acc', 'loss').
     """
     if batch % 5000 != 0:
         return
     for metric in ('acc', 'loss'):
         openbayestool.log_metric(metric, float(logs.get(metric)))