Example No. 1
def test_null_target_selection():
    import train

    train_dataset, test_dataset = train.get_datasets()

    # Count training graphs whose target label is the null class (0).
    count_null = 0
    for data_item in train_dataset:
        if data_item.y.item() == 0:
            count_null += 1

    print("{:.2f}% of targets are null".format(
        100 * (count_null / len(train_dataset))))
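For reference, the counting loop collapses to a single generator expression:

    count_null = sum(1 for data_item in train_dataset if data_item.y.item() == 0)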
Example No. 2
    def test_train_one_epoch(self):
        # Assumes the enclosing unittest class and these imports (not shown in
        # this excerpt): import numpy as onp; from jax import random; import train.
        train_ds, test_ds = train.get_datasets()
        input_rng = onp.random.RandomState(0)
        model = train.create_model(random.PRNGKey(0))
        optimizer = train.create_optimizer(model, 0.1, 0.9)

        # Train for one epoch (batch size 128) and check the training metrics.
        optimizer, train_metrics = train.train_epoch(optimizer, train_ds, 128,
                                                     0, input_rng)
        self.assertLessEqual(train_metrics['loss'], 0.27)
        self.assertGreaterEqual(train_metrics['accuracy'], 0.92)

        # Evaluate the trained parameters on the held-out test set.
        loss, accuracy = train.eval_model(optimizer.target, test_ds)
        self.assertLessEqual(loss, 0.06)
        self.assertGreaterEqual(accuracy, 0.98)
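Note: `optimizer.target` appears to follow the pre-Linen `flax.optim` convention, where the optimizer wraps the model and exposes the parameters being optimized as its `.target` attribute. The `onp` alias is the common JAX-codebase convention for plain NumPy, distinguishing host arrays from `jax.numpy`.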
Example No. 3
def test_random_target_selection():
    # dataset = RescueDataset("/home/okan/rescuesim/rcrs-server/dataset", "firebrigade", comp="robocup2019",
    #                         scenario="test2", team="ait", node_classification=False, read_info_map=True)
    import train

    train_dataset, test_dataset = train.get_datasets()

    # Count training items whose info_map marks the target selection as random.
    num_random = 0
    for data_item in train_dataset:
        if data_item.info_map.get('type') == 'random':
            num_random += 1

    print("{:.2f}% of selection is random".format(
        100 * (num_random / len(train_dataset))))
Example No. 4
    def test_single_train_step(self):
        # Assumes the enclosing unittest class and these imports (not shown in
        # this excerpt): from jax import random; import train.
        train_ds, test_ds = train.get_datasets()
        batch_size = 32
        model = train.create_model(random.PRNGKey(0))
        optimizer = train.create_optimizer(model, 0.1, 0.9)

        # Run a single train step on the first batch of the training set.
        optimizer, train_metrics = train.train_step(
            optimizer=optimizer,
            batch={k: v[:batch_size]
                   for k, v in train_ds.items()})
        self.assertLessEqual(train_metrics['loss'], 2.302)
        self.assertGreaterEqual(train_metrics['accuracy'], 0.0625)

        # Evaluate the once-updated parameters on the test set.
        loss, accuracy = train.eval_model(optimizer.target, test_ds)
        self.assertLess(loss, 2.252)
        self.assertGreater(accuracy, 0.2597)
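The thresholds here read as sanity bounds rather than tight targets: 2.302 sits just below ln(10) ≈ 2.3026, the cross-entropy of uniform guessing over 10 classes (suggesting an MNIST-style task), and 0.0625 is 2/32, i.e. two correct predictions in the 32-item batch. The assertions therefore only check that a single optimizer step beats random initialization.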
Example No. 5
import argparse

import numpy as np

from sentiment.inference import SentimentInference
from train import get_datasets

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='.cache', type=str)
    args = parser.parse_args()

    # Load the cached train/test split and run batch inference on the test set.
    trainset, testset = get_datasets(args.data_path)
    inferencer = SentimentInference()

    # Accuracy is the fraction of predictions that match the gold labels.
    predicted = inferencer.inference_batch(testset['X'])
    accuracy = np.mean(predicted == testset['y'])
    print("test accuracy: {:.4f}".format(accuracy))
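Assuming the script is saved as evaluate.py (filename hypothetical), it would be run as:

    python evaluate.py --data_path .cache

where --data_path points at the directory that get_datasets uses as its data cache.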