Example #1
def test_input_8bit(dataset, precision):
    """ Test 8-bit input vs usual input precision
    """
    def run_model(dl, eight_bit_io=False):
        class MyModel(torch.nn.Module):
            def forward(self, x):
                return x * 2.0

        # With 8-bit IO, normalization runs on the IPU inside NormalizeInputModel,
        # which also casts its output to the compute precision.
        cast_op = "half" if precision == "16.16" else "full"
        if eight_bit_io:
            model = NormalizeInputModel(MyModel(),
                                        mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225],
                                        output_cast=cast_op)
        else:
            model = MyModel()
        poptorch_model = poptorch.inferenceModel(model, poptorch.Options())
        input_data = next(iter(dl))[0]
        return poptorch_model(input_data)

    # Plain object standing in for parsed command-line arguments
    class HelperClass:
        def __init__(self):
            pass
    args = HelperClass()
    opts = poptorch.Options()
    args.batch_size = 1
    args.dataloader_worker = 4
    args.data = dataset
    args.model = "resnet18"
    args.precision = precision
    args.eight_bit_io = False
    args.normalization_location = "host"
    dataloader = get_data(args, opts, train=False)
    result_normal = run_model(dataloader, eight_bit_io=False)
    args.eight_bit_io = True
    args.normalization_location = "ipu"
    dataloader8 = get_data(args, opts, train=False)
    result_8bit = run_model(dataloader8, eight_bit_io=True)
    # Generated data is random per loader, so values are only compared for real datasets
    if dataset != "generated":
        assert torch.allclose(result_8bit, result_normal, atol=4e-03, rtol=1e-03)
    assert result_normal.type() == result_8bit.type()
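
For reference, NormalizeInputModel is project-local. A minimal sketch of what it plausibly does, inferred from how the test uses it (8-bit input, normalization on the IPU, then a cast to the compute precision); the exact implementation may differ:

import torch

class NormalizeInputModel(torch.nn.Module):
    def __init__(self, model, mean, std, output_cast=None):
        super().__init__()
        self.model = model
        mean = torch.as_tensor(mean).view(1, -1, 1, 1)
        std = torch.as_tensor(std).view(1, -1, 1, 1)
        # Fold uint8 scaling and normalization into one multiply-add:
        # (x / 255 - mean) / std == x * mul - sub
        self.register_buffer("mul", (1.0 / (255.0 * std)).float())
        self.register_buffer("sub", (mean / std).float())
        self.output_cast = output_cast

    def forward(self, x):
        x = x.float() * self.mul - self.sub
        if self.output_cast == "half":
            x = x.half()
        return self.model(x)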
Example #2
    def test_webdata_cache(self):
        """Test cache for webdata
        """
        class HelperClass:
            def __init__(self):
                pass
        args = HelperClass()
        args.precision = "16.16"
        args.model = 'resnet50'
        args.device_iterations = 1
        args.replicas = 1
        args.batch_size = 31
        args.dataloader_worker = 8
        args.normalization_location = 'ipu'
        args.eight_bit_io = False
        args.webdataset_percentage_to_use = 100
        args.data = "imagenet"
        args.webdataset_memory_cache_ratio = 0.8
        args.imagenet_data_path = Path(__file__).parent.parent.absolute() / "data" / "cifar10_webdata"

        dataloader = get_data(args, poptorch.Options(), train=False, async_dataloader=True, return_remaining=True)
        total_samples = 0
        for data, label in dataloader:
            total_samples += label.size()[0]
        # The CIFAR-10 test split packaged as a webdataset holds 10000 samples
        assert total_samples == 10000
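
get_data here is project-local. For orientation, a minimal webdataset pipeline that such a loader might build internally (the shard pattern and field names are illustrative assumptions):

import webdataset as wds

shards = "data/cifar10_webdata/validation-{000000..000009}.tar"   # hypothetical shard names
pipeline = (wds.WebDataset(shards)
            .decode("pil")               # decode stored images with PIL
            .to_tuple("jpg", "cls"))     # yield (image, label) pairs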
Example #3
def test_get_data(async_dataloader, return_remaining, data_type, num_instances):
    """
    Check whether all the samples are used.
    """
    class HelperClass:
        def __init__(self):
            pass
    args = HelperClass()
    args.precision = "16.16"
    args.model = 'resnet50'
    args.device_iterations = 1
    args.replicas = 1
    args.batch_size = 31
    args.dataloader_worker = 8
    args.normalization_location = 'ipu'
    args.eight_bit_io = False
    args.webdataset_percentage_to_use = 100
    if data_type == "webdata":
        args.data = "imagenet"
        args.imagenet_data_path = Path(__file__).parent.parent.absolute() / "data" / "cifar10_webdata"
    else:
        args.data = 'cifar10'
    lengths = []
    for instance_id in range(num_instances):
        opts = poptorch.Options()
        if num_instances > 1:
            opts.Distributed.configureProcessId(instance_id, num_instances)
        dataloader = get_data(args, opts, train=False, async_dataloader=async_dataloader, return_remaining=return_remaining)
        length = 0
        for x, y in dataloader:
            length += x.size()[0]
        lengths.append(length)
    if return_remaining:
        assert sum(lengths) == 10000
        assert len(dataloader) == 10000 // (num_instances * 31) + 1
    else:
        expected_batch_count = 10000 // (num_instances * 31)
        assert sum(lengths) == expected_batch_count * (num_instances * 31)
        assert len(dataloader) == expected_batch_count
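
As a concrete instance of the arithmetic above, take num_instances=2: a combined step consumes 2 * 31 = 62 samples, and 10000 // 62 = 161 full combined batches cover 9982 samples. Without return_remaining the 18 leftover samples are dropped, so sum(lengths) == 9982 and len(dataloader) == 161; with return_remaining each instance delivers one extra partial batch, so sum(lengths) == 10000 and len(dataloader) == 162.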
Example #4
import transformers

import poptorch
import dataset
from ipu_options import get_options
from log import logger
from metrics import accuracy
from model import PipelinedViTForImageClassification

if __name__ == "__main__":
    # Validation loop
    # Build config from args
    config = transformers.ViTConfig(**vars(parse_args()))
    logger.info(f"Running config: {config.config}")

    # Execution parameters
    opts = get_options(config)

    test_loader = dataset.get_data(config,
                                   opts,
                                   train=False,
                                   async_dataloader=True)

    # Init from a checkpoint
    model = PipelinedViTForImageClassification.from_pretrained(
        config.pretrained_checkpoint,
        config=config).parallelize().eval()
    # Cast to fp16 only when the configured precision calls for it
    if config.precision.startswith("16."):
        model.half()

    # Validation execution parameters
    valid_opts = poptorch.Options()
    valid_opts.deviceIterations(4)
    valid_opts.outputMode(poptorch.OutputMode.All)
    valid_opts.Precision.enableStochasticRounding(False)
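
The snippet ends before the loop itself. A minimal sketch of how these pieces are typically wired together (the call signature of the accuracy helper imported above is an assumption):

    inference_model = poptorch.inferenceModel(model, valid_opts)
    accuracies = []
    for input_data, labels in test_loader:
        logits = inference_model(input_data)          # forward pass on the IPU
        accuracies.append(accuracy(logits, labels))   # assumed accuracy(logits, labels) signature
    logger.info(f"Validation accuracy: {sum(accuracies) / len(accuracies):.4f}")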
Example #5
# coding=utf-8
import datasets.words_db
import utils.utils
import datasets.dataset
import model.model
import numpy as np

article_fn = './data/sanguo.txt'
# Collect every word from the article, then deduplicate into a vocabulary
all_words = utils.utils.get_all_words_in_article(article_fn)
none_rpt_words = utils.utils.get_all_none_repeated_words(all_words)
# print(none_rpt_words)

wdb = datasets.words_db.WordsDb(none_rpt_words)
dataset = datasets.dataset.TrainDataset(
    article_fn, wdb, dataset_len=100, read_step=3)

#print(wdb.words2idx_fix_len("古今多少事,都付笑笑谈中", fix_len=10))
X, Y = dataset.get_data()

article_model = model.model.ArticleModel(wdb, X[0].shape, wdb.unknow_idx)
article_model.loss_train(X, Y, 1024, 200)
#article = article_model.get_article("俱往矣,数风流人", output_len=1000)
#print("output:%s" % article)
Example #6
    return args


def benchmark_throughput(dataloader, iterations=2):
    for _ in range(iterations):
        total_sample_size = 0
        start_time = time.perf_counter()
        for input_data, _ in tqdm(dataloader, total=len(dataloader)):
            total_sample_size += input_data.size()[0]
        elapsed_time = time.perf_counter() - start_time

        # Aggregate the measurements across all distributed instances
        if popdist.isPopdistEnvSet():
            elapsed_time, total_sample_size = utils.synchronize_throughput_values(
                elapsed_time,
                total_sample_size,
            )

        iteration_throughput = total_sample_size / elapsed_time
        print(f"Throughput of the iteration: {iteration_throughput:0.1f} img/sec")


if __name__ == '__main__':
    args = get_args()
    opts = poptorch.Options()
    if popdist.isPopdistEnvSet():
        hvd.init()
        opts.Distributed.configureProcessId(popdist.getInstanceIndex(), popdist.getNumInstances())
    opts.randomSeed(0)
    dataloader = get_data(args, opts, train=True, async_dataloader=not args.disable_async_loading)
    benchmark_throughput(dataloader)
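
utils.synchronize_throughput_values is project-local. A plausible sketch, assuming it sums the per-instance sample counts and takes the slowest instance's elapsed time via Horovod collectives (the exact reduction is an assumption):

import torch
import horovod.torch as hvd

def synchronize_throughput_values(elapsed_time, total_sample_size):
    # Gather (elapsed_time, samples) from every instance: shape [num_instances, 2]
    values = torch.tensor([[elapsed_time, float(total_sample_size)]])
    gathered = hvd.allgather(values)
    # The slowest instance bounds wall-clock time; sample counts add up
    return gathered[:, 0].max().item(), gathered[:, 1].sum().item()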
Example #7
    config = transformers.ViTConfig(**vars(parse_args()))
    logger.info(f"Running config: {config.config}")

    # W&B
    if config.wandb:
        import wandb
        proj_name = config.wandb_project_name
        wandb.init(project=proj_name, settings=wandb.Settings(console="wrap"))
        wandb.config.update(vars(config))

    # Execution parameters
    opts = get_options(config)

    # Dataloader
    train_loader = dataset.get_data(config,
                                    opts,
                                    train=True,
                                    async_dataloader=True)

    steps_per_epoch = len(train_loader)
    if steps_per_epoch < 1:
        raise RuntimeError(
            "Not enough data in input_files for current configuration")

    # IPU Model and Optimizer
    model = PipelinedViTForImageClassification.from_pretrained(
        config.pretrained_checkpoint, config=config).parallelize().train()
    model.print_device_allocation()
    if config.precision.startswith("16."):
        model.half()
    optimizer = get_optimizer(config, model)
    scheduler = get_lr_scheduler(optimizer, config.lr_schedule,