Example #1
def test_dataset(dataset_name):
    print('\n=========================')
    print('Loading dataset', dataset_name)
    sampler, _ = d5ds.load_dataset(dataset_name,
                                   'in',
                                   'label',
                                   batch_size=BATCH_SIZE)
    print('Dataset loaded')
    # ImageNet already outputs a sampler due to the file format
    if dataset_name != 'imagenet':
        sampler = d5.ShuffleSampler(sampler, BATCH_SIZE)

    # A plain WallclockTime metric would suffice here, but wrapping it in a
    # SamplerEventMetric also lets it be used in the context of training.
    metrics = [d5.SamplerEventMetric(d5.WallclockTime())]
    d5.test_sampler(sampler, metrics=metrics)
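
A minimal driver for the function above, assuming it is appended to the same script; BATCH_SIZE and the dataset names below are illustrative assumptions, not part of the original example.

BATCH_SIZE = 64  # assumed; the original script defines this constant elsewhere

if __name__ == '__main__':
    for name in ['mnist', 'cifar10', 'imagenet']:  # hypothetical choices
        test_dataset(name)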
Example #2
    try:
        # Create a conv2d function that only receives one variable
        cnv2d = lambda x: tf.layers.conv2d(
            x,
            filters=test.k,
            kernel_size=[test.r, test.s],
            strides=[test.hstride, test.wstride],
            data_format='channels_first',
            kernel_initializer=tf.constant_initializer(W))

        times, = \
            d5tf.test_nativeop_forward(cnv2d, [var_X], [None],
                                       metrics=[d5.WallclockTime(RUNS*AVG_OVER, AVG_OVER)])
    except Exception as ex:
        print('Exception:', ex)
        times = [-1.0]
    # Clear memory
    tf.reset_default_graph()

    with open('0_d5tf_conv_deepbench.log', 'a') as fp:
        fp.writelines([
            '{test.n},{test.c},{test.h},{test.w},'
            '{test.k},{test.r},{test.s},'
            '{test.hstride},{test.wstride},'
            '{time:.15f}\n'.format(test=test, time=time) for time in times
        ])
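
The `test` object formatted into the log line is not defined in this excerpt; a plausible stand-in is a record of DeepBench convolution parameters (an assumption for illustration):

from collections import namedtuple

# Hypothetical record matching the fields referenced above
ConvTest = namedtuple('ConvTest', ['n', 'c', 'h', 'w',      # batch, channels, size
                                   'k', 'r', 's',           # filters, kernel size
                                   'hstride', 'wstride'])   # strides
test = ConvTest(n=16, c=3, h=224, w=224, k=64, r=7, s=7, hstride=2, wstride=2)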
Example #3
    parser.add_argument("--folder", type=str, nargs="?", default=None)
    args = vars(parser.parse_args())

    ds_shape = d5ds.dataset_shape('ucf101')
    ds_classes, sample_shape = ds_shape[0], ds_shape[1:]

    train_set, validation_set = d5ds.load_ucf101('0', 'label', folder=args['folder'],
                                                 normalize=True, max_length=700, skip_frames=10)

    # Compute per-video sequence lengths (frame counts) with PyAV so that
    # BucketSampler can batch similarly-sized clips together
    seq_lengths = [av.open(path).streams.video[0].frames for path in train_set.data]
    train_sampler = d5.BucketSampler(train_set, BATCH_SIZE, seq_lengths, max_length=500,
                                     transformations=[d5ref.Crop((224, 224))])
    validation_sampler = d5.OrderedSampler(validation_set, BATCH_SIZE,
                                           transformations=[d5ref.Crop((224, 224))])

    model = ResNet50LSTM(num_classes=ds_classes, pretrained=True)

    loss = torch.nn.CrossEntropyLoss()
    executor = d5fw.PyTorchNativeGraphExecutor(model, loss, device=d5.GPUDevice())
    optimizer = d5fw.GradientDescent(executor, 'loss')

    METRICS = [
        d5.TestAccuracy(),
        d5.WallclockTime(reruns=0, avg_over=1)
    ]

    results = d5.test_training(executor, train_sampler, validation_sampler,
                               optimizer, EPOCHS, BATCH_SIZE, 'output',
                               metrics=[m for m in METRICS])
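
Assuming test_training returns one result per metric, in order (as the zip over metrics and results in Example #5 suggests), the run can be summarized with a short loop:

    # Sketch: print each metric's result next to its class name
    for metric, result in zip(METRICS, results):
        print(type(metric).__name__, ':', result)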

Example #4
    try:
        def model(A, B):
            if test.a_trans:
                Amul = A.transpose(0, 1)
            else:
                Amul = A
            if test.b_trans:
                Bmul = B.transpose(0, 1)
            else:
                Bmul = B
            return torch.matmul(Amul, Bmul)

        # Run Deep500 test
        l2err, maxerr, times = \
            d5pt.test_nativeop_forward(model, [var_A, var_B], [Amult @ Bmult],
                                       metrics=[d5.L2Error(), d5.MaxError(),
                                                d5.WallclockTime(RUNS*AVG_OVER, AVG_OVER)])
    except Exception as ex:
        print('Exception:', ex)
        l2err = -1.0
        maxerr = -1.0
        times = [-1.0]

    with open('0_d5pt_gemm_deepbench.log', 'a') as fp:
        fp.writelines([
            '{test.m},{test.n},{test.k},'
            '{test.a_trans},{test.b_trans},'
            '{time:.15f},{l2err},{maxerr}\n'.format(test=test,
                                                    time=time,
                                                    l2err=l2err,
                                                    maxerr=maxerr)
            for time in times
        ])
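
As in the convolution benchmark of Example #2, the `test` record is not defined in this excerpt; a plausible stand-in (an assumption for illustration):

from collections import namedtuple

GemmTest = namedtuple('GemmTest', ['m', 'n', 'k', 'a_trans', 'b_trans'])
test = GemmTest(m=1024, n=1024, k=1024, a_trans=False, b_trans=True)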
Example #5
File: recipe.py  Project: vibhatha/deep500
def run_recipe(fixed: Dict[str, Any], mutable: Dict[str, Any],
               metrics: List[Tuple[d5.TestMetric, Any]]) -> bool:
    """ Runs a Deep500 recipe (see file documentation). Returns True on success
        and False on failure, printing the unacceptable metrics. """

    # Argument validation
    if any(k in mutable for k in fixed.keys()):
        raise RuntimeError('Fixed and mutable components cannot overlap')

    # Create unified dictionary
    comps = dict(fixed, **mutable)

    # Add missing arguments and keyword arguments
    old_keys = list(comps.keys())
    for k in old_keys:
        if (k not in ['batch_size', 'epochs', 'events']
                and not (k.endswith('_args') or k.endswith('_kwargs'))):
            if ('%s_args' % k) not in comps:
                comps['%s_args' % k] = tuple()
            if ('%s_kwargs' % k) not in comps:
                comps['%s_kwargs' % k] = {}

    ########################################################################
    # Obtain dataset metadata
    if 'dataset' not in comps:
        raise SyntaxError('Dataset must be specified in training recipe')

    if isinstance(comps['dataset'], str):
        loss_op = d5ds.dataset_loss(comps['dataset'])
        ds_shape = d5ds.dataset_shape(comps['dataset'])
    else:
        loss_op = comps['dataset'].loss
        ds_shape = comps['dataset'].shape
    ds_classes, sample_shape = ds_shape[0], ds_shape[1:]

    # Construct network
    if 'model' not in comps:
        raise SyntaxError('Model must be specified in recipe')
    if 'batch_size' not in comps:
        raise SyntaxError('Batch size must be specified in training recipe')
    batch = comps['batch_size']

    if isinstance(comps['model'], str):
        # ONNX file
        if os.path.isfile(comps['model']):
            network = d5.parser.load_and_parse_model(comps['model'])
            input_node = network.get_input_nodes()[0].name
            output_node = network.get_output_nodes()[0].name
        else:  # Standard model
            network, input_node, output_node = \
                d5nt.create_model(comps['model'], batch, *comps['model_args'],
                                  classes=ds_classes, shape=sample_shape,
                                  **comps['model_kwargs'])
    else:  # Callable
        network, input_node, output_node = comps['model'](
            batch,
            *comps['model_args'],
            classes=ds_classes,
            shape=sample_shape,
            **comps['model_kwargs'])

    # Add loss function to model
    network.add_operation(loss_op([output_node, 'label'], 'loss'))

    # Construct dataset
    if isinstance(comps['dataset'], str):
        train_set, validation_set = d5ds.load_dataset(
            comps['dataset'], input_node, 'label', *comps['dataset_args'],
            **comps['dataset_kwargs'])
    else:
        train_set, validation_set = comps['dataset'](input_node, 'label',
                                                     *comps['dataset_args'],
                                                     **comps['dataset_kwargs'])

    # Construct samplers
    if 'train_sampler' in comps:
        if isinstance(comps['train_sampler'], d5.Sampler):
            train_sampler = comps['train_sampler']
        else:
            train_sampler = comps['train_sampler'](
                train_set, batch, *comps['train_sampler_args'],
                **comps['train_sampler_kwargs'])
    else:
        train_sampler = train_set

    if 'validation_sampler' in comps:
        if isinstance(comps['validation_sampler'], d5.Sampler):
            validation_sampler = comps['validation_sampler']
        else:
            validation_sampler = comps['validation_sampler'](
                validation_set, batch, *comps['validation_sampler_args'],
                **comps['validation_sampler_kwargs'])
    else:
        validation_sampler = validation_set

    # Construct executor
    if 'executor' not in comps:
        raise SyntaxError('Executor must be specified in recipe')
    if isinstance(comps['executor'], d5.GraphExecutor):
        executor = comps['executor']
    else:
        executor = comps['executor'](network, *comps['executor_args'],
                                     **comps['executor_kwargs'])

    # Construct optimizer
    if 'optimizer' not in comps:
        raise SyntaxError('Optimizer must be specified in training recipe')
    optimizer = comps['optimizer'](executor, 'loss', *comps['optimizer_args'],
                                   **comps['optimizer_kwargs'])

    # Add total time to metrics
    metrics.append((d5.WallclockTime(reruns=0, avg_over=1), None))

    ########################################################################
    # Create trainer and run
    if 'epochs' not in comps:
        raise SyntaxError('Epochs must be specified in training recipe')
    if 'events' not in comps:
        comps['events'] = None
    results = d5.test_training(executor,
                               train_sampler,
                               validation_sampler,
                               optimizer,
                               comps['epochs'],
                               batch,
                               output_node,
                               metrics=[m[0] for m in metrics],
                               events=comps['events'])

    # Verify results
    ok = True
    for (metric, acceptable), result in zip(metrics, results):
        if acceptable is not None:
            if result < acceptable:
                print('FAIL %s: %s (Acceptable: %s)' %
                      (type(metric).__name__, result, acceptable))
                ok = False

    if not ok:
        return False
    else:
        print('PASSED')
        return True
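
A hypothetical invocation of run_recipe; every concrete component below is an illustrative assumption (d5fw stands for a framework module, and from_model / GradientDescent follow their usage in Examples #7 and #3):

fixed = {
    'dataset': 'mnist',      # assumed dataset name
    'model': 'simple_cnn',   # assumed model name for d5nt.create_model
    'batch_size': 64,
    'epochs': 5,
}
mutable = {
    'executor': d5fw.from_model,        # called as executor(network)
    'optimizer': d5fw.GradientDescent,  # called as optimizer(executor, 'loss')
}
metrics = [(d5.TestAccuracy(), 0.9)]    # require at least 90% test accuracy
run_recipe(fixed, mutable, metrics)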
Example #6
def DefaultOptimizerMetrics():
    return [d5.L2Error(), d5.WallclockTime()]
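
A sketch of how these defaults might be passed to one of the d5.test_* harnesses shown above; the operator handle, inputs, and reference output are assumed placeholders:

l2err, times = d5pt.test_nativeop_forward(op, [var_A], [reference_output],
                                          metrics=DefaultOptimizerMetrics())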
Example #7
import sys
from pydoc import locate

import deep500 as d5

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('USAGE: network_inference.py <ONNX FILE> <FRAMEWORK>')
        sys.exit(1)

    # Find framework dynamically
    fwname = sys.argv[2]
    d5fw = locate('deep500.frameworks.' + fwname)
    if d5fw is None:
        raise ValueError('Unrecognized framework ' + fwname)

    model = d5.parser.load_and_parse_model(sys.argv[1])
    executor = d5fw.from_model(model)

    # Because the reference operators are relatively slow, run fewer times
    if fwname == 'reference':
        metrics = [d5.WallclockTime(reruns=5, avg_over=1)]
        d5.test_executor_inference(executor, metrics=metrics)
    else:
        d5.test_executor_inference(executor)
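
Example invocations (the ONNX file name is illustrative; 'reference' is the only framework name confirmed by the code above):

# python network_inference.py model.onnx reference
# python network_inference.py model.onnx pytorch   # assumed framework name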