Code Example #1
File: dataset_latency.py  Project: vibhatha/deep500
def test_dataset(dataset_name):
    print('\n=========================')
    print('Loading dataset', dataset_name)
    sampler, _ = d5ds.load_dataset(dataset_name,
                                   'in',
                                   'label',
                                   batch_size=BATCH_SIZE)
    print('Dataset loaded')
    # ImageNet already outputs a sampler due to the file format
    if dataset_name != 'imagenet':
        sampler = d5.ShuffleSampler(sampler, BATCH_SIZE)

    # In this case, a simple WallclockTime metric would also suffice,
    # but a SamplerEventMetric can also be used in the context of training.
    metrics = [d5.SamplerEventMetric(d5.WallclockTime())]
    d5.test_sampler(sampler, metrics=metrics)
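
A minimal sketch of how this helper might be invoked, assuming the usual deep500 import aliases and a module-level BATCH_SIZE constant (neither is shown in the excerpt):

import deep500 as d5
from deep500 import datasets as d5ds

BATCH_SIZE = 64  # assumed value; the excerpt references it without defining it

test_dataset('mnist')  # 'mnist' is the dataset name used elsewhere in these examples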
Code Example #2
File: train_until.py  Project: vibhatha/deep500
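    # (excerpt: BATCH_SIZE, LABEL_NODE, dsname, netname and the deep500 aliases
    #  d5, d5ds, d5net, d5tf and d5ref are defined earlier in the original file)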
    accuracy = 98.0 if len(sys.argv) < 4 else float(sys.argv[3])

    # Create CNN using ONNX
    ds_cls, ds_c, ds_h, ds_w = d5ds.dataset_shape(dsname)
    onnx_file = d5net.export_network(netname,
                                     BATCH_SIZE,
                                     classes=ds_cls,
                                     shape=(ds_c, ds_h, ds_w))
    model = d5.parser.load_and_parse_model(onnx_file)

    # Recover input and output nodes (assuming only one input and one output)
    INPUT_NODE = model.get_input_nodes()[0].name
    OUTPUT_NODE = model.get_output_nodes()[0].name

    # Create dataset and add loss function to model
    train_set, test_set = d5ds.load_dataset(dsname, INPUT_NODE, LABEL_NODE)
    model.add_operation(
        d5.ops.SoftmaxCrossEntropy([OUTPUT_NODE, LABEL_NODE], 'loss'))

    # Create executor and reference SGD optimizer
    executor = d5tf.from_model(model)
    optimizer = d5ref.GradientDescent(executor, 'loss')

    # Create samplers
    train_sampler = d5.ShuffleSampler(train_set, BATCH_SIZE)
    test_sampler = d5.ShuffleSampler(test_set, BATCH_SIZE)

    # Create runner (training/test manager)
    runner = d5.Trainer(train_sampler, test_sampler, executor, optimizer,
                        OUTPUT_NODE)
    #############################
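
The excerpt ends before the training loop. One hedged way to drive the same components is the d5.test_training entry point used in the other examples here, bypassing the Trainer object; the epoch count below is a placeholder for the file's run-until-accuracy logic:

EPOCHS = 10  # placeholder; train_until.py keeps training until `accuracy` is reached
d5.test_training(executor, train_sampler, test_sampler, optimizer, EPOCHS,
                 BATCH_SIZE, OUTPUT_NODE)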
Code Example #3
File: recipe.py  Project: vibhatha/deep500
def run_recipe(fixed: Dict[str, Any], mutable: Dict[str, Any],
               metrics: List[Tuple[d5.TestMetric, Any]]) -> bool:
    """ Runs a Deep500 recipe (see file documentation). Returns True on success
        and False on failure, printing the unacceptable metrics. """

    # Argument validation
    if any(k in mutable for k in fixed.keys()):
        raise RuntimeError('Fixed and mutable components cannot overlap')

    # Create unified dictionary
    comps = dict(fixed, **mutable)

    # Add missing arguments and keyword arguments
    old_keys = list(comps.keys())
    for k in old_keys:
        if (k not in ['batch_size', 'epochs', 'events']
                and not (k.endswith('_args') or k.endswith('_kwargs'))):
            if ('%s_args' % k) not in comps:
                comps['%s_args' % k] = tuple()
            if ('%s_kwargs' % k) not in comps:
                comps['%s_kwargs' % k] = {}

    ########################################################################
    # Obtain dataset metadata
    if 'dataset' not in comps:
        raise SyntaxError('Dataset must be specified in training recipe')

    if isinstance(comps['dataset'], str):
        loss_op = d5ds.dataset_loss(comps['dataset'])
        ds_shape = d5ds.dataset_shape(comps['dataset'])
    else:
        loss_op = comps['dataset'].loss
        ds_shape = comps['dataset'].shape
    ds_classes, sample_shape = ds_shape[0], ds_shape[1:]

    # Construct network
    if 'model' not in comps:
        raise SyntaxError('Model must be specified in recipe')
    if 'batch_size' not in comps:
        raise SyntaxError('Batch size must be specified in training recipe')
    batch = comps['batch_size']

    if isinstance(comps['model'], str):
        # ONNX file
        if os.path.isfile(comps['model']):
            network = d5.parser.load_and_parse_model(comps['model'])
            input_node = network.get_input_nodes()[0].name
            output_node = network.get_output_nodes()[0].name
        else:  # Standard model
            network, input_node, output_node = \
                d5nt.create_model(comps['model'], batch, *comps['model_args'],
                                  classes=ds_classes, shape=sample_shape,
                                  **comps['model_kwargs'])
    else:  # Callable
        network, input_node, output_node = comps['model'](
            batch,
            *comps['model_args'],
            classes=ds_classes,
            shape=sample_shape,
            **comps['model_kwargs'])

    # Add loss function to model
    network.add_operation(loss_op([output_node, 'label'], 'loss'))

    # Construct dataset
    if isinstance(comps['dataset'], str):
        train_set, validation_set = d5ds.load_dataset(
            comps['dataset'], input_node, 'label', *comps['dataset_args'],
            **comps['dataset_kwargs'])
    else:
        train_set, validation_set = comps['dataset'](input_node, 'label',
                                                     *comps['dataset_args'],
                                                     **comps['dataset_kwargs'])

    # Construct samplers
    if 'train_sampler' in comps:
        if isinstance(comps['train_sampler'], d5.Sampler):
            train_sampler = comps['train_sampler']
        else:
            train_sampler = comps['train_sampler'](
                train_set, batch, *comps['train_sampler_args'],
                **comps['train_sampler_kwargs'])
    else:
        train_sampler = train_set

    if 'validation_sampler' in comps:
        if isinstance(comps['validation_sampler'], d5.Sampler):
            validation_sampler = comps['validation_sampler']
        else:
            validation_sampler = comps['validation_sampler'](
                validation_set, batch, *comps['validation_sampler_args'],
                **comps['validation_sampler_kwargs'])
    else:
        validation_sampler = validation_set

    # Construct executor
    if 'executor' not in comps:
        raise SyntaxError('Executor must be specified in recipe')
    if isinstance(comps['executor'], d5.GraphExecutor):
        executor = comps['executor']
    else:
        executor = comps['executor'](network, *comps['executor_args'],
                                     **comps['executor_kwargs'])

    # Construct optimizer
    if 'optimizer' not in comps:
        raise SyntaxError('Optimizer must be specified in training recipe')
    optimizer = comps['optimizer'](executor, 'loss', *comps['optimizer_args'],
                                   **comps['optimizer_kwargs'])

    # Add total time to metrics
    metrics.append((d5.WallclockTime(reruns=0, avg_over=1), None))

    ########################################################################
    # Create trainer and run
    if 'epochs' not in comps:
        raise SyntaxError('Epochs must be specified in training recipe')
    if 'events' not in comps:
        comps['events'] = None
    results = d5.test_training(executor,
                               train_sampler,
                               validation_sampler,
                               optimizer,
                               comps['epochs'],
                               batch,
                               output_node,
                               metrics=[m[0] for m in metrics],
                               events=comps['events'])

    # Verify results
    ok = True
    for (metric, acceptable), result in zip(metrics, results):
        if acceptable is not None:
            if result < acceptable:
                print('FAIL %s: %s (Acceptable: %s)' %
                      (type(metric).__name__, result, acceptable))
                ok = False

    if not ok:
        return False
    else:
        print('PASSED')
        return True
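
For reference, a hypothetical invocation of run_recipe consistent with the keys it validates, assuming the d5tf and d5ref aliases from the other examples; d5.TestAccuracy as the checked metric is an assumption (only WallclockTime appears above):

fixed = {
    'model': 'simple_cnn',   # resolved through d5nt.create_model
    'dataset': 'mnist',      # resolved through d5ds.load_dataset
    'batch_size': 64,
    'epochs': 2,
}
mutable = {
    'executor': d5tf.from_model,         # called with the parsed network
    'optimizer': d5ref.GradientDescent,  # called with (executor, 'loss')
}
metrics = [(d5.TestAccuracy(), 90.0)]    # assumed metric class; fail below 90%
print(run_recipe(fixed, mutable, metrics))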
Code Example #4
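        # (excerpt from the update step of an AcceleGrad-style optimizer; self.z,
        #  self.y, self.squares, self.alpha_t, self.lr, self.eps, eta_t, grad,
        #  old_param, param_name, squared_grad and numpy (np) are set up in code
        #  not shown here)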
        z_t = self.z[param_name]

        # Weighted gradient step on the z_t sequence and a plain gradient step
        # on the y_t sequence (the two iterates the AcceleGrad scheme couples)
        z_t2 = z_t - self.alpha_t * eta_t * grad
        y_t2 = old_param - eta_t * grad

        self.z[param_name] = z_t2
        self.y[param_name] = y_t2
        self.squares[param_name] = squared_grad
        # AdaGrad-style step size from the accumulated squared gradients
        adjusted_lr = self.lr / (self.eps + np.sqrt(squared_grad))

        self.init = False
        return old_param - adjusted_lr * grad

if __name__ == '__main__':
    import deep500 as d5  # used below for d5.ops and d5.test_training
    from deep500 import networks as d5net, datasets as d5ds
    from deep500.frameworks import tensorflow as d5tf
    from deep500.frameworks import reference as d5ref
    batch_size = 1024

    # Create network and dataset
    net, innode, outnode = d5net.create_model('simple_cnn', batch_size)
    net.add_operation(d5.ops.LabelCrossEntropy([outnode, 'label'], 'loss'))
    train, test = d5ds.load_dataset('mnist', innode, 'label')

    # Create executor and optimizer
    executor = d5tf.from_model(net)
    opt = AcceleGradOptimizer(executor)
    
    # Run training
    d5.test_training(executor, train, test, opt, 5, batch_size, outnode)