Example #1
def And(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Perform the `and` logical operation elementwise on two input tensors with numpy-style broadcasting."""
    left, right = numpy_style_broadcast_for_binary_operation(onnx_node, ng_inputs)
    left = ng.convert(ng.not_equal(left, 0), int)
    right = ng.convert(ng.not_equal(right, 0), int)
    return ng.convert(left * right, bool)
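
This implements logical AND with integer arithmetic: each operand is first mapped to {0, 1} by comparing against zero, and the product of the two maps is 1 exactly when both inputs are truthy. A minimal NumPy sketch of the same idea (the function name is illustrative, not part of the ngraph API):

import numpy as np

def logical_and_via_multiply(left, right):
    # Map each element to 1 (non-zero) or 0 (zero); the product is 1
    # exactly when both inputs are non-zero.
    left = (left != 0).astype(int)
    right = (right != 0).astype(int)
    return (left * right).astype(bool)

# logical_and_via_multiply(np.array([0, 2, 3]), np.array([1, 0, 3]))
# -> array([False, False,  True])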
Example #2
def Xor(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Perform the `xor` logical operation elementwise on two input tensors."""
    left, right = broadcast_for_binary_operation(onnx_node, ng_inputs)
    not_left = ng.convert(ng.equal(left, 0), int)
    left = ng.convert(ng.not_equal(left, 0), int)
    right = ng.convert(ng.not_equal(right, 0), int)
    not_right = ng.convert(ng.equal(right, 0), int)

    return ((not_left * right) + (not_right * left)) > 0
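
Here XOR is built from the identity a XOR b = (NOT a AND b) OR (a AND NOT b): the two AND terms are integer products, and the final `> 0` plays the role of OR. A NumPy sketch under the same encoding (illustrative name only):

import numpy as np

def logical_xor_via_arithmetic(left, right):
    a = (left != 0).astype(int)
    b = (right != 0).astype(int)
    # (NOT a AND b) + (a AND NOT b) is positive exactly when the two
    # truth values differ.
    return ((1 - a) * b + a * (1 - b)) > 0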
Example #3
def classification_error(model, labels):
    """
    Auxiliary function that adds a classification-error computation to
    an imported model, for testing.

    Arguments:
        model - imported model
        labels - placeholder for one-hot labels array

    Returns:
        Classification error function (mean for batch)
    """
    try:
        errors = ng.not_equal(
            ng.argmax(model, out_axes=[labels.axes.batch_axis()]),
            ng.argmax(labels, out_axes=[labels.axes.batch_axis()]))
    except ValueError:
        errors = ng.not_equal(ng.argmax(model), ng.argmax(labels))

    return ng.mean(errors, out_axes=())
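
The computation reduces to comparing predicted class indices with true ones and averaging the mismatches over the batch; the try/except falls back to a plain argmax when the batch axis cannot be resolved. A NumPy sketch of the error itself, assuming the class axis is axis 1:

import numpy as np

def classification_error_np(probs, one_hot_labels):
    # Mismatch rate between predicted and true class indices,
    # averaged over the batch (axis 0 here).
    predictions = np.argmax(probs, axis=1)
    truth = np.argmax(one_hot_labels, axis=1)
    return np.mean(predictions != truth)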
Example #4
def binary_op(op_str, a, b):

    if op_str == '+':
        return a + b
    elif op_str == 'Add':
        return ng.add(a, b)
    elif op_str == '-':
        return a - b
    elif op_str == 'Sub':
        return ng.subtract(a, b)
    elif op_str == '*':
        return a * b
    elif op_str == 'Mul':
        return ng.multiply(a, b)
    elif op_str == '/':
        return a / b
    elif op_str == 'Div':
        return ng.divide(a, b)
    elif op_str == 'Dot':
        return Dot(a, b)
    elif op_str == 'Equal':
        return ng.equal(a, b)
    elif op_str == 'Greater':
        return ng.greater(a, b)
    elif op_str == 'GreaterEq':
        return ng.greater_equal(a, b)
    elif op_str == 'Less':
        return ng.less(a, b)
    elif op_str == 'LessEq':
        return ng.less_equal(a, b)
    elif op_str == 'Maximum':
        return ng.maximum(a, b)
    elif op_str == 'Minimum':
        return ng.minimum(a, b)
    elif op_str == 'NotEqual':
        return ng.not_equal(a, b)
    elif op_str == 'Power':
        return ng.power(a, b)
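
The ladder above is a straightforward string-to-builder dispatch; note that an unrecognized `op_str` silently returns None. The same mapping can be kept in a dict, which also makes an unknown operator an explicit error. A plain-Python sketch of that variant (using operator-module callables rather than the ngraph builders):

import operator

# Hypothetical dict-based dispatch table (abridged).
_BINARY_OPS = {
    '+': operator.add, 'Add': operator.add,
    '-': operator.sub, 'Sub': operator.sub,
    '*': operator.mul, 'Mul': operator.mul,
    '/': operator.truediv, 'Div': operator.truediv,
    'NotEqual': operator.ne, 'Power': operator.pow,
}

def binary_op_dispatch(op_str, a, b):
    try:
        return _BINARY_OPS[op_str](a, b)
    except KeyError:
        raise ValueError('unsupported operator: {}'.format(op_str))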
Example #5
def binary_op(op_str, a, b):

    if op_str == "+":
        return a + b
    elif op_str == "Add":
        return ng.add(a, b)
    elif op_str == "-":
        return a - b
    elif op_str == "Sub":
        return ng.subtract(a, b)
    elif op_str == "*":
        return a * b
    elif op_str == "Mul":
        return ng.multiply(a, b)
    elif op_str == "/":
        return a / b
    elif op_str == "Div":
        return ng.divide(a, b)
    elif op_str == "Equal":
        return ng.equal(a, b)
    elif op_str == "Greater":
        return ng.greater(a, b)
    elif op_str == "GreaterEq":
        return ng.greater_equal(a, b)
    elif op_str == "Less":
        return ng.less(a, b)
    elif op_str == "LessEq":
        return ng.less_equal(a, b)
    elif op_str == "Maximum":
        return ng.maximum(a, b)
    elif op_str == "Minimum":
        return ng.minimum(a, b)
    elif op_str == "NotEqual":
        return ng.not_equal(a, b)
    elif op_str == "Power":
        return ng.power(a, b)
Example #6
    optimizer = GradientDescentMomentum(learning_rate=learning_rate_policy,
                                        momentum_coef=0.9,
                                        wdecay=0.0001,
                                        iteration=inputs['iteration'])
    label_indices = inputs['label']
    train_loss = ng.cross_entropy_multi(resnet(inputs['image']),
                                        ng.one_hot(label_indices, axis=ax.Y))
    batch_cost = ng.sequential(
        [optimizer(train_loss),
         ng.mean(train_loss, out_axes=())])
    train_computation = ng.computation(batch_cost, "all")

    with Layer.inference_mode_on():
        inference_prob = resnet(inputs['image'])
        errors = ng.not_equal(ng.argmax(inference_prob, out_axes=[ax.N]),
                              label_indices)
        eval_loss = ng.cross_entropy_multi(
            inference_prob, ng.one_hot(label_indices, axis=ax.Y))
        eval_loss_names = ['cross_ent_loss', 'misclass']
        eval_computation = ng.computation([eval_loss, errors], "all")

    # Now bind the computations we are interested in
    transformer = ngt.make_transformer()
    train_function = transformer.add_computation(train_computation)
    eval_function = transformer.add_computation(eval_computation)

    tpbar = tqdm(unit="batches", ncols=100, total=args.num_iterations)
    interval_cost = 0.0

    for step, data in enumerate(train_set):
        data['iteration'] = step
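
In this excerpt, ng.one_hot(label_indices, axis=ax.Y) turns a batch of integer labels into one-hot vectors along the class axis so they can feed the cross-entropy. A NumPy sketch of that encoding (illustrative, with the class axis last):

import numpy as np

def one_hot_np(labels, num_classes):
    # Each integer label becomes a row with a single 1 at its index.
    out = np.zeros((labels.size, num_classes))
    out[np.arange(labels.size), labels] = 1
    return out

# one_hot_np(np.array([1, 0, 2]), 3)
# -> [[0, 1, 0], [1, 0, 0], [0, 0, 1]]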
Example #7
])

######################
# Input specification
ax.C.length, ax.H.length, ax.W.length = train_set.shapes['image']
ax.D.length = 1
ax.N.length = args.batch_size
ax.Y.length = 10

# placeholders with descriptive names
inputs = dict(image=ng.placeholder([ax.C, ax.H, ax.W, ax.N]),
              label=ng.placeholder([ax.N]))

optimizer = GradientDescentMomentum(0.01, 0.9)
output_prob = seq1.train_outputs(inputs['image'])
errors = ng.not_equal(ng.argmax(output_prob, out_axes=[ax.N]), inputs['label'])
loss = ng.cross_entropy_multi(output_prob,
                              ng.one_hot(inputs['label'], axis=ax.Y))
mean_cost = ng.mean(loss, out_axes=())
updates = optimizer(loss)

train_outputs = dict(batch_cost=mean_cost, updates=updates)
loss_outputs = dict(cross_ent_loss=loss, misclass_pct=errors)

# Now bind the computations we are interested in
transformer = ngt.make_transformer()
train_computation = make_bound_computation(transformer, train_outputs, inputs)
loss_computation = make_bound_computation(transformer, loss_outputs, inputs)

cbs = make_default_callbacks(output_file=args.output_file,
                             frequency=args.iter_interval,
Example #8
def Not(onnx_node, ng_inputs):  # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode
    """Return the negation of the input tensor elementwise."""
    data = ng.convert(ng.not_equal(ng_inputs[0], 0), bool)
    return ng.logical_not(data)
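
Elementwise NOT follows the same recipe as the other logical ops: coerce to booleans by comparing against zero, then negate. In NumPy terms (illustrative name):

import numpy as np

def logical_not_np(data):
    # Any non-zero element counts as True; negate the result.
    return np.logical_not(data != 0)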
Example #9
optimizer = RMSProp(gradient_clip_value=gradient_clip_value)

train_prob = seq1(inputs['inp_txt'])
train_loss = ng.cross_entropy_multi(train_prob,
                                    ng.one_hot(inputs['tgt_txt'], axis=ax.Y),
                                    usebits=True)
batch_cost = ng.sequential(
    [optimizer(train_loss),
     ng.mean(train_loss, out_axes=())])
train_outputs = dict(batch_cost=batch_cost)

with Layer.inference_mode_on():
    inference_prob = seq1(inputs['inp_txt'])

errors = ng.not_equal(ng.argmax(inference_prob, reduction_axes=[ax.Y]),
                      inputs['tgt_txt'])
errors_last_char = ng.slice_along_axis(errors, ax.REC, time_steps - 1)

eval_loss = ng.cross_entropy_multi(inference_prob,
                                   ng.one_hot(inputs['tgt_txt'], axis=ax.Y),
                                   usebits=True)
eval_outputs = dict(cross_ent_loss=eval_loss,
                    misclass_pct=errors,
                    misclass_last_pct=errors_last_char)

# Now bind the computations we are interested in
with closing(ngt.make_transformer()) as transformer:
    train_computation = make_bound_computation(transformer, train_outputs,
                                               inputs)
    loss_computation = make_bound_computation(transformer, eval_outputs,
                                              inputs)
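
Above, ng.slice_along_axis(errors, ax.REC, time_steps - 1) keeps only the final time step of the per-character error tensor, so the last-character misclassification can be reported separately. A NumPy analogue, assuming the index of the recurrent axis is known:

import numpy as np

def last_step_errors(errors, time_axis, time_steps):
    # Select the single slice at the final time step along the
    # recurrent axis.
    return np.take(errors, time_steps - 1, axis=time_axis)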
Example #10
def train_network(model, train_set, valid_set, batch_size, epochs, log_file):
    '''
    Train the predefined network and save its progress to the log
    file given in the arguments.

    model(object): Defines the model in Neon
    train_set(object): Defines the training set
    valid_set(object): Defines the validation set
    batch_size(int): Minibatch size
    epochs(int): Number of training epochs
    log_file(string): File name to store training logs for plotting

    '''

    # Form placeholders for inputs to the network
    # Iterations needed for learning rate schedule
    inputs = train_set.make_placeholders(include_iteration=True)

    # Convert labels into one-hot vectors
    one_hot_label = ng.one_hot(inputs['label'], axis=ax.Y)

    learning_rate_policy = {
        'name': 'schedule',
        'schedule': list(np.arange(2, epochs, 2)),
        'gamma': 0.6,
        'base_lr': 0.001
    }

    optimizer = GradientDescentMomentum(learning_rate=learning_rate_policy,
                                        momentum_coef=0.9,
                                        wdecay=0.005,
                                        iteration=inputs['iteration'])

    # Define graph for training
    train_prob = model(inputs['video'])
    train_loss = ng.cross_entropy_multi(train_prob, one_hot_label)
    batch_cost = ng.sequential(
        [optimizer(train_loss),
         ng.mean(train_loss, out_axes=())])

    with closing(ngt.make_transformer()) as transformer:

        # Define graph for calculating validation set error and misclassification rate
        # Use inference mode for validation to avoid dropout in forward pass
        with Layer.inference_mode_on():
            inference_prob = model(inputs['video'])
            errors = ng.not_equal(ng.argmax(inference_prob), inputs['label'])
            eval_loss = ng.cross_entropy_multi(inference_prob, one_hot_label)
            eval_outputs = {'cross_ent_loss': eval_loss, 'misclass': errors}

            eval_computation = make_bound_computation(transformer,
                                                      eval_outputs, inputs)

        train_outputs = {'batch_cost': batch_cost}
        train_computation = make_bound_computation(transformer, train_outputs,
                                                   inputs)

        interval_cost = 0.0

        # Train in epochs
        logs = {'train': [], 'validation': [], 'misclass': []}
        for epoch in trange(epochs, desc='Epochs'):

            # Setup the training bar
            numBatches = train_set.ndata // batch_size
            tpbar = tqdm(unit='batches',
                         ncols=100,
                         total=numBatches,
                         leave=False)

            train_set.reset()
            valid_set.reset()

            train_log = []
            for step, data in enumerate(train_set):
                data = dict(data)
                data['iteration'] = epoch  # learning schedule based on epochs
                output = train_computation(data)
                train_log.append(float(output['batch_cost']))

                tpbar.update(1)
                tpbar.set_description("Training {:0.4f}".format(
                    float(output['batch_cost'])))
                interval_cost += float(output['batch_cost'])
            tqdm.write("Epoch {epch}  complete. "
                       "Avg Train Cost {cost:0.4f}".format(epch=epoch,
                                                           cost=interval_cost /
                                                           step))
            interval_cost = 0.0
            tpbar.close()
            validation_loss = run_validation(valid_set, eval_computation)
            tqdm.write("Avg losses: {}".format(validation_loss))
            logs['train'].append(train_log)
            logs['validation'].append(validation_loss['cross_ent_loss'])
            logs['misclass'].append(validation_loss['misclass'])

            # Save log data and plot at the end of each epoch
            with open(log_file, 'wb') as f:
                pickle.dump(logs, f)
            plot_logs(logs=logs)
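
The 'schedule' learning-rate policy above decays the base rate by `gamma` each time training passes one of the listed epoch milestones. A sketch of those semantics as understood here (an assumption about the policy's behavior, not neon's implementation):

import numpy as np

def scheduled_lr(base_lr, gamma, schedule, iteration):
    # Count how many milestones have been passed and decay once per
    # milestone.
    steps = int(np.sum(np.asarray(schedule) <= iteration))
    return base_lr * gamma ** steps

# With schedule [2, 4, ...], base_lr 0.001, gamma 0.6:
# epochs 0-1 -> 0.001, epochs 2-3 -> 0.0006, epochs 4-5 -> 0.00036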
Example #11
def train_mnist_mlp(transformer_name,
                    data_dir=None,
                    rng_seed=12,
                    batch_size=128,
                    train_iter=10,
                    eval_iter=10):
    assert transformer_name in ['cpu', 'hetr']
    assert isinstance(rng_seed, int)

    # Apply this metadata to graph regardless of transformer,
    # but it is ignored for non-HeTr case
    hetr_device_ids = (0, 1)

    # use consistent rng seed between runs
    np.random.seed(rng_seed)

    # Data
    train_data, valid_data = MNIST(path=data_dir).load_data()
    train_set = ArrayIterator(train_data,
                              batch_size,
                              total_iterations=train_iter)
    valid_set = ArrayIterator(valid_data, batch_size)
    inputs = train_set.make_placeholders()
    ax.Y.length = 10

    # Model
    with ng.metadata(device_id=hetr_device_ids, parallel=ax.N):
        seq1 = Sequential([
            Preprocess(functor=lambda x: x / 255.),
            Affine(nout=100, weight_init=GaussianInit(), activation=Rectlin()),
            Affine(axes=ax.Y,
                   weight_init=GaussianInit(),
                   activation=Logistic())
        ])

        train_prob = seq1(inputs['image'])
        train_loss = ng.cross_entropy_binary(
            train_prob, ng.one_hot(inputs['label'], axis=ax.Y))

        optimizer = GradientDescentMomentum(0.1, 0.9)
        batch_cost = ng.sequential(
            [optimizer(train_loss),
             ng.mean(train_loss, out_axes=())])
        train_outputs = dict(batch_cost=batch_cost)

        with Layer.inference_mode_on():
            inference_prob = seq1(inputs['image'])
        errors = ng.not_equal(ng.argmax(inference_prob, out_axes=[ax.N]),
                              inputs['label'])
        eval_loss = ng.cross_entropy_binary(
            inference_prob, ng.one_hot(inputs['label'], axis=ax.Y))
        eval_outputs = dict(cross_ent_loss=eval_loss, misclass_pct=errors)

    # Runtime
    with closing(
            ngt.make_transformer_factory(transformer_name)()) as transformer:
        train_computation = make_bound_computation(transformer, train_outputs,
                                                   inputs)
        loss_computation = make_bound_computation(transformer, eval_outputs,
                                                  inputs)

        train_costs = list()
        for step in range(train_iter):
            out = train_computation(next(train_set))
            train_costs.append(float(out['batch_cost']))

        ce_loss = list()
        for step in range(eval_iter):
            out = loss_computation(next(valid_set))
            ce_loss.append(np.mean(out['cross_ent_loss']))

        return train_costs, ce_loss
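
Given the signature above, a hypothetical invocation on the CPU transformer would look like this (assuming the MNIST data can be fetched to the default path):

train_costs, ce_loss = train_mnist_mlp('cpu',
                                       batch_size=128,
                                       train_iter=10,
                                       eval_iter=10)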
Example #12
def Xor(onnx_node, ng_inputs):  # type: (NodeWrapper, List[TensorOp]) -> Op
    """Perform the `xor` logical operation elementwise on two input tensors."""
    left, right = cast_axes_for_binary_broadcast(onnx_node, ng_inputs)
    left = ng.not_equal(left, 0)
    right = ng.not_equal(right, 0)
    return (left + right) % 2
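
This variant gets XOR from parity: after coercing both operands to {0, 1}, the only possible sums are 0, 1 and 2, and only 1 is odd, so mod 2 recovers exclusive-or. A NumPy sketch (illustrative name):

import numpy as np

def logical_xor_mod2(left, right):
    a = (left != 0).astype(int)
    b = (right != 0).astype(int)
    # Only an odd sum (exactly one truthy operand) yields 1.
    return ((a + b) % 2).astype(bool)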