def test_4d_reduction(transformer_factory, input_axes):

    # Limiting the maximum absolute value of tensor elements to 7.9.
    # See the description in test_exit_condition above.

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    x = ng.constant(x_val, input_axes)

    out1 = ng.sum(x, reduction_axes=input_axes[1])
    out2 = ng.sum(x, reduction_axes=input_axes[3])

    with executor([out1, out2]) as ex:
        graph_val1, graph_val2 = ex()
        np_val1 = np.sum(x_val, 1)
        np_val2 = np.sum(x_val, 3)
        ng.testing.assert_allclose(graph_val1,
                                   np_val1,
                                   rtol=1e-4,
                                   atol_multiplier=x_val.shape[1])
        ng.testing.assert_allclose(graph_val2,
                                   np_val2,
                                   rtol=1e-4,
                                   atol_multiplier=x_val.shape[3])
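
The rng.randn_abs_clip helper used above is not shown on this page; below is a minimal NumPy sketch of what such a clipped absolute-value generator could look like (the signature is an assumption, and it takes a plain shape tuple here instead of an Axes object):

import numpy as np

def randn_abs_clip(shape, clip_min=0, clip_max=0):
    """Absolute normal samples, optionally clipped to [clip_min, clip_max].

    A clip value of 0 is treated as "no clipping", matching how the tests
    above pass 0 for non-flex transformers (assumption).
    """
    x = np.absolute(np.random.randn(*shape))
    if clip_max > 0:
        x = np.minimum(x, clip_max)
    if clip_min > 0:
        x = np.maximum(x, clip_min)
    return x

# Example: values bounded to the flex-friendly range [1/7.9, 7.9]
sample = randn_abs_clip((4, 3), clip_min=1.0 / 7.9, clip_max=7.9)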
Example #2
    def __call__(self, inputs):
        query = ng.cast_axes(inputs['query'], [self.batch_axis, self.sentence_rec_axis])

        # Query embedding [batch, sentence_axis, F]
        q_emb = self.LUT_A(query)

        # Multiply by position encoding and sum
        u_0 = ng.sum(q_emb * self.pos_enc, reduction_axes=[self.sentence_rec_axis])  # [batch, F]

        # Start a list of the internal states of the model.
        # Will be appended to after each memory hop
        u = [u_0]

        for hopn in range(self.nhops):
            keys = ng.cast_axes(inputs['keys'], [self.batch_axis, self.memory_axis,
                                self.sentence_rec_axis])
            value = ng.cast_axes(inputs['values'], [self.batch_axis, self.memory_axis,
                                 self.val_len_axis])

            # Embed keys
            m_emb_A = self.LUT_A(keys)
            m_A = ng.sum(m_emb_A * self.pos_enc,
                         reduction_axes=[self.sentence_rec_axis])  # [batch, memory_axis, F]

            # Compute scalar similarity between internal state and each memory
            # Equivalent to dot product between u[-1] and each memory in m_A
            dotted = ng.sum(u[-1] * m_A, reduction_axes=[self.embedding_axis])

            probs = ng.softmax(dotted, self.memory_axis)  # [batch, memory_axis]

            # Embed values with same embedding as keys, or new LUTs
            if self.use_v_luts:
                m_emb_C = self.LUTs_C[hopn](value)
            else:
                m_emb_C = self.LUT_A(value)

            m_C = ng.sum(m_emb_C * self.pos_enc, reduction_axes=[self.sentence_rec_axis])

            # Compute weighted sum of output embeddings
            o_k = ng.sum(probs * m_C, reduction_axes=[self.memory_axis])  # [batch, F]

            u_k = u[-1] + o_k  # [batch, F]

            # Add new internal state
            u.append(u_k)

        # Compute predicted answer from product of final internal state and final LUT weight matrix
        if self.use_v_luts:
            a_logits = ng.dot(self.LUTs_C[-1].W, u[-1])  # [batch, V]
        else:
            a_logits = ng.dot(self.LUT_A.W, u[-1])  # [batch, V]
        # rename V to vocab_axis to match answer
        a_logits = ng.cast_axes(a_logits, [self.vocab_axis, self.batch_axis])
        a_pred = ng.softmax(a_logits, self.vocab_axis)

        return a_pred, a_logits
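
A plain NumPy sketch of one memory hop in the __call__ above: dot-product similarity between the internal state and each memory, softmax over the memory axis, then a probability-weighted sum. The batch dimension is omitted and, for brevity, the same embeddings stand in for keys and values (the real code uses m_C for the output sum):

import numpy as np

F, M = 5, 4                     # embedding size, number of memories
u = np.random.randn(F)          # internal state u[-1]
m_A = np.random.randn(M, F)     # embedded memories

dotted = (u * m_A).sum(axis=1)                  # ng.sum(u[-1] * m_A, reduction_axes=[embedding_axis])
probs = np.exp(dotted) / np.exp(dotted).sum()   # ng.softmax(dotted, memory_axis)
o_k = (probs[:, None] * m_A).sum(axis=0)        # weighted sum of memory embeddings
u_k = u + o_k                                   # next internal state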
Example #3
    def ReduceElements(self, cntk_op, inputs):
        """
        Returns a reduction operation (max, min, mean, sum, prod) or a calculation which matches
        CNTK's LogSum reduction (`reduce_log_sum_exp` function).

        Arguments:
            cntk_op: CNTK operation to be imported.
            inputs: List of inputs to this node.

        Returns:
            An ngraph Op.
        """
        assert len(inputs) == 1

        reduction_op_name = cntk_op.attributes.get('reductionOpName')
        # CNTK API defines a reductionKeepDimensions flag, but we currently don't use it
        # keep_dimensions = cntk_op.attributes.get('reductionKeepDimensions', False)

        cntk_op_attribute_axes = []
        if cntk_op.attributes.get('axisVec'):
            cntk_op_attribute_axes.extend(cntk_op.attributes.get('axisVec'))
        elif cntk_op.attributes.get('axis'):
            cntk_op_attribute_axes.append(cntk_op.attributes.get('axis'))

        # CNTK axes are numbered in reverse order: the last axis is labeled 0, the previous 1, etc.
        reduction_axes_indexes = [len(inputs[0].axes) - 1 - i
                                  for (_, _, i) in cntk_op_attribute_axes]
        reduction_ng_axes_list = [axis for (i, axis) in enumerate(inputs[0].axes)
                                  if i in reduction_axes_indexes]
        reduction_ng_axes = ng.Axes(axes=reduction_ng_axes_list)

        if reduction_op_name == 'Max':
            return ng.max(inputs[0], reduction_axes=reduction_ng_axes).named(cntk_op.uid)

        if reduction_op_name == 'Min':
            return ng.min(inputs[0], reduction_axes=reduction_ng_axes).named(cntk_op.uid)

        if reduction_op_name == 'Mean':
            return ng.mean(inputs[0], reduction_axes=reduction_ng_axes).named(cntk_op.uid)

        if reduction_op_name == 'Sum':
            return ng.sum(inputs[0], reduction_axes=reduction_ng_axes).named(cntk_op.uid)

        if reduction_op_name == 'Prod':
            return ng.prod(inputs[0], reduction_axes=reduction_ng_axes).named(cntk_op.uid)

        if reduction_op_name == 'LogSum':
            return ng.log(ng.sum(ng.exp(inputs[0]), reduction_axes=reduction_ng_axes))\
                .named(cntk_op.uid)

        raise NotImplementedError('CNTKImporter: ReduceElements does not support operation %s'
                                  % reduction_op_name)
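
For reference, the LogSum branch above builds log(sum(exp(x))); here is a standalone NumPy check of that identity against a numerically stabler form (not part of the importer):

import numpy as np

x = np.random.randn(3, 4)

# Same formula the LogSum branch composes out of ng.log / ng.sum / ng.exp.
log_sum = np.log(np.sum(np.exp(x), axis=1))

# Max-shifted equivalent for comparison.
m = np.max(x, axis=1, keepdims=True)
stable = m.squeeze(1) + np.log(np.sum(np.exp(x - m), axis=1))

np.testing.assert_allclose(log_sum, stable, rtol=1e-6)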
Example #4
def test_4d_reduction(transformer_factory, input_axes):
    x_val = np.absolute(np.random.randn(*input_axes.lengths))
    x = ng.constant(x_val, input_axes)

    out1 = ng.sum(x, reduction_axes=input_axes[1])
    out2 = ng.sum(x, reduction_axes=input_axes[3])

    with executor([out1, out2]) as ex:
        graph_val1, graph_val2 = ex()
        np_val1 = np.sum(x_val, 1)
        np_val2 = np.sum(x_val, 3)
        np.testing.assert_allclose(graph_val1, np_val1, rtol=1e-4)
        np.testing.assert_allclose(graph_val2, np_val2, rtol=1e-4)
Example #5
def test_variance_sqrt_inverse(transformer_factory, input_tensor):
    inputs = input_tensor
    targets = ng.placeholder(inputs.axes)

    epsilon = 1e-3

    inp_stat = ng.reciprocal(
        ng.sqrt(
            ng.variance(inputs, reduction_axes=inputs.axes.batch_axes()) +
            epsilon))
    err = ng.sum(inp_stat - targets, out_axes=())
    d_inputs = ng.deriv(err, inputs)
    with executor([err, d_inputs], inputs, targets) as comp_func:

        input_value = rng.uniform(-1, 1, inputs.axes)
        target_value = rng.uniform(-1, 1, targets.axes)
        ng_f_res, ng_b_res = comp_func(input_value, target_value)

        npv = np.var(input_value, axis=1, keepdims=True) + epsilon
        np_f_res = 1.0 / np.sqrt(npv)

        npv_delta = 2 * (input_value -
                         np.mean(input_value, axis=1, keepdims=True))

        np_b_res = -0.5 * np_f_res / npv * npv_delta

        np_f_res = np.sum(np_f_res - target_value)

        ng.testing.assert_allclose(np_f_res, ng_f_res, atol=1e-4, rtol=1e-4)
        ng.testing.assert_allclose(np_b_res, ng_b_res, atol=1e-4, rtol=1e-4)
Example #6
def test_variance_wgrad(transformer_factory):
    ax = ng.name_scope('x')
    ax.N = ng.make_axis(128, batch=True)
    ax.Y = ng.make_axis(100)

    inputs = ng.placeholder([ax.Y, ax.N])
    targets = ng.placeholder([ax.Y, ax.N])

    inp_stat = ng.variance(inputs, reduction_axes=inputs.axes.batch_axes())
    err = ng.sum(inp_stat - targets, out_axes=())
    d_inputs = ng.deriv(err, inputs)
    comp_func = executor([err, d_inputs], inputs, targets)

    input_value = rng.uniform(-0.1, 0.1, inputs.axes)
    target_value = rng.uniform(-0.1, 0.1, targets.axes)
    ng_f_res, ng_b_res = comp_func(input_value, target_value)

    np_f_res = np.sum(
        np.var(input_value, axis=1, keepdims=True) - target_value)

    np.testing.assert_allclose(np_f_res, ng_f_res, atol=1e-4, rtol=1e-4)

    np_b_res = 2 * (input_value - np.mean(input_value, axis=1, keepdims=True))

    np.testing.assert_allclose(np_b_res, ng_b_res, atol=1e-4, rtol=1e-4)
Example #7
def compare_optimizer_variable_select(opt_ng, opt_ref):

    # Set up data placeholders
    C = ng.make_axis(20)
    N = ng.make_axis(32, name='N')

    data = ng.placeholder([C, N])
    target = ng.placeholder([N])

    # params to be updated using optimizer to be tested
    np_W1 = np.random.rand(C.length)
    np_W2 = np.random.rand(C.length)
    W1 = ng.variable([C], initial_value=np_W1)
    W2 = ng.variable([C], initial_value=np_W2)

    # Set up op graph
    cost = ng.sum(target - ng.dot(W1, data) - ng.dot(W2, data), out_axes=())
    updated_weights = ng.sequential([opt_ng(cost, variables=[W1]), W1])

    # Set up the computation and run the "train" loop
    with ExecutorFactory() as ex:
        opt_ng_comp = ex.transformer.computation([updated_weights, W2], data,
                                                 target)
        mock_dataset = data_generator(20, C.length, N.length)

        for x, y in mock_dataset:
            [ng_W1,
             ng_W2] = opt_ng_comp(x, y)  # updated weights for ngraph optimizer
            np_W1 = opt_ref(x,
                            np_W1)  # updated weights for reference optimizer

            ng.testing.assert_allclose(np_W1, ng_W1, rtol=1e-3)
            ng.testing.assert_allclose(np_W2, ng_W2, rtol=1e-3)
Example #8
def test_reduce_vector(hetr_device):
    """
    A whole vector is produced on each worker and should be reduced
    before being returned, but not along its axes since it
    does not have the parallel axis in its axes
    """
    if hetr_device == 'gpu':
        pytest.xfail("broadcast communication ops not yet supported on gpus")

    H = ng.make_axis(length=4, name='height')
    N = ng.make_axis(length=8, name='batch')
    weight = ng.make_axis(length=2, name='weight')
    x = ng.placeholder(axes=[N, H])
    w = ng.placeholder(axes=[H, weight])
    with ng.metadata(device=hetr_device, device_id=('0', '1'), parallel=N):
        dot = ng.dot(x, w)
        out = ng.sum(dot, N)

    np_x = np.random.randint(100, size=[N.length, H.length])
    np_weight = np.random.randint(100, size=[H.length, weight.length])
    with closing(ngt.make_transformer_factory(
            'hetr', device=hetr_device)()) as transformer:
        computation = transformer.computation(out, x, w)
        res = computation(np_x, np_weight)
        # TODO should the reduce infer a sum or mean?
        expected = np.sum(np.dot(np_x, np_weight), 0) / 2.
        np.testing.assert_array_equal(res, expected)
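
A NumPy sketch of why the expected value above divides by 2: with the batch axis N split across two workers, each worker produces a partial sum of ng.dot(x, w) over its half of N, and averaging the two partials yields the full sum divided by the number of devices (assumes an even split):

import numpy as np

np_x = np.random.randint(100, size=[8, 4])
np_w = np.random.randint(100, size=[4, 2])

full = np.dot(np_x, np_w)                    # [N, weight]
half0 = np.dot(np_x[:4], np_w).sum(axis=0)   # worker 0 partial sum over N
half1 = np.dot(np_x[4:], np_w).sum(axis=0)   # worker 1 partial sum over N

averaged = (half0 + half1) / 2.
np.testing.assert_array_equal(averaged, full.sum(axis=0) / 2.)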
Example #9
    def __call__(self,
                 cost_func,
                 variables=None,
                 subgraph=None,
                 warning=False):
        """
        Arguments:
            cost_func (Op): The cost function to optimize
            variables (list of variables): List of variables to optimize
            subgraph (SubGraph): A subgraph instance containing all variables to optimize
            warning (bool): If True displays warning message if any variables
                            specified do not participate in batch cost computation

        .. Note::
            If subgraph is provided, the variables to optimize will be taken from it.
            Otherwise, they can be provided explicitly by passing a list as `variables`.
            If neither `subgraph` nor `variables` is provided, the variables to optimize will be
            all trainable variables on which `cost` depends.
        """

        all_updates = []
        batch_cost = ng.sum(cost_func, out_axes=())
        if cost_func.axes.batch_axis() is None:
            batch_size = 1
        else:
            batch_size = cost_func.axes.batch_axis().length

        # determine variables to optimize
        if subgraph is not None:
            if variables is not None:
                raise ValueError(
                    "variables and subgraph cannot both be specified.")
            variables = list(subgraph.variables.values())

        if variables is None:
            variables = batch_cost.variables()
        elif variables is not None and warning is True:
            all_variables = batch_cost.variables()
            selected_variables = all_variables & set(variables)
            if len(selected_variables) < len(variables):
                logger.warn(
                    "not all selected variables participate in cost computation"
                )

        # gradients
        grads = [ng.deriv(batch_cost, v) / batch_size for v in variables]
        scale_factor = clip_gradient_norm(grads, self.gradient_clip_norm)

        # updates
        for variable, grad in zip(variables, grads):
            updates = self.variable_update(variable, grad, scale_factor)
            all_updates.append(updates)
        updates = ng.doall(all_updates)
        grads = ng.doall(grads)
        clips = ng.doall([
            ng.assign(variable,
                      clip_weight_value(variable, self.weight_clip_value))
            for variable in variables
        ])
        return ng.sequential([grads, updates, clips, 0])
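
A standalone sketch of the variable-selection rules in the __call__ above, in plain Python with no ngraph (names are illustrative): pass an explicit variable list, or a subgraph, or neither, but never both.

def select_variables(cost_variables, variables=None, subgraph=None):
    if subgraph is not None:
        if variables is not None:
            raise ValueError("variables and subgraph cannot both be specified.")
        variables = list(subgraph)        # all variables captured by the subgraph
    if variables is None:
        variables = list(cost_variables)  # default: everything the cost depends on
    return variables

print(select_variables(['W1', 'W2']))                    # ['W1', 'W2']
print(select_variables(['W1', 'W2'], variables=['W1']))  # ['W1']
print(select_variables(['W1', 'W2'], subgraph=['W2']))   # ['W2']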
Example #10
    def __call__(self, cost_func):
        all_updates = []
        batch_cost = ng.sum(cost_func, out_axes=())
        batch_size = cost_func.axes.batch_axes()[0].length

        grads = [
            ng.deriv(batch_cost, v) / batch_size
            for v in batch_cost.variables()
        ]
        scale_factor = clip_gradient_norm(grads, batch_size,
                                          self.gradient_clip_norm)

        epsilon, decay = (self.epsilon, self.decay_rate)
        for i, (variable, grad) in enumerate(zip(batch_cost.variables(),
                                                 grads)):
            grad = clip_gradient_value(grad, self.gradient_clip_value)
            state = ng.persistent_tensor(axes=variable.axes, initial_value=0.)
            all_updates.append(
                ng.sequential([
                    ng.assign(state,
                              decay * state + (1.0 - decay) * ng.square(grad)),
                    ng.assign(
                        variable,
                        variable - ((scale_factor * grad * self.lrate) /
                                    (ng.sqrt(state + epsilon) + epsilon)))
                ]))

        return ng.doall(all_updates)
Example #11
    def __call__(self, cost_func):
        all_updates = []
        batch_cost = ng.sum(cost_func, out_axes=())
        batch_size = cost_func.axes.batch_axes()[0].length
        grads = [
            ng.deriv(batch_cost, v) / batch_size
            for v in batch_cost.variables()
        ]
        scale_factor = clip_gradient_norm(grads, batch_size,
                                          self.gradient_clip_norm)
        for variable, grad in zip(batch_cost.variables(), grads):
            updates = []
            velocity = ng.persistent_tensor(
                axes=variable.axes,
                initial_value=0.).named(variable.name + '_vel')
            clip_grad = clip_gradient_value(grad, self.gradient_clip_value)
            lr = -self.lrate * (scale_factor * clip_grad +
                                self.wdecay * variable)
            updates.append(
                ng.assign(velocity, velocity * self.momentum_coef + lr))
            if self.nesterov:
                delta = (self.momentum_coef * velocity + lr)
            else:
                delta = velocity
            updates.append(ng.assign(variable, variable + delta))
            all_updates.append(ng.sequential(updates))
        return ng.doall(all_updates)
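
A small NumPy reference for the velocity update built by the loop above, including the Nesterov branch (gradient scaling, clipping and weight decay omitted; this is a reading aid, not ngraph code):

import numpy as np

def momentum_step(w, grad, velocity, lrate, momentum_coef, nesterov=False):
    lr = -lrate * grad                          # mirrors lr = -self.lrate * (...)
    velocity = velocity * momentum_coef + lr    # ng.assign(velocity, ...)
    delta = momentum_coef * velocity + lr if nesterov else velocity
    return w + delta, velocity                  # ng.assign(variable, variable + delta)

w = np.zeros(3)
v = np.zeros(3)
g = np.array([0.5, -1.0, 2.0])
w, v = momentum_step(w, g, v, lrate=0.1, momentum_coef=0.9, nesterov=True)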
Example #12
    def __call__(self, cost_func):
        with ng.Op.saved_user_deps():
            velocity_updates, param_updates = [], []
            batch_cost = ng.sum(cost_func, out_axes=())
            batch_size = cost_func.axes.batch_axes()[0].length

            grads = [ng.deriv(batch_cost, v) / batch_size for v in batch_cost.variables()]
            scale_factor = clip_gradient_norm(grads, batch_size, self.gradient_clip_norm)

            for variable, grad in zip(batch_cost.variables(), grads):
                grad = clip_gradient_value(grad, self.gradient_clip_value)

                velocity = ng.persistent_tensor(axes=variable.axes,
                                                initial_value=0.).named(variable.name + '_vel')
                velocity_updates.append(
                    ng.assign(velocity,
                              velocity * self.momentum_coef - self.learning_rate * (
                                  scale_factor * grad + self.wdecay * variable)))

                param_updates.append(ng.assign(variable, variable + velocity))

            lr_update = [ng.assign(self.learning_rate,
                                   self.schedule.get_learning_rate(self.learning_rate,
                                                                   self.iteration_index))]

            updates = ng.doall(velocity_updates + param_updates + lr_update)
            self.iteration_index += 1

        return updates
Example #13
def test_sequential_reduce(M):
    x = ng.variable([M], initial_value=1)
    x0 = x + x
    x1 = ng.sum(x0, out_axes=())
    x2 = ng.sum(x0, out_axes=()) + x0
    p = ng.sequential([x0, x1, x2])

    with ExecutorFactory() as ex:
        x0_val, x1_val, x2_val, p_val, x_val = ex.executor([x0, x1, x2, p,
                                                            x])()
        x0_np = x_val + x_val
        x1_np = np.sum(x0_np)
        x2_np = x1_np + x0_np
        assert np.allclose(x0_val, x0_np)
        assert np.allclose(x1_val, x1_np)
        assert np.allclose(x2_val, x2_np)
        assert np.allclose(p_val, x2_np)
Example #14
def test_conv_flatten_deriv(n4_hw12_c3_5x5):
    """
    Test deriv of conv followed by flatten
    """
    cf = ConvParams(**n4_hw12_c3_5x5)

    axes_rsck = ng.make_axes([cf.ax_f[2], cf.ax_f[3], cf.ax_f[0], cf.ax_f[-1]])
    axes_rsck_prime = ng.make_axes([ng.make_axis(name=ax.name + 'p', length=ax.length)
                                    for ax in axes_rsck])
    axes_nmpqk = ng.make_axes([cf.ax_o[-1], cf.ax_o[1], cf.ax_o[2], cf.ax_o[3], cf.ax_o[0]])

    # broadcast input / filter axes
    input_var = ng.variable(cf.ax_i).named('input')
    input_val = np.ones(input_var.axes.lengths)

    filter_rsck_prime = ng.variable(axes_rsck_prime).named('filter')
    filter_var = filter_rsck_prime
    filter_rsck = ng.cast_axes(filter_rsck_prime, axes_rsck).named('frsck')
    filter_trsck = ng.expand_dims(filter_rsck, cf.ax_f[1], 0).named('ftrsck')
    filter_ctrsk = ng.axes_with_order(filter_trsck, axes=cf.ax_f).named('ctrsk')

    # convolution
    output_kmpqn = ng.convolution(cf.conv_params, input_var, filter_ctrsk, axes=cf.ax_o)
    output_nmpqk = ng.axes_with_order(output_kmpqn, axes=axes_nmpqk)

    # slice away the oD
    out_slicing = [slice(None), 0, slice(None), slice(None), slice(None)]
    output_npqk = ng.tensor_slice(output_nmpqk, out_slicing)

    output = ng.flatten_at(output_npqk, idx=1)

    # cost and grad
    cost = ng.sum(output, out_axes=())

    filter_val = np.ones(filter_var.axes.lengths)

    with ExecutorFactory() as factory:

        conv_comp = factory.executor(output, filter_var, input_var)
        grad_filter_num_comp = factory.numeric_derivative(cost, filter_var, 1.0, input_var)
        grad_filter_sym_comp = factory.derivative(cost, filter_var, input_var)

        grad_input_num_comp = factory.numeric_derivative(cost, input_var, 1.0, filter_var)
        grad_input_sym_comp = factory.derivative(cost, input_var, filter_var)

        conv_val = conv_comp(filter_val, input_val)
        conv_val_num = np.empty_like(conv_val)
        conv_val_num.fill(np.prod(cf.ax_f.lengths[:-1]))
        ng.testing.assert_allclose(conv_val, conv_val_num)

        grad_filter_num_val = grad_filter_num_comp(filter_val, input_val)
        grad_filter_sym_val = grad_filter_sym_comp(filter_val, input_val)
        ng.testing.assert_allclose(grad_filter_num_val, grad_filter_sym_val)

        grad_input_num_val = grad_input_num_comp(input_val, filter_val)
        grad_input_sym_val = grad_input_sym_comp(input_val, filter_val)
        ng.testing.assert_allclose(grad_input_num_val, grad_input_sym_val)
Example #15
def test_sequential_side(M):
    x1_np = 2
    x2_np = 3
    b_np = 1
    x_np = np.array([1, 2, 3], dtype=np.float32)

    x = ng.variable([M], initial_value=x_np)
    x1 = ng.persistent_tensor(axes=(), initial_value=x1_np)
    x2 = ng.persistent_tensor(axes=(), initial_value=x2_np)
    x1_vo = ng.value_of(x1)
    x2_vo = ng.value_of(x2)
    b = ng.persistent_tensor(axes=(), initial_value=b_np)

    y = ng.sequential([
        x1_vo, x2_vo,
        ng.assign(x1,
                  ng.sum(x, out_axes=()) + x1 * b + (1 - b)),
        ng.assign(x2,
                  ng.mean(x, out_axes=()) + x2 * b + (1 - b)), x * 2
    ])

    with ExecutorFactory() as ex:
        main_effect = ex.executor((y, x1_vo, x2_vo, x1, x2))
        current_values = ex.executor((x1, x2))

        # Run main path #1
        y_val, x1_init_val, x2_init_val, x1_final_val, x2_final_val = main_effect(
        )
        y_np = x_np * 2

        assert np.allclose(y_val, y_np)
        assert np.allclose(x1_init_val, x1_np)
        assert np.allclose(x2_init_val, x2_np)
        x1_np = np.sum(x_np) + x1_np * b_np + (1 - b_np)
        x2_np = np.mean(x_np) + x2_np * b_np + (1 - b_np)
        assert np.allclose(x1_final_val, x1_np)
        assert np.allclose(x2_final_val, x2_np)

        x1_val, x2_val = current_values()
        assert np.allclose(x1_val, x1_np)
        assert np.allclose(x2_val, x2_np)

        # Run main path #2 (Should be the same as before)
        y_val, x1_init_val, x2_init_val, x1_final_val, x2_final_val = main_effect(
        )
        y_np = x_np * 2

        assert np.allclose(y_val, y_np)
        assert np.allclose(x1_init_val, x1_np)
        assert np.allclose(x2_init_val, x2_np)
        x1_np = np.sum(x_np) + x1_np * b_np + (1 - b_np)
        x2_np = np.mean(x_np) + x2_np * b_np + (1 - b_np)
        assert np.allclose(x1_final_val, x1_np)
        assert np.allclose(x2_final_val, x2_np)
Example #16
def test_convolution_backprop(transformer_factory):
    """
    test convolution backprop path
    """
    N = 128
    C, K = 3, 2
    D, T = 1, 1
    H = W = 32
    R = S = 2

    padding = dict(pad_d=0, pad_h=0, pad_w=0)
    strides = dict(str_d=1, str_h=1, str_w=1)
    dilation = dict(dil_d=1, dil_h=1, dil_w=1)
    conv_params = padding.copy()
    conv_params.update(strides)
    conv_params.update(dilation)

    ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
    ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
    ax_i.set_shape((C, D, H, W, N))
    ax_f.set_shape((C, T, R, S, K))
    ax_o = ng.make_axes([
        ng.make_axis(roles=[ar.features_input]).named('C'),
        ng.make_axis(roles=[ar.features_0]).named('D'),
        ng.make_axis(roles=[ar.features_1]).named('H'),
        ng.make_axis(roles=[ar.features_2]).named('W'), ax.N
    ])

    ax_o[:-1].set_shape((K, output_dim(D, T, padding['pad_d'],
                                       strides['str_d']),
                         output_dim(H, R, padding['pad_h'], strides['str_h']),
                         output_dim(W, S, padding['pad_w'], strides['str_w'])))

    inputs = ng.placeholder(axes=ax_i)
    filters = ng.placeholder(axes=ax_f)

    # randomly initialize
    input_value = rng.uniform(-1, 1, ax_i)
    filter_value = rng.uniform(-1, 1, ax_f)

    assert input_value.shape == ax_i.lengths
    assert filter_value.shape == ax_f.lengths

    output = ng.sum(ng.convolution(conv_params, inputs, filters, ax_o),
                    out_axes=())

    with ExecutorFactory() as factory:
        dcdf_sym_fun = factory.derivative(output, filters, inputs)
        dcdf_num_fun = factory.numeric_derivative(output, filters, .01, inputs)
        dcdf_sym_val = dcdf_sym_fun(filter_value, input_value)
        dcdf_num_val = dcdf_num_fun(filter_value, input_value)

        ng.testing.assert_allclose(dcdf_sym_val, dcdf_num_val, rtol=1)
Example #17
def test_broadcast_deriv_reorder(transformer_factory):
    H = ng.make_axis(2)
    W = ng.make_axis(3)

    x = ng.constant(np.random.rand(2, 3), axes=[H, W])
    x_broadcast = ng.broadcast(x, [W, H])
    x_sum = ng.sum(x_broadcast, out_axes=())
    dx = ng.deriv(x_sum, x)

    with ExecutorFactory() as ex:
        dx_fun = ex.executor(dx)
        ng.testing.assert_allclose(dx_fun(), np.ones((2, 3)))
Example #18
def test_tensor_sum_single_reduction_axes(transformer_factory):
    """TODO."""
    Y = ng.make_axis(length=2)
    N = ng.make_axis(length=2)

    a = ng.constant(np.array([[1.0, 1.0], [1.0, 1.0]], dtype='float32'), [N, Y])

    b = ng.sum(a, reduction_axes=Y)

    with executor(b) as ex:
        result = ex()
        ng.testing.assert_allclose(result, [2.0, 2.0])
Example #19
def test_conv_flatten_deriv(transformer_factory):
    """
    Test deriv of conv followed by flatten
    """
    # set shape
    C, D, H, W, N = (3, 1, 28, 28, 8)
    C, T, R, S, K = (3, 1, 5, 5, 32)

    # i, f, o axes
    ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])
    ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])
    ax_o = ng.make_axes([
        ng.make_axis(32, roles=[ar.Channel]),
        ng.make_axis(1, roles=[ar.Depth]),
        ng.make_axis(24, roles=[ar.Height]),
        ng.make_axis(24, roles=[ar.Width]), ax.N
    ])
    ax_i.set_shape((C, D, H, W, N))
    ax_f.set_shape((C, T, R, S, K))
    params = dict(pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1)
    axes_rsck = ng.make_axes([ax.R, ax.S, ax.C, ax.K])
    axes_rsck_prime = ng.make_axes(
        [ng.make_axis(l) for l in axes_rsck.lengths])

    # broadcast input / filter axes
    image = ng.constant(np.ones(ax_i.lengths), ax_i)
    filter = ng.variable(axes_rsck_prime, initial_value=np.ones((R, S, C, K)))
    filter_casted = ng.cast_axes(filter, axes_rsck)
    filter_casted = ng.expand_dims(filter_casted, ax.T, 0)
    filter_casted = ng.axes_with_order(filter_casted, axes=ax_f)

    # convolution
    output = ng.convolution(params, image, filter_casted, axes=ax_o)
    oC, oD, oH, oW, oN = output.axes
    output = ng.axes_with_order(output,
                                axes=ng.make_axes([oN, oD, oH, oW, oC]))

    # slice away the oD
    out_slicing = [slice(None), 0, slice(None), slice(None), slice(None)]
    conv = ng.Slice(output, out_slicing)
    flatten = ng.flatten_at(conv, idx=1)

    # cost and grad
    cost = ng.sum(flatten, reduction_axes=flatten.axes)
    grad = ng.deriv(cost, filter)

    # compute
    conv_grad_comp = executor([conv, grad])
    conv_val, grad_val = conv_grad_comp()

    assert np.allclose(conv_val, np.zeros_like(conv_val) + 75.)
    assert np.allclose(grad_val, np.zeros_like(grad_val) + 4608.)
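
A quick arithmetic check of the two constants asserted above: with all-ones input and filters, every convolution output element is the sum over a C*T*R*S window, and the gradient of the summed cost with respect to each filter element is the number of (N, oH, oW) positions it touches.

C, T, R, S = 3, 1, 5, 5
N, oH, oW = 8, 24, 24
print(C * T * R * S)   # 75   -> expected conv output value
print(N * oH * oW)     # 4608 -> expected gradient w.r.t. each filter element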
Example #20
def test_4d_chained(transformer_factory, input_axes):
    x_val = np.absolute(np.random.randn(*input_axes.lengths))
    y_val = np.absolute(np.random.randn(*input_axes.lengths))
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    im = ng.reciprocal(x)
    out = ng.sum(ng.add(im, y), reduction_axes=input_axes[0])

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.sum(np.add(np.reciprocal(x_val), y_val), 0)
    np.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
Example #21
def test_4d_chained(transformer_factory, input_axes):

    # Limiting the maximum absolute value of tensor elements to 7.9.
    # See the description in test_exit_condition above.

    # Limiting the minimum absolute value of tensor elements fed into the reciprocal
    # operation to 1/7.9.
    #
    # This is a consequence of the limit above and of flexpoint accuracy.
    # Very small numbers have poor absolute accuracy, and their reciprocals become very
    # large with even worse accuracy. If arbitrarily small values were accepted as input
    # to reciprocal, the absolute maximum of the result, and therefore the absolute
    # tolerance, would be unbounded. To be able to set an atol that the test can pass
    # with, the minimum element of the tensor fed into reciprocal has to be limited.

    is_flex = is_flex_factory(transformer_factory)
    clip_val_max = 7.9 if is_flex else 0
    clip_val_min = 1.0 / 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes,
                               clip_min=clip_val_min,
                               clip_max=clip_val_max)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val_max)
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    im = ng.reciprocal(x)
    out = ng.sum(ng.add(im, y), reduction_axes=input_axes[0])

    with executor(out) as ex:
        graph_val = ex()
    np_val = np.sum(np.add(np.reciprocal(x_val), y_val), 0)

    # atol_multiplier = 15 * x_val.shape[0]
    #
    # x_val.shape[0] is the number of elements added together in the operation
    # ng.sum(x, reduction_axes=input_axes[0])
    #
    # 15 is calculated the following way:
    #
    # The input tensor has values in the range 1/7.9 - 7.9.
    # For DEC=12 the absolute error is 0.5 * 2^-12 = 0.000122.
    # 1/7.9 = 0.126582; with this error it becomes 0.126704.
    # The reciprocal of 1/7.9 is 7.9.
    # The reciprocal of 1/7.9 + err is 7.892389.
    # The absolute difference is 0.007611,
    # which is 15.2 times larger than the 5e-4 atol limit of the Argon transformer.
    ng.testing.assert_allclose(graph_val,
                               np_val,
                               rtol=1e-4,
                               atol_multiplier=15 * x_val.shape[0])
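
The arithmetic in the comment above can be checked directly (assuming the DEC=12 flexpoint error quoted there):

err = 0.5 * 2 ** -12              # 0.000122...
x = 1.0 / 7.9                     # 0.126582...
print(1.0 / x)                    # 7.9
print(1.0 / (x + err))            # ~7.892389
print((1.0 / x - 1.0 / (x + err)) / 5e-4)   # ~15.2 times the Argon atol limit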
Example #22
def test_rng_repetition():
    """
    Tests rng ops to make sure they run on every execution, not just at initialization
    """
    axes = ng.make_axes([ng.make_axis(2), ng.make_axis(2)])
    x = ng.variable(initial_value=np.array([[1, 2], [3, 4]]), axes=axes)
    y = ng.uniform(x)
    mysum = ng.sum(y)
    trans = ng.transformers.make_transformer()
    rand_comp = trans.computation(mysum)
    val1 = rand_comp().copy()
    val2 = rand_comp().copy()
    assert val1 != val2
    trans.close()
Example #23
def test_mean(transformer_factory, input_tensor):
    inputs = input_tensor
    targets = ng.placeholder(inputs.axes)

    inp_stat = ng.mean(inputs, reduction_axes=inputs.axes.batch_axes())
    err = ng.sum(inp_stat - targets, out_axes=())
    with executor(err, inputs, targets) as comp_func:

        input_value = rng.uniform(-1, 1, inputs.axes)
        target_value = rng.uniform(-1, 1, targets.axes)
        ng_f_res = comp_func(input_value, target_value)

        np_f_res = np.sum(np.mean(input_value, axis=1, keepdims=True) - target_value)

        ng.testing.assert_allclose(np_f_res, ng_f_res, atol=1e-4, rtol=1e-4)
Example #24
def test_flatten_deriv_simplified():
    """
    Test derivative with dot and flatten
    """
    ax_N = ng.make_axis(length=3)
    ax_Y = ng.make_axis(length=2)

    x = ng.placeholder(ng.make_axes([ax_N]))
    w = ng.constant([5, 2], axes=ng.make_axes([ax_Y]))
    logits = ng.dot(x, w)
    cost = ng.sum(logits, reduction_axes=logits.axes)

    delta = 0.001
    u = rng.uniform(.1, 5.0, x.axes)
    check_derivative(cost, x, delta, u, atol=1e-2, rtol=1e-2)
Example #25
    def __call__(self, cost_func, variable_scope=None):
        all_updates = []
        batch_cost = ng.sum(cost_func, out_axes=())
        batch_size = cost_func.axes.batch_axis().length

        selected_variables = batch_cost.variables()
        if variable_scope is not None:
            selected_variables = [op for op in selected_variables if op.scope == variable_scope]
        grads = [ng.deriv(batch_cost, v) / batch_size for v in selected_variables]
        scale_factor = clip_gradient_norm(grads, batch_size, self.gradient_clip_norm)

        for variable, grad in zip(selected_variables, grads):
            updates = self.variable_update(variable, grad, scale_factor)
            all_updates.append(updates)
        updates = ng.doall(all_updates)
        grads = ng.doall(grads)
        return ng.sequential([grads, updates, 0])
Example #26
def test_sum(transformer_factory):
    """
    sum 1-D tensor
    """

    nelems = 10
    H = ng.make_axis(length=nelems)
    x = ng.placeholder(H)
    y = ng.sum(x)

    with executor(y, x) as ex:
        sum_executor = ex

        xval = np.array(list(range(1, 10)) + [-1])
        xval[2] += +1 + 10

        assert (sum_executor(xval) == sum(xval))
Example #27
def test_gdm(random_learning_rate, random_momentum_coef, wdecay, nesterov,
             transformer_factory):

    # Setup the baseline and reference optimizers to be tested
    gdm_args = {
        'learning_rate': random_learning_rate,
        'momentum_coef': random_momentum_coef,
        'wdecay': wdecay,
        'nesterov': nesterov
    }

    gdm_reference = GDMReference(**gdm_args)
    gdm = GradientDescentMomentum(**gdm_args)

    # Set up data placeholders
    C = ng.make_axis(20)
    N = ng.make_axis(32, name='N')

    data = ng.placeholder([C, N])
    target = ng.placeholder([N])

    # params to be updated using GDM
    np_W = np.random.rand(C.length)
    W = ng.variable([C], initial_value=np_W)

    # Set up op graph
    cost = ng.sum(target - ng.dot(W, data), out_axes=())
    updated_weights = ng.sequential([gdm(cost), W])

    def data_generator(iteration_count):
        for i in range(iteration_count):
            yield (np.random.rand(C.length, N.length).astype('float32'),
                   np.random.rand(N.length).astype('float32'))

    # Set up the computation and run the "train" loop
    with ExecutorFactory() as ex:
        gdm_baseline = ex.transformer.computation(updated_weights, data,
                                                  target)
        mock_dataset = data_generator(20)

        for x, y in mock_dataset:
            ng_W = gdm_baseline(x, y)  # updated weights for ngraph optimizer
            np_W = gdm_reference(
                x, np_W)  # updated weights for reference optimizer

            ng.testing.assert_allclose(np_W, ng_W, rtol=1e-3)
Example #28
    def __call__(self, cost_func):
        with ng.Op.saved_user_deps():
            state_updates, param_updates = [], []
            batch_cost = ng.sum(cost_func, out_axes=())
            batch_size = cost_func.axes.batch_axes()[0].length

            grads = [
                ng.deriv(batch_cost, v) / batch_size
                for v in batch_cost.variables()
            ]
            scale_factor = clip_gradient_norm(
                grads) if self.gradient_clip_norm else 1

            epsilon, decay = (self.epsilon, self.decay_rate)
            for i, (variable,
                    grad) in enumerate(zip(batch_cost.variables(), grads)):
                grad = clip_gradient_value(grad, self.gradient_clip_value)

                state = ng.persistent_tensor(axes=variable.axes,
                                             initial_value=0.)
                state_updates.append(
                    ng.assign(lvalue=state,
                              rvalue=decay * state +
                              (1.0 - decay) * ng.square(grad)).named(
                                  'state_u_%s' % i))

                param_updates.append(
                    ng.assign(
                        lvalue=variable,
                        rvalue=variable -
                        ((scale_factor * grad * self.learning_rate) /
                         (ng.sqrt(state + epsilon) + epsilon)),
                    ).named('var_u_%s' % i))

            lr_update = [
                ng.assign(
                    self.learning_rate,
                    self.schedule.get_learning_rate(self.learning_rate,
                                                    self.iteration_index))
            ]

            updates = ng.doall(state_updates + param_updates + lr_update)
            self.iteration_index += 1

        return updates
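
A NumPy sketch of the RMSProp state and parameter updates assembled above (scale_factor, gradient clipping and the learning-rate schedule omitted):

import numpy as np

def rmsprop_step(w, grad, state, lrate, decay, epsilon):
    state = decay * state + (1.0 - decay) * np.square(grad)        # state_u_i
    w = w - (grad * lrate) / (np.sqrt(state + epsilon) + epsilon)  # var_u_i
    return w, state

w = np.ones(4)
s = np.zeros(4)
g = np.array([0.1, -0.2, 0.3, -0.4])
w, s = rmsprop_step(w, g, s, lrate=0.01, decay=0.95, epsilon=1e-6)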
Example #29
def test_idempotent_axes_a():
    """
    Test axes transformations with autodiff, case a, reference test
    """
    with ExecutorFactory() as ex:
        axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])

        w = ng.variable(axes, initial_value=np.ones((3, 1)))
        result = w + w

        result = ng.cast_axes(result, axes)
        cost = ng.sum(result, reduction_axes=axes)
        grad = ng.deriv(cost, w)

        grad_comp = ex.executor(grad)
        cost_comp = ex.executor(cost)

        assert cost_comp() == 6.0
        assert np.array_equal(grad_comp(), np.ones((3, 1)) * 2.)
Example #30
def test_shuffled_deriv(transformer_factory):
    # This gets the axes of a delta in a generate_add_delta in a different order than the
    # value being updated
    C = ng.make_axis(length=3)
    T = ng.make_axis(length=1)
    R = ng.make_axis(length=5)
    S = ng.make_axis(length=5)

    axes = [R, S, C]
    v = ng.variable([ng.make_axis(_.length) for _ in axes])
    rsc = ng.cast_axes(v, axes)
    trsc = ng.expand_dims(rsc, T, 0)
    ctrs = ng.axes_with_order(trsc, axes=[C, T, R, S])
    cost = ng.sum(ctrs, out_axes=None)
    grad = ng.deriv(cost, v)

    with ExecutorFactory() as ex:
        d_fun = ex.executor(grad)
        d_fun()
Example #31
def test_idempotent_axes_c():
    """
    Test axes transformations with autodiff, case c, with broadcast,
    slice, cast and dim-shuffle
    """
    with ExecutorFactory() as ex:
        axes = ng.make_axes([ng.make_axis(3), ng.make_axis(1)])
        result_axes = [ng.make_axis(length=axis.length) for axis in axes]

        # variable
        w = ng.variable(axes, initial_value=np.ones((3, 1)))

        # broadcast l / r, introducing dummy length 1 axes
        l = ng.broadcast(w, axes)
        r = ng.broadcast(w, axes)

        # slice
        axes_slice = [slice(None, None, None), slice(None, None, None)]
        l_sliced = ng.tensor_slice(l, axes_slice)
        r_sliced = ng.tensor_slice(r, axes_slice)

        # cast r
        r_sliced_casted = ng.cast_axes(r_sliced, axes)

        # perform add
        result = ng.add(l_sliced, r_sliced_casted)

        # cast / dimshuffle
        result = ng.cast_axes(result, result_axes)
        result = ng.axes_with_order(result, result_axes)

        # cost and grad
        cost = ng.sum(result, reduction_axes=result.axes)
        grad = ng.deriv(cost, w)

        grad_comp = ex.executor(grad)
        cost_comp = ex.executor(cost)

        cost_comp_ng = cost_comp()
        grad_comp_ng = grad_comp()
        grad_comp_np = np.ones((3, 1)) * 2.
        assert cost_comp_ng == 6.0
        assert np.array_equal(grad_comp_ng, grad_comp_np)
Example #32
    kb_ents_to_type=babi.kb_ents_to_type,
    kb_ents_to_cand_idxs=babi.kb_ents_to_cand_idxs,
    match_type_idxs=babi.match_type_idxs,
    nhops=args.nhops,
    eps=args.eps,
    init=GaussianInit(
        mean=0.0,
        std=0.1))

# Compute answer predictions
a_pred, attention = memn2n(inputs)

# specify loss function, calculate loss and update weights
loss = ng.cross_entropy_multi(a_pred, inputs['answer'], usebits=True)

mean_cost = ng.sum(loss, out_axes=[])
optimizer = Adam(learning_rate=args.lr)
updates = optimizer(loss)

batch_cost = ng.sequential([updates, mean_cost])

# provide outputs for bound computation
train_outputs = dict(batch_cost=batch_cost, train_preds=a_pred)

with Layer.inference_mode_on():
    a_pred_inference, attention_inference = memn2n(inputs)
    eval_loss = ng.cross_entropy_multi(
        a_pred_inference, inputs['answer'], usebits=True)

interactive_outputs = dict(
    test_preds=a_pred_inference,
Example #33
    def __call__(self, inputs):
        query = ng.cast_axes(
            inputs['user_utt'], [
                self.batch_axis, self.sentence_rec_axis])

        # Query embedding [batch, sentence_axis, F]
        q_emb = self.LUT_A(query)

        # Multiply by position encoding and sum
        u_0 = ng.sum(q_emb, reduction_axes=[self.sentence_rec_axis])

        # Start a list of the internal states of the model. Will be appended to
        # after each memory hop
        u = [u_0]

        for hopn in range(self.nhops):
            story = ng.cast_axes(
                inputs['memory'], [
                    self.batch_axis, self.memory_axis, self.sentence_rec_axis])

            # Re-use the query embedding matrix to embed the memory sentences
            # [batch, memory_axis, sentence_axis, F]
            m_emb_A = self.LUT_A(story)
            m_A = ng.sum(
                m_emb_A, reduction_axes=[
                    self.sentence_rec_axis])  # [batch, memory_axis, F]

            # Compute scalar similarity between internal state and each memory
            # Equivalent to dot product between u[-1] and each memory in m_A
            # [batch, memory_axis]
            dotted = ng.sum(u[-1] * m_A, reduction_axes=[self.embedding_axis])

            # [batch, memory_axis]
            probs = ng.softmax(dotted, self.memory_axis)

            # Renormalize probabilities according to non-empty memories
            probs_masked = probs * inputs['memory_mask']
            renorm_sum = ng.sum(
                probs_masked, reduction_axes=[
                    self.memory_axis]) + self.eps
            probs_renorm = (probs_masked + self.eps) / renorm_sum

            # Compute weighted sum of memory embeddings
            o_k = ng.sum(
                probs_renorm * m_A,
                reduction_axes=[
                    self.memory_axis])  # [batch, F]

            # Add the output back into the internal state and project
            u_k = ng.cast_axes(ng.dot(self.R_proj, o_k), [
                               self.embedding_axis, self.batch_axis]) + u[-1]  # [batch, F_proj]

            # Add new internal state
            u.append(u_k)

        if self.use_match_type:
            # [batch_axis, cand_axis, cand_rec_axis, F]
            self.cands_mat = inputs['cands_mat']

        # Embed all candidate responses using LUT_W
        # [<batch_axis>, cand_axis, cand_rec_axis, F]
        cand_emb_W = self.LUT_W(self.cands_mat)
        # No position encoding added yet
        cands_mat_emb = ng.sum(
            cand_emb_W, reduction_axes=[
                self.cand_rec_axis])  # [<batch_axis>, cand_axis, F]

        # Compute predicted answer from product of final internal state
        # and embedded candidate answers
        # a_logits = ng.dot(cands_mat_emb, u[-1]) # [batch, cand_axis]
        # [batch, cand_axis]
        a_logits = ng.sum(u[-1] * cands_mat_emb,
                          reduction_axes=[self.embedding_axis])

        # rename V to vocab_axis to match answer
        a_logits = ng.cast_axes(a_logits, [self.batch_axis, self.cand_axis])
        a_pred = ng.softmax(a_logits, self.cand_axis)

        return a_pred, probs_renorm