Example #1
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
    I = x.tensor
    ndims = I.shape.ndims
    if reduction_axes is None:
        raw_axes = [ndims - 1]
    else:
        raw_axes = reduction_axes
    axes = [_normalize_axis(ax, ndims, 'normalize_batch_in_training') for ax in raw_axes]
    m = mean(x, axis=axes, keepdims=True)
    v = var(x, axis=axes, keepdims=True)

    # We reshape beta & gamma to the target shape; this discards shape
    # information on beta & gamma but matches the behavior of the TF backend.
    dims = edsl.TensorDims(ndims)
    I.bind_dims(*dims)
    for ax in axes:
        dims[ax] = 1
    if beta is not None:
        beta = reshape(beta, dims)
    if gamma is not None:
        gamma = reshape(gamma, dims)

    normalized_tensor = batch_normalization(x=x,
                                            mean=m,
                                            var=v,
                                            beta=beta,
                                            gamma=gamma,
                                            epsilon=epsilon)

    # Squeeze the mean and variance in all cases to match the TF backend's behavior.
    m = squeeze(m)
    v = squeeze(v)

    return normalized_tensor, m, v
Example #2
def matmul_2_1(A, b):
    I, J = edsl.TensorDims(2)
    i, j = edsl.TensorIndexes(2)
    A.bind_dims(I, J)
    b.bind_dims(J)
    C = edsl.TensorOutput(I)
    C[(i)] += A[i, j] * b[j]
    return C
Example #3
def matmul_2_2(A, B):
    I, J, K = edsl.TensorDims(3)
    i, j, k = edsl.TensorIndexes(3)
    A.bind_dims(I, J)
    B.bind_dims(J, K)
    C = edsl.TensorOutput(I, K)
    C[(i, k)] += A[i, j] * B[j, k]
    return C
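A minimal usage sketch for the contraction above, assuming the plaidml, edsl, plaidml_exec, and numpy imports used in Example #15 and illustrative 3x4 and 4x5 input shapes:

A_shape = edsl.LogicalShape(plaidml.DType.FLOAT32, [3, 4])
B_shape = edsl.LogicalShape(plaidml.DType.FLOAT32, [4, 5])
A = edsl.Tensor(A_shape)
B = edsl.Tensor(B_shape)
C = matmul_2_2(A, B)  # the contraction defined above
program = edsl.Program('matmul_2_2_example', [C])
plaidml_exec.run(program, [(A, np.random.rand(3, 4)), (B, np.random.rand(4, 5))])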
Example #4
def dist(a, b):
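    # Pairwise difference a[i] - b[j], expressed as a[i] + neg[j] with neg = -b.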
    I, J = edsl.TensorDims(2)
    i, j = edsl.TensorIndexes(2)
    a.bind_dims(I)
    neg = -b
    neg.bind_dims(J)
    C = edsl.TensorOutput(I, J)
    C[(i, j)] = a[i] + neg[j]
    return C
Example #5
def batch_flatten(x):
    I = x.tensor
    I_dims = edsl.TensorDims(I.shape.ndims)
    I.bind_dims(*I_dims)
    if len(I_dims) == 1:
        return reshape(x, [I_dims[0], 1])
    if len(I_dims) == 2:
        return x
    return reshape(x, [I_dims[0]] + [functools.reduce((lambda x, y: x * y), I_dims[1:])])
Example #6
def zeros_like(x, dtype=None, name=None):
    value = np.full((1), 0, dtype=dtype or floatx())
    zero = _create_var('a_zero', value)
    I = x.tensor
    ndim = I.shape.ndims
    dims = edsl.TensorDims(ndim)
    idxs = edsl.TensorIndexes(ndim)
    I.bind_dims(*dims)
    O = edsl.TensorOutput(*dims)
    O[idxs] = zero[0]
    return _KerasNode('zeros_like', name=name, tensor=O)
Example #7
def partial(F, wrt, delta):
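    # Central-difference approximation of the derivative of F along the `wrt`
    # axis: (F[.+1] - F[.-1]) / (2 * delta), with the subtraction expressed as
    # addition of the negated tensor F_neg.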
    F_neg = -F
    dims = edsl.TensorDims(3)
    x, y, z = edsl.TensorIndexes(3)
    F.bind_dims(*dims)
    O = edsl.TensorOutput(*dims)
    if wrt == 'x':
        O[x, y, z] = F[x + 1, y, z] + F_neg[x - 1, y, z]
    elif wrt == 'y':
        O[x, y, z] = F[x, y + 1, z] + F_neg[x, y - 1, z]
    elif wrt == 'z':
        O[x, y, z] = F[x, y, z + 1] + F_neg[x, y, z - 1]
    return O / (2.0 * delta)
Example #8
def batch_dot(x, y, axes=None, name=None):
    X = x.tensor
    Y = y.tensor
    if isinstance(axes, six.integer_types):
        axes = (axes, axes)
    if axes is None:
        axes = (X.shape.ndims - 1, Y.shape.ndims - 2)
    PLAIDML_BATCHDOT_TF_BEHAVIOR = os.getenv('PLAIDML_BATCHDOT_TF_BEHAVIOR')
    if PLAIDML_BATCHDOT_TF_BEHAVIOR:
        _report_unimplemented('batch_dot')
    else:
        # Replicate the Theano/documentation-specified behavior.
        first_dim = edsl.TensorDim()
        first_idx = edsl.TensorIndex()
        batch_dim = edsl.TensorDim()
        batch_idx = edsl.TensorIndex()
        xdims = edsl.TensorDims(X.shape.ndims)
        xdims[0] = first_dim
        xdims[axes[0]] = batch_dim
        xidxs = edsl.TensorIndexes(X.shape.ndims)
        xidxs[0] = first_idx
        xidxs[axes[0]] = batch_idx
        ydims = edsl.TensorDims(Y.shape.ndims)
        ydims[0] = first_dim
        ydims[axes[1]] = batch_dim
        yidxs = edsl.TensorIndexes(Y.shape.ndims)
        yidxs[0] = first_idx
        yidxs[axes[1]] = batch_idx
        odims = ([xdims[N] for N in range(len(xdims)) if N != axes[0]] +
                 [ydims[N] for N in range(1, len(ydims)) if N != axes[1]])
        oidxs = ([xidxs[N] for N in range(len(xidxs)) if N != axes[0]] +
                 [yidxs[N] for N in range(1, len(yidxs)) if N != axes[1]])
        X.bind_dims(*xdims)
        Y.bind_dims(*ydims)
        O = edsl.TensorOutput(*odims)
        O[oidxs] += X[xidxs] * Y[yidxs]
    if len(odims) == 1:
        O = plaidml_op.expand_dims(O, 1)
    return _KerasNode('batch_dot', tensor=O)
Example #9
def dropout(x, level, noise_shape=None, seed=None):
    I = x.tensor
    if noise_shape is not None and len(noise_shape) != I.shape.ndims:
        raise ValueError('noise_shape ndims doesn\'t match input ndims')
    if noise_shape is None:
        shape = edsl.TensorDims(I.shape.ndims)
        I.bind_dims(*shape)
    else:
        shape = noise_shape
    rng_state = _make_rng_state(seed)
    R = 1.0 - level
    M = 1.0 / R
    T = edsl.prng(rng_state.tensor, shape)
    O = edsl.select(T < R, I * M, 0.0)
    return _KerasNode('dropout', tensor=O)
Example #10
def one_hot(indices, num_classes):
    # Note: does not check for entries in indices that are >= num_classes.
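    # The encoding is built by comparing each index value against an
    # arange(num_classes) tensor along a new trailing class axis.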
    count = variable(np.array(range(num_classes)), dtype='int32').tensor
    I = indices.tensor
    I_ndims = I.shape.ndims
    I_dims = edsl.TensorDims(I_ndims)
    I_idxs = edsl.TensorIndexes(I_ndims)
    C = edsl.TensorDim()
    c = edsl.TensorIndex()
    O_dims = I_dims + [C]
    O_idxs = I_idxs + [c]
    I.bind_dims(*I_dims)
    count.bind_dims(C)
    O = edsl.TensorOutput(*O_dims)
    O[O_idxs] = I[I_idxs] == count[c]
    return _KerasNode('one_hot', name='one_hot', tensor=O)
Example #11
def partial_chi(F, wrt, delta):
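    # Indicator (chi) variant of the central difference: +1 where the
    # right-shifted F is negative, -1 where the left-shifted F is negative,
    # scaled by 1 / (2 * delta).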
    dims = edsl.TensorDims(3)
    x, y, z = edsl.TensorIndexes(3)
    F.bind_dims(*dims)
    DF_left = edsl.TensorOutput(*dims)
    DF_right = edsl.TensorOutput(*dims)

    if wrt == 'x':
        DF_right[x, y, z] = F[x + 1, y, z]
        DF_left[x, y, z] = F[x - 1, y, z]
    elif wrt == 'y':
        DF_right[x, y, z] = F[x, y + 1, z]
        DF_left[x, y, z] = F[x, y - 1, z]
    elif wrt == 'z':
        DF_right[x, y, z] = F[x, y, z + 1]
        DF_left[x, y, z] = F[x, y, z - 1]

    DF_chi_right = edsl.select(DF_right < 0, 1, 0)
    DF_chi_left = edsl.select(DF_left < 0, -1, 0)
    return (DF_chi_right + DF_chi_left) / (2.0 * delta)
Example #12
def categorical_crossentropy(target, output, from_logits=False):
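    # Computes -sum(target * log(output)) over the class (last) axis; `output`
    # is normalized first unless it is already the result of a softmax.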
    if from_logits:
        output = softmax(output)
    elif output.opname != 'softmax':
        output /= sum(output, axis=(-1,), keepdims=True)
        output = clip(output, epsilon(), 1.0 - epsilon())
    T = target.tensor
    O = output.tensor
    ndims = O.shape.ndims
    fixed_dims = edsl.TensorDims(ndims - 1)
    fixed_idxs = edsl.TensorIndexes(ndims - 1)
    Y = edsl.TensorDim()
    y = edsl.TensorIndex()
    input_dims = fixed_dims + [Y]
    O.bind_dims(*input_dims)
    T.bind_dims(*input_dims)
    LO = edsl.log(O)
    TR = edsl.TensorOutput(*fixed_dims)
    TR[fixed_idxs] += T[fixed_idxs + [y]] * LO[fixed_idxs + [y]]
    R = -TR
    return _KerasNode('categorical_crossentropy', tensor=R)
Example #13
def softmax(x, axis=None, name=None):
    if name is None:
        name = 'softmax'
    I = x.tensor
    ndims = I.shape.ndims
    I_dims = edsl.TensorDims(ndims)
    I.bind_dims(*I_dims)
    if axis is None:
        axis = ndims - 1
    axis = _normalize_axis(axis=axis, ndims=ndims, name=name + ' (softmax)')
    if ndims == 2 and axis == 1:
        return _KerasNode(name, tensor=plaidml_op.softmax(I, axis=1))

    if axis == 0:
        group = 1
    else:
        group = functools.reduce(lambda x, y: x * y, I_dims[:axis])
    values = functools.reduce(lambda x, y: x * y, I_dims[axis:])
    flat_x = reshape(x, (group, values))
    result = _KerasNode(name, tensor=plaidml_op.softmax(flat_x.tensor, axis=1))
    return reshape(result, I_dims)
Example #14
def time_expand(val, ii, t, prev):
    I = val.tensor
    ndmo = I.shape.ndims - 1
    if ndmo < 0:
        raise PlaidMLKerasException('output values must have a batch size dimension')
    dims = edsl.TensorDims(ndmo)
    idxs = edsl.TensorIndexes(ndmo)
    batch_dim = edsl.TensorDim()
    batch_idx = edsl.TensorIndex()
    I_dims = [batch_dim] + dims
    I_idxs = [batch_idx] + idxs
    I.bind_dims(*I_dims)
    O_dims = [batch_dim] + [t] + dims
    O = edsl.TensorOutput(*O_dims)
    O_idxs = [batch_idx] + [ii] + idxs
    O[O_idxs] = I[I_idxs]
    if prev is None:
        if ii != 0:
            raise RuntimeError(
                'Generating RNN at time step {} with no previous time step'.format(ii))
    else:
        O.use_default(prev.tensor)
    return _KerasNode('time_expand', name='time_expand', tensor=O)
Example #15
def main():
    print("""
PlaidML Setup ({0})

Thanks for using PlaidML!

Some Notes:
  * Bugs and other issues: https://github.com/plaidml/plaidml
  * Questions: https://stackoverflow.com/questions/tagged/plaidml
  * Say hello: https://groups.google.com/forum/#!forum/plaidml-dev
  * PlaidML is licensed under the Apache License 2.0
 """.format(plaidml.__version__))

    devices = sorted(plaidml_exec.list_devices())
    targets = sorted(plaidml_exec.list_targets())

    if not devices:
        print("""
No OpenCL devices found. Check driver installation.
Read the helpful, easy driver installation instructions from our README:
http://github.com/plaidml/plaidml
""")
        sys.exit(-1)

    dev_idx = 0
    if len(devices) > 1:
        print("""
Multiple devices detected (You can override by setting PLAIDML_DEVICE).
Please choose a default device:
""")
        for i, device in enumerate(devices):
            print("   {} : {}".format(i + 1, device))
        choices = [str(i + 1) for i in range(len(devices))]
        dev_idx = int(choice_prompt("\nDefault device", choices, "1"))
    plaidml_settings.set('PLAIDML_DEVICE', devices[dev_idx - 1])
    device = plaidml_settings.get('PLAIDML_DEVICE')

    print()
    print("Selected device:")
    print("    {}".format(device))

    print()
    print("A target determines the compiler configuration and should be matched with your device.")
    print("Please choose a default target:")
    for i, target in enumerate(targets):
        print("   {} : {}".format(i + 1, target))
    choices = [str(i + 1) for i in range(len(targets))]
    tgt_idx = int(choice_prompt("\nDefault target", choices, "1"))
    plaidml_settings.set('PLAIDML_TARGET', targets[tgt_idx - 1])
    target = plaidml_settings.get('PLAIDML_TARGET')

    print()
    print("Selected target:")
    print("    {}".format(target))

    print()
    print("Almost done. Multiplying some matrices...")
    print("Tile code:")
    print("  function (B[X, Z], C[Z, Y]) -> (A) { A[x, y : X, Y] = +(B[x, z] * C[z, y]); }")

    shape = edsl.LogicalShape(plaidml.DType.FLOAT32, [3, 3])
    B = edsl.Tensor(shape)
    C = edsl.Tensor(shape)

    X, Y, Z = edsl.TensorDims(3)
    x, y, z = edsl.TensorIndexes(3)
    B.bind_dims(X, Z)
    C.bind_dims(Z, Y)
    A = edsl.TensorOutput(X, Y)
    A[x, y] += B[x, z] * C[z, y]

    program = edsl.Program('plaidml_setup', [A])
    plaidml_exec.run(program, [(B, np.random.rand(3, 3)), (C, np.random.rand(3, 3))])
    print("Whew. That worked.")
    print()

    settings_path = plaidml_settings.get('PLAIDML_SETTINGS')
    save = choice_prompt("Save settings to {0}".format(settings_path), ["y", "n"], "y")
    if save == "y":
        plaidml_settings.save()
    print("Success!")
    print()
Example #16
def flatten(x):
    I = x.tensor
    I_dims = edsl.TensorDims(I.shape.ndims)
    I.bind_dims(*I_dims)
    O_dim = functools.reduce(lambda x, y: x * y, I_dims)
    return reshape(x, [O_dim])
Example #17
def sparse_categorical_crossentropy(target, output, from_logits=False):
    dims = edsl.TensorDims(output.tensor.shape.ndims)
    output.tensor.bind_dims(*dims)
    return categorical_crossentropy(
        reshape(one_hot(target, output.tensor.shape.int_dims[-1]), dims), output, from_logits)
Example #18
def reshape(x, dims):
    # TODO: This needs to be more thoroughly tested with symbolic shapes
    dims = list(dims)
    I = x.tensor
    I_dims = edsl.TensorDims(I.shape.ndims)
    I.bind_dims(*I_dims)
    neg_idx = None
    for idx, dim in enumerate(dims):
        if isinstance(dim, edsl.TensorDim):
            continue
        if dim == 0 or dim is None:
            dims[idx] = I_dims[idx]  # TODO: Fix how we manage shape
        elif dim == -1:
            if neg_idx is not None:
                raise RuntimeError('At most one dimension of size -1 may be provided in Reshape')
            neg_idx = idx
            dims[idx] = 1  # Just to simplify the size computation later
    if neg_idx is not None:
        # Compute the value to use for the -1 dimension in the
        # output shape, by making it what it needs to be in order
        # to preserve the correct number of elements in the
        # tensor.
        #
        # This code is a little tricky because symbolic values
        # (e.g. the batch size in a typical neural network) may
        # appear in both the original shape and the target shape.
        # Naively multiplying the original shape's dimensions and
        # dividing by the target shape's dimensions (excluding the
        # -1 dimension) would produce a symbolic value.
        #
        # So:
        #
        # We scan the input dimensions, counting the number of
        # instances of each symbolic size encountered and
        # multiplying together the non-symbolic sizes into the
        # numerator.
        #
        # We then scan the output dimensions.  Where there's a
        # symbolic size, we check and see if we have a count for
        # it, and decrement the count if we do.  Otherwise -- if
        # we don't have a count for it, or if it's not symbolic --
        # we multiply it into the denominator.
        #
        # We then take the remaining symbolic input dimensions,
        # and multiply them into the numerator -- these are the
        # dimensions that haven't been cancelled out.
        #
        # And then the size of the -1 dimension is just numerator
        # / denominator; if there are any remaining uncancelled
        # symbolic dimension sizes, the output will be symbolic,
        # but otherwise we'll come out with a concrete dimension
        # size.
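        #
        # For example (illustrative shapes): reshaping an input of symbolic
        # shape (N, 4, 6) to (N, -1) gives num = 24 from the concrete dims,
        # the symbolic N in the target cancels the N counted from the input,
        # den stays 1, and the -1 dimension resolves to 24.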

        num = 1
        syms = defaultdict(int)
        for idx, dim in enumerate(I.shape.int_dims):
            if dim is None:
                syms[I_dims[idx]] += 1
            else:
                num *= dim
        den = 1
        for dim in dims:
            if isinstance(dim, edsl.TensorDim) and syms[dim] > 0:
                syms[dim] -= 1
            else:
                den *= dim
        for sym, count in syms.items():
            for _ in range(count):
                num *= sym
        dims[neg_idx] = num // den
    return _KerasNode('reshape', tensor=edsl.reshape(I, dims))
Example #19
def shape(x):
    ret = _KerasNode('shape', tensor=edsl.shape(x.tensor))
    # Save the TensorDims directly on the _KerasNode, where they can be extracted if needed
    ret._RawTensorDims = edsl.TensorDims(x.tensor.shape.ndims)
    x.tensor.bind_dims(*ret._RawTensorDims)
    return ret