Ejemplo n.º 1
0
def matmul_2_2(A, B):
    """Matrix-matrix product: C[i, k] = sum_j A[i, j] * B[j, k]."""
    rows, inner, cols = edsl.TensorDims(3)
    r, m, c = edsl.TensorIndexes(3)
    A.bind_dims(rows, inner)
    B.bind_dims(inner, cols)
    C = edsl.TensorOutput(rows, cols)
    C[r, c] += A[r, m] * B[m, c]
    return C
Ejemplo n.º 2
0
def matmul_2_1(A, b):
    """Matrix-vector product: C[i] = sum_j A[i, j] * b[j]."""
    rows, cols = edsl.TensorDims(2)
    r, c = edsl.TensorIndexes(2)
    A.bind_dims(rows, cols)
    b.bind_dims(cols)
    C = edsl.TensorOutput(rows)
    C[r] += A[r, c] * b[c]
    return C
Ejemplo n.º 3
0
def dist(a, b):
    """Pairwise difference table: C[i, j] = a[i] - b[j] (via explicit negation)."""
    I, J = edsl.TensorDims(2)
    i, j = edsl.TensorIndexes(2)
    a.bind_dims(I)
    b_neg = -b
    b_neg.bind_dims(J)
    C = edsl.TensorOutput(I, J)
    C[i, j] = a[i] + b_neg[j]
    return C
Ejemplo n.º 4
0
def zeros_like(x, dtype=None, name=None):
    """Return a Keras node of zeros with the same shape as `x`.

    Broadcasts a single zero variable across every dimension of `x`.
    """
    fill = np.full((1,), 0, dtype=dtype or floatx())
    zero = _create_var('a_zero', fill)
    I = x.tensor
    rank = I.shape.ndims
    dims = edsl.TensorDims(rank)
    idxs = edsl.TensorIndexes(rank)
    I.bind_dims(*dims)
    O = edsl.TensorOutput(*dims)
    O[idxs] = zero[0]
    return _KerasNode('zeros_like', name=name, tensor=O)
Ejemplo n.º 5
0
def partial_chi(F, wrt, delta):
    """Central-difference indicator of negativity of F along axis `wrt`.

    Shifts F by +1 and -1 along the chosen axis, maps the shifted values
    to +1 (right shift negative) / -1 (left shift negative) indicators,
    and returns their sum scaled by 1 / (2 * delta).

    Args:
        F: rank-3 edsl tensor.
        wrt: axis to differentiate along: 'x', 'y', or 'z'.
        delta: grid spacing used in the finite-difference denominator.

    Raises:
        ValueError: if `wrt` is not one of 'x', 'y', 'z'.
    """
    dims = edsl.TensorDims(3)
    x, y, z = edsl.TensorIndexes(3)
    F.bind_dims(*dims)
    DF_left = edsl.TensorOutput(*dims)
    DF_right = edsl.TensorOutput(*dims)

    if wrt == 'x':
        DF_right[x, y, z] = F[x + 1, y, z]
        DF_left[x, y, z] = F[x - 1, y, z]
    elif wrt == 'y':
        DF_right[x, y, z] = F[x, y + 1, z]
        DF_left[x, y, z] = F[x, y - 1, z]
    elif wrt == 'z':
        DF_right[x, y, z] = F[x, y, z + 1]
        DF_left[x, y, z] = F[x, y, z - 1]
    else:
        # Previously an unrecognized axis fell through silently, leaving both
        # TensorOutputs without a defining contraction; fail fast instead.
        raise ValueError("wrt must be one of 'x', 'y', 'z'; got {!r}".format(wrt))

    DF_chi_right = edsl.select(DF_right < 0, 1, 0)
    DF_chi_left = edsl.select(DF_left < 0, -1, 0)
    return (DF_chi_right + DF_chi_left) / (2.0 * delta)
Ejemplo n.º 6
0
def partial(F, wrt, delta):
    """Central finite difference of F along axis `wrt`.

    Computes (F shifted +1 along `wrt` minus F shifted -1) / (2 * delta),
    using an explicitly negated copy of F for the subtraction.

    Args:
        F: rank-3 edsl tensor.
        wrt: axis to differentiate along: 'x', 'y', or 'z'.
        delta: grid spacing used in the finite-difference denominator.

    Raises:
        ValueError: if `wrt` is not one of 'x', 'y', 'z'.
    """
    F_neg = -F
    dims = edsl.TensorDims(3)
    x, y, z = edsl.TensorIndexes(3)
    F.bind_dims(*dims)
    O = edsl.TensorOutput(*dims)
    if wrt == 'x':
        O[x, y, z] = F[x + 1, y, z] + F_neg[x - 1, y, z]
    elif wrt == 'y':
        O[x, y, z] = F[x, y + 1, z] + F_neg[x, y - 1, z]
    elif wrt == 'z':
        O[x, y, z] = F[x, y, z + 1] + F_neg[x, y, z - 1]
    else:
        # Previously an unrecognized axis fell through silently, returning an
        # output with no defining contraction; fail fast instead.
        raise ValueError("wrt must be one of 'x', 'y', 'z'; got {!r}".format(wrt))
    return O / (2.0 * delta)
Ejemplo n.º 7
0
def one_hot(indices, num_classes):
    """One-hot encode `indices`, appending a class axis of size `num_classes`.

    Note: does not error check for entries in indices that are >= num_classes.
    """
    count = variable(np.array(range(num_classes)), dtype='int32').tensor
    I = indices.tensor
    rank = I.shape.ndims
    in_dims = edsl.TensorDims(rank)
    in_idxs = edsl.TensorIndexes(rank)
    C = edsl.TensorDim()
    c = edsl.TensorIndex()
    I.bind_dims(*in_dims)
    count.bind_dims(C)
    O = edsl.TensorOutput(*(in_dims + [C]))
    # True exactly where the index value matches the class position.
    O[in_idxs + [c]] = I[in_idxs] == count[c]
    return _KerasNode('one_hot', name='one_hot', tensor=O)
Ejemplo n.º 8
0
def categorical_crossentropy(target, output, from_logits=False):
    """Categorical cross-entropy between `target` and `output`.

    Contracts over the trailing class axis: R = -sum_y T[..., y] * log(O[..., y]).

    Args:
        target: Keras node holding target distributions (same shape as output).
        output: Keras node holding predictions; treated as logits when
            `from_logits` is True.
        from_logits: if True, apply softmax to `output` before taking the log.

    Returns:
        A _KerasNode with the class axis reduced away.
    """
    if from_logits:
        output = softmax(output)
    elif output.opname != 'softmax':
        # Output is not already a softmax: renormalize along the class axis
        # and clip away exact 0/1 so the log below stays finite.
        output /= sum(output, axis=(-1,), keepdims=True)
        output = clip(output, epsilon(), 1.0 - epsilon())
    T = target.tensor
    O = output.tensor
    ndims = O.shape.ndims
    # All axes except the trailing class axis Y survive into the result.
    fixed_dims = edsl.TensorDims(ndims - 1)
    fixed_idxs = edsl.TensorIndexes(ndims - 1)
    Y = edsl.TensorDim()
    y = edsl.TensorIndex()
    input_dims = fixed_dims + [Y]
    O.bind_dims(*input_dims)
    T.bind_dims(*input_dims)
    LO = edsl.log(O)
    TR = edsl.TensorOutput(*fixed_dims)
    # Sum-contraction over the class index y; negated afterwards.
    TR[fixed_idxs] += T[fixed_idxs + [y]] * LO[fixed_idxs + [y]]
    R = -TR
    return _KerasNode('categorical_crossentropy', tensor=R)
Ejemplo n.º 9
0
def batch_dot(x, y, axes=None, name=None):
    """Batchwise dot product of `x` and `y`, contracted over `axes`.

    Follows the Theano/Keras-documented batch_dot semantics: dimension 0 of
    both inputs is the shared batch dimension, and the axes named in `axes`
    are summed over. The result keeps x's remaining dims followed by y's
    remaining dims (excluding y's dim 0 and the contracted axis).

    Args:
        x, y: Keras nodes to multiply.
        axes: int or pair of ints naming the contraction axis in x and y;
            defaults to (x.ndims - 1, y.ndims - 2).
        name: unused here; kept for interface compatibility.
    """
    X = x.tensor
    Y = y.tensor
    if isinstance(axes, six.integer_types):
        # A single int means "contract this axis in both inputs".
        axes = (axes, axes)
    if axes is None:
        axes = (X.shape.ndims - 1, Y.shape.ndims - 2)
    PLAIDML_BATCHDOT_TF_BEHAVIOR = os.getenv('PLAIDML_BATCHDOT_TF_BEHAVIOR')
    if PLAIDML_BATCHDOT_TF_BEHAVIOR:
        # NOTE(review): assumes _report_unimplemented raises; if it ever
        # returns, `odims`/`O` below would be unbound — confirm.
        _report_unimplemented('batch_dot')
    else:
        # replicate theano/documentation-specified behavior
        first_dim = edsl.TensorDim()
        first_idx = edsl.TensorIndex()
        batch_dim = edsl.TensorDim()
        batch_idx = edsl.TensorIndex()
        # Share the batch dim/index (slot 0) and the contracted dim/index
        # (slot axes[n]) between the two inputs.
        xdims = edsl.TensorDims(X.shape.ndims)
        xdims[0] = first_dim
        xdims[axes[0]] = batch_dim
        xidxs = edsl.TensorIndexes(X.shape.ndims)
        xidxs[0] = first_idx
        xidxs[axes[0]] = batch_idx
        ydims = edsl.TensorDims(Y.shape.ndims)
        ydims[0] = first_dim
        ydims[axes[1]] = batch_dim
        yidxs = edsl.TensorIndexes(Y.shape.ndims)
        yidxs[0] = first_idx
        yidxs[axes[1]] = batch_idx
        # Output: x's dims minus the contracted one, then y's dims minus
        # its batch dim (0) and its contracted one.
        odims = [xdims[N] for N in range(len(xdims)) if N != axes[0]
                ] + [ydims[N] for N in range(1, len(ydims)) if N != axes[1]]
        oidxs = [xidxs[N] for N in range(len(xidxs)) if N != axes[0]
                ] + [yidxs[N] for N in range(1, len(yidxs)) if N != axes[1]]
        X.bind_dims(*xdims)
        Y.bind_dims(*ydims)
        O = edsl.TensorOutput(*odims)
        O[oidxs] += X[xidxs] * Y[yidxs]
    if len(odims) == 1:
        # Keras guarantees batch_dot output has at least rank 2.
        O = plaidml_op.expand_dims(O, 1)
    return _KerasNode('batch_dot', tensor=O)
Ejemplo n.º 10
0
 def time_expand(val, ii, t, prev):
     """Write `val` into time slot `ii` of an output with time dimension `t`.

     Inserts a new time axis right after the batch axis and places `val` at
     step `ii`; slots for other steps default to `prev`'s values.

     Args:
         val: Keras node whose tensor has a leading batch dimension.
         ii: time-step position (compared against 0 for the first step).
         t: edsl dimension for the new time axis.
         prev: node from the previous step used as default values, or None
             (only valid at step 0).

     Raises:
         RuntimeError: if `prev` is None for a step other than 0.
     """
     I = val.tensor
     # Rank excluding the mandatory leading batch dimension.
     ndmo = I.shape.ndims - 1
     if (ndmo < 0):
         raise PlaidMLKerasException('output values must have a batch size dimension')
     dims = edsl.TensorDims(ndmo)
     idxs = edsl.TensorIndexes(ndmo)
     batch_dim = edsl.TensorDim()
     batch_idx = edsl.TensorIndex()
     I_dims = [batch_dim] + dims
     I_idxs = [batch_idx] + idxs
     I.bind_dims(*I_dims)
     # Output gains a time axis of size t between batch and the data dims.
     O_dims = [batch_dim] + [t] + dims
     O = edsl.TensorOutput(*O_dims)
     O_idxs = [batch_idx] + [ii] + idxs
     O[O_idxs] = I[I_idxs]
     if prev is None:
         if ii != 0:
             raise RuntimeError(
                 'Generating RNN at time step {} with no previous time step'.format(ii))
     else:
         # Unwritten time slots take their values from the previous step.
         O.use_default(prev.tensor)
     return _KerasNode('time_expand', name='time_expand', tensor=O)
Ejemplo n.º 11
0
def main():
    """Interactive PlaidML setup: pick a device and target, run a smoke test,
    and optionally save the chosen settings.
    """
    print("""
PlaidML Setup ({0})

Thanks for using PlaidML!

Some Notes:
  * Bugs and other issues: https://github.com/plaidml/plaidml
  * Questions: https://stackoverflow.com/questions/tagged/plaidml
  * Say hello: https://groups.google.com/forum/#!forum/plaidml-dev
  * PlaidML is licensed under the Apache License 2.0
 """.format(plaidml.__version__))

    devices = sorted(plaidml_exec.list_devices())
    targets = sorted(plaidml_exec.list_targets())

    if not devices:
        print("""
No OpenCL devices found. Check driver installation.
Read the helpful, easy driver installation instructions from our README:
http://github.com/plaidml/plaidml
""")
        sys.exit(-1)

    # dev_idx is 1-based when chosen by prompt; the default 0 makes
    # devices[dev_idx - 1] pick devices[-1], i.e. the sole device when
    # only one exists.
    dev_idx = 0
    if len(devices) > 1:
        print("""
Multiple devices detected (You can override by setting PLAIDML_DEVICE).
Please choose a default device:
""")
        for i, device in enumerate(devices):
            print("   {} : {}".format(i + 1, device))
        choices = [str(i + 1) for i in range(len(devices))]
        dev_idx = int(choice_prompt("\nDefault device", choices, "1"))
    plaidml_settings.set('PLAIDML_DEVICE', devices[dev_idx - 1])
    device = plaidml_settings.get('PLAIDML_DEVICE')

    print()
    print("Selected device:")
    print("    {}".format(device))

    print()
    print("A target determines the compiler configuration and should be matched with your device.")
    print("Please choose a default target:")
    for i, target in enumerate(targets):
        print("   {} : {}".format(i + 1, target))
    choices = [str(i + 1) for i in range(len(targets))]
    tgt_idx = int(choice_prompt("\nDefault target", choices, "1"))
    plaidml_settings.set('PLAIDML_TARGET', targets[tgt_idx - 1])
    target = plaidml_settings.get('PLAIDML_TARGET')

    print()
    print("Selected target:")
    print("    {}".format(target))

    print()
    print("Almost done. Multiplying some matrices...")
    print("Tile code:")
    print("  function (B[X, Z], C[Z, Y]) -> (A) { A[x, y : X, Y] = +(B[x, z] * C[z, y]); }")

    # Smoke test: 3x3 matrix multiply on the selected device/target.
    shape = edsl.LogicalShape(plaidml.DType.FLOAT32, [3, 3])
    B = edsl.Tensor(shape)
    C = edsl.Tensor(shape)

    X, Y, Z = edsl.TensorDims(3)
    x, y, z = edsl.TensorIndexes(3)
    B.bind_dims(X, Z)
    C.bind_dims(Z, Y)
    A = edsl.TensorOutput(X, Y)
    A[x, y] += B[x, z] * C[z, y]

    program = edsl.Program('plaidml_setup', [A])
    plaidml_exec.run(program, [(B, np.random.rand(3, 3)), (C, np.random.rand(3, 3))])
    print("Whew. That worked.")
    print()

    # Persist the chosen device/target only with the user's consent.
    settings_path = plaidml_settings.get('PLAIDML_SETTINGS')
    save = choice_prompt("Save settings to {0}".format(settings_path), ["y", "n"], "y")
    if save == "y":
        plaidml_settings.save()
    print("Success!")
    print()
Ejemplo n.º 12
0
def sum(R):
    """Reduce `R` over all of its dimensions to a scalar edsl tensor.

    Note: intentionally shadows the builtin `sum`; name kept for
    backward compatibility with existing callers.

    Args:
        R: edsl tensor of any rank.

    Returns:
        A rank-0 edsl.TensorOutput holding the total sum.
    """
    # Generalized from a hard-coded rank of 3: index every dimension of R,
    # matching how the rest of this file derives rank via shape.ndims.
    idxs = edsl.TensorIndexes(R.shape.ndims)
    O = edsl.TensorOutput()
    O[()] += R[idxs]
    return O