import arrayfire as af


def forward(nn_np, data):
    """
    Given a dictionary representing a feed-forward neural net and an input data
    matrix, compute the network's output and store the intermediate results in
    the dictionary.
    :param nn_np: neural network dictionary
    :param data: a numpy n-by-m matrix, where m is the number of input units in the net
    :return: the output layer activations
    """
    nn = nn_np
    nn['activations'] = []
    for i in range(data.shape[0]):
        # Wrap each input row as an ArrayFire column vector via its host pointer.
        nn['activations'].append(
            af.Array(data[i].ctypes.data, data[i].shape, data[i].dtype.char))
    nn['zs'] = []
    # map(None, ...) was Python 2's padded zip; zip works for equal-length lists.
    # Propagation starts from the last input row appended above.
    for w, s, b in zip(nn['weights'], nn['nonlin'], nn['biases']):
        # af.dot only supports vectors; matmul with MATPROP.TRANS computes
        # act^T @ w without materializing the transpose. The bias b is unused here.
        z = af.matmul(nn['activations'][-1], w, af.MATPROP.TRANS)
        nn['zs'].append(af.transpose(z))
        nn['activations'].append(s[0](af.transpose(z)))
    return nn['activations'][-1]
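
A minimal usage sketch for the function above. The two-layer dictionary layout, the float32 shapes, and af.sigmoid as the nonlinearity are illustrative assumptions, not part of the original example:

import numpy as np
import arrayfire as af

def np_to_af(x):
    # Same host-pointer construction idiom used in forward() above.
    return af.Array(x.ctypes.data, x.shape, x.dtype.char)

w1 = np.random.rand(4, 3).astype(np.float32)   # 4 input units -> 3 hidden units
w2 = np.random.rand(3, 2).astype(np.float32)   # 3 hidden units -> 2 output units
nn = {'weights': [np_to_af(w1), np_to_af(w2)],
      'nonlin': [(af.sigmoid,), (af.sigmoid,)],
      'biases': [np_to_af(np.zeros(3, dtype=np.float32)),
                 np_to_af(np.zeros(2, dtype=np.float32))]}

sample = np.random.rand(1, 4).astype(np.float32)   # one sample, 4 input units
out = forward(nn, sample)                          # a 2x1 array of output activations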
Example #2
def calc_arrayfire(A, b, x0, maxiter=10):
    # Conjugate gradient solve of A x = b (A symmetric positive definite);
    # returns x and the squared error against the reference solution x0.
    x = af.constant(0, b.dims()[0], dtype=af.Dtype.f32)
    r = b - af.matmul(A, x)
    p = r
    for i in range(maxiter):
        Ap = af.matmul(A, p)
        alpha_num = af.dot(r, r)
        alpha_den = af.dot(p, Ap)
        alpha = alpha_num / alpha_den
        # af.dot returns a one-element array, so tile it to scale elementwise.
        r -= af.tile(alpha, Ap.dims()[0]) * Ap
        x += af.tile(alpha, Ap.dims()[0]) * p
        beta_num = af.dot(r, r)
        beta = beta_num / alpha_num
        p = r + af.tile(beta, p.dims()[0]) * p
    res = x0 - x
    return x, af.dot(res, res)
def calc_arrayfire(A, b, x0, maxiter=10):
    # Same conjugate gradient solver as above, but with an explicit
    # af.eval(x) to force evaluation of the lazy expression tree.
    x = af.constant(0, b.dims()[0], dtype=af.Dtype.f32)
    r = b - af.matmul(A, x)
    p = r
    for i in range(maxiter):
        Ap = af.matmul(A, p)
        alpha_num = af.dot(r, r)
        alpha_den = af.dot(p, Ap)
        alpha = alpha_num / alpha_den
        r -= af.tile(alpha, Ap.dims()[0]) * Ap
        x += af.tile(alpha, Ap.dims()[0]) * p
        beta_num = af.dot(r, r)
        beta = beta_num / alpha_num
        p = r + af.tile(beta, p.dims()[0]) * p
    af.eval(x)
    res = x0 - x
    return x, af.dot(res, res)
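
A hedged usage sketch for the solver above: build a small symmetric positive definite system on the host and check the returned squared error. The problem size and the af.interop.np_to_af_array transfer are illustrative assumptions:

import numpy as np
import arrayfire as af

n = 64
M = np.random.rand(n, n).astype(np.float32)
A_np = M.dot(M.T) + n * np.eye(n, dtype=np.float32)   # SPD by construction
x_true = np.random.rand(n).astype(np.float32)
b_np = A_np.dot(x_true)

A = af.interop.np_to_af_array(A_np)
b = af.interop.np_to_af_array(b_np)
x0 = af.interop.np_to_af_array(x_true)

x, err = calc_arrayfire(A, b, x0, maxiter=50)
print(err)   # squared distance to the known solution; small once converged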
Example #4
def simple_blas(verbose=False):
    display_func = _util.display_func(verbose)
    a = af.randu(5, 5)
    b = af.randu(5, 5)

    display_func(af.matmul(a, b))
    display_func(af.matmul(a, b, af.MATPROP.TRANS))
    display_func(af.matmul(a, b, af.MATPROP.NONE, af.MATPROP.TRANS))

    b = af.randu(5, 1)
    display_func(af.dot(b, b))
Example #5
def dot(a, b):
    # ArrayFire requires that the dtypes match for dot and matmul
    res_dtype = numpy.result_type(a, b)
    a = a.astype(res_dtype, copy=False)
    b = b.astype(res_dtype, copy=False)
    if a.ndim == 1 and b.ndim == 1:
        s = arrayfire.dot(a.d_array, b.d_array)
        return afnumpy.ndarray((), dtype=a.dtype, af_array=s)[()]

    a_shape = a.shape
    b_shape = b.shape
    if a.ndim == 1:
        a = a.reshape((a.shape[0], 1))
    if b.ndim == 1:
        b = b.reshape((b.shape[0], 1))

    if a.ndim == 2 and b.ndim == 2:
        # Note the swapped operand order: afnumpy exposes numpy's row-major
        # semantics on top of ArrayFire's column-major storage. It's not a bug!
        s = arrayfire.matmul(b.d_array, a.d_array)
        return afnumpy.ndarray(pu.af_shape(s),
                               dtype=pu.typemap(s.dtype()),
                               af_array=s)

    # Multidimensional dot is done with loops

    # Calculate the shape of the result array
    a_shape = list(a_shape)
    a_shape.pop(-1)
    b_shape = list(b_shape)
    b_shape.pop(-2)
    res_shape = a_shape + b_shape

    # Make sure the arrays are at least 3D
    if a.ndim < 3:
        a = a.reshape((1, ) + a.shape)
    if b.ndim < 3:
        b = b.reshape((1, ) + b.shape)

    # Flatten the dimensions we loop over to simplify the indexing code
    a = a.reshape((-1, a.shape[-2], a.shape[-1]))
    b = b.reshape((-1, b.shape[-2], b.shape[-1]))

    # Initialize the output array. The shape matches the reshaped a and b.
    res = afnumpy.empty((a.shape[0], a.shape[-2], b.shape[0], b.shape[-1]),
                        dtype=a.dtype)

    # Loop through the flattened indices and calculate the matmuls
    for i in range(0, a.shape[0]):
        for j in range(0, b.shape[0]):
            res[i, :, j, :] = afnumpy.dot(a[i], b[j])

    # Finally appropriately reshape the result array
    return res.reshape(res_shape)
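
A quick consistency check of the swapped matmul operands above, assuming afnumpy's numpy-compatible array constructor and conversion back through numpy.array: the device-side call is matmul(b, a) precisely so that the numpy-level result matches dot(a, b).

import numpy
import afnumpy

a_np = numpy.random.rand(2, 3)
b_np = numpy.random.rand(3, 4)
c = afnumpy.dot(afnumpy.array(a_np), afnumpy.array(b_np))
# Should agree with the host-side product up to float rounding.
numpy.testing.assert_allclose(numpy.array(c), a_np.dot(b_np), rtol=1e-6)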
Example #7
def simple_blas(verbose=False):
    display_func = _util.display_func(verbose)
    print_func = _util.print_func(verbose)
    a = af.randu(5, 5)
    b = af.randu(5, 5)

    display_func(af.matmul(a, b))
    display_func(af.matmul(a, b, af.MATPROP.TRANS))
    display_func(af.matmul(a, b, af.MATPROP.NONE, af.MATPROP.TRANS))

    b = af.randu(5, 1)
    display_func(af.dot(b, b))
Example #8
def forward(nn_np, data):
    """
    Given a dictionary representing a feed-forward neural net and an input data
    matrix, compute the network's output and store the intermediate results in
    the dictionary.
    :param nn_np: neural network dictionary
    :param data: a numpy n-by-m matrix, where m is the number of input units in the net
    :return: the output layer activations
    """
    nn = nn_np
    # Wrap the input matrix as an ArrayFire array via its host pointer.
    nn['activations'] = [af.Array(data.ctypes.data, data.shape, data.dtype.char)]
    nn['zs'] = []
    for w, s, b in zip(nn['weights'], nn['nonlin'], nn['biases']):
        # af.dot only supports vectors; matmul computes the matrix product.
        z = af.matmul(w, af.transpose(nn['activations'][-1])) + b
        nn['zs'].append(af.transpose(z))
        nn['activations'].append(s[0](af.transpose(z)))
    return nn['activations'][-1]
Example #10
def vdot(a, b):
    # Like numpy.vdot: flatten both operands and conjugate the first.
    s = arrayfire.dot(arrayfire.conjg(a.flat.d_array), b.flat.d_array)
    return afnumpy.ndarray((), dtype=a.dtype, af_array=s)[()]
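
A numpy-equivalence sketch for vdot (first operand conjugated, both flattened); the afnumpy.array constructor and the scalar conversion via complex() are assumptions here:

import numpy
import afnumpy

x_np = numpy.array([1 + 2j, 3 + 4j])
y_np = numpy.array([5 + 6j, 7 + 8j])
s = vdot(afnumpy.array(x_np), afnumpy.array(y_np))
assert numpy.isclose(complex(s), numpy.vdot(x_np, y_np))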
#!/usr/bin/python
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################

import arrayfire as af

a = af.randu(5, 5)
b = af.randu(5, 5)

af.display(af.matmul(a, b))
af.display(af.matmul(a, b, af.MATPROP.TRANS))
af.display(af.matmul(a, b, af.MATPROP.NONE, af.MATPROP.TRANS))

b = af.randu(5, 1)
af.display(af.dot(b, b))
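
A small check sketch of the MATPROP flags used above: passing af.MATPROP.TRANS is equivalent to multiplying by an explicitly transposed matrix, without materializing the transpose.

import arrayfire as af

a = af.randu(5, 5)
b = af.randu(5, 5)
lhs = af.matmul(a, b, af.MATPROP.TRANS)   # uses a^T without copying
rhs = af.matmul(af.transpose(a), b)       # explicit transpose for comparison
print(af.max(af.abs(lhs - rhs)))          # ~0 up to float rounding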
Example #13
#!/usr/bin/python
import arrayfire as arr

a = arr.randu(4, 3)    # uniform random 4x3 matrix
b = arr.randn(3, 5)    # normally distributed 3x5 matrix
c = arr.matmul(a, b)   # matrix product (af.dot is defined for vectors only)
d = arr.sum(c)         # sum over all elements (a Python scalar)
d0 = arr.sum(c, 0)     # column sums: a 1x5 array
d1 = arr.sum(c, 1)     # row sums: a 4x1 array

print(a)
print(b)
print(c)
print(d)
print(d0)
print(d1)