Example #1
def activation(x, w_conv, stride, act_conv, w_dense, b_dense, act_dense):

    # convolution
    tmp = conv.conv_1d(x, w_conv, stride)
    tmp = act.dict_activations[act_conv](tmp)
    # dense layer
    tmp = np.dot(tmp, w_dense) + b_dense
    tmp = act.dict_activations[act_dense](tmp)

    return np.sum(tmp)
Example #2
def activation(x, w_conv_1, stride_1, act_conv_1, w_conv_2, stride_2,
               act_conv_2, w_dense_1, b_dense_1, act_dense_1, w_dense_2,
               b_dense_2, act_dense_2):

    # convolutions
    tmp = conv.conv_1d(x, w_conv_1, stride_1)
    tmp = act.dict_activations[act_conv_1](tmp)

    tmp = conv.conv_1d(tmp, w_conv_2, stride_2)
    tmp = act.dict_activations[act_conv_2](tmp)

    # dense layers
    tmp = np.dot(tmp, w_dense_1) + b_dense_1
    tmp = act.dict_activations[act_dense_1](tmp)

    tmp = np.dot(tmp, w_dense_2) + b_dense_2
    tmp = act.dict_activations[act_dense_2](tmp)

    return np.sum(tmp)
Example #3
def activation(self, input_, accumulate=False):

    if accumulate:
        # cache the input so a later backward pass can reuse it
        self.input_ = cp.copy(input_)

    output = conv.conv_1d(input_, self.weights, self.stride)
    output = act.dict_activations[self.act](output)

    self.output = cp.copy(output)

    return output
Example #4
def activation(input_, w_conv, stride, act_conv, w_dense, b_dense, act_dense):

    tmp = input_

    # convolutions
    for i in range(len(w_conv)):

        tmp = conv.conv_1d(tmp, w_conv[i], stride[i])
        tmp = act.dict_activations[act_conv[i]](tmp)

    # dense layers
    for i in range(len(w_dense)):

        tmp = np.dot(tmp, w_dense[i]) + b_dense[i]
        tmp = act.dict_activations[act_dense[i]](tmp)

    return tmp
Example #5
def exp(series, weights, stride):

    res = conv.conv_1d(series, weights, stride)
    return np.exp(res)
Example #6
def linear(series, weights, stride):

    res = conv.conv_1d(series, weights, stride)
    return res
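
All of these examples delegate to conv.conv_1d, whose implementation is not shown in this listing. Judging by the shapes used throughout and the np.correlate check in Example #9, it appears to compute a strided cross-correlation over a (1, n) series with a (1, k) kernel. A minimal reference sketch under that assumption (conv_1d_sketch is a hypothetical name, not part of the project):

import numpy as np

def conv_1d_sketch(series, weights, stride):
    # slide the kernel across the series with the given stride and take
    # dot products; for stride 1 this matches np.correlate(..., 'valid')
    n, k = series.shape[1], weights.shape[1]
    out_len = (n - k) // stride + 1
    out = np.empty((1, out_len))
    for j in range(out_len):
        out[0, j] = series[0, j * stride:j * stride + k] @ weights[0]
    return out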
Example #7
    weights = np.random.rand(1, np.random.randint(1, 10))
    stride = np.random.randint(1, 10)

    weights_c = np.random.rand(1, np.random.randint(1, 10))
    stride_c = np.random.randint(1, 10)

    ### linear activation test ###
    # derivative by implementation (chain rule)
    derivative_by_implementation = stm.series_to_matrix(
        inp, weights.shape[1], stride).T
    tmp = np.zeros(shape=(
        weights.shape[1],
        linear(linear(inp, weights, stride), weights_c, stride_c).shape[1]))
    for i in range(derivative_by_implementation.shape[0]):
        tmp[i] = conv.conv_1d(derivative_by_implementation[np.newaxis, i],
                              weights_c, stride_c)
    derivative_by_implementation = np.sum(tmp, axis=1)

    # derivative by definition
    derivative_by_def = list()
    epsilon = 1e-5
    for i in range(weights.shape[1]):

        weights[:, i] += epsilon
        f_plus = linear(linear(inp, weights, stride), weights_c, stride_c)
        weights[:, i] -= 2 * epsilon
        f_minus = linear(linear(inp, weights, stride), weights_c, stride_c)
        weights[:, i] += epsilon

        derivative_by_def.append(np.sum((f_plus - f_minus) / (2 * epsilon)))
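
Stripped of this example's specifics, the second half is a standard central-difference gradient check. A generic sketch of the pattern (grad_check and f are illustrative names, not from the project):

import numpy as np

def grad_check(f, w, i, epsilon=1e-5):
    # estimate d f(w) / d w[:, i] by perturbing a single weight in both
    # directions and restoring it afterwards
    w[:, i] += epsilon
    f_plus = f(w)
    w[:, i] -= 2 * epsilon
    f_minus = f(w)
    w[:, i] += epsilon  # undo the perturbation
    return np.sum((f_plus - f_minus) / (2 * epsilon))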
Example #8
    # create the net
    net = nn.NN(net_blocks)

    # initialize the parameters to random values in [-1, 1]
    net.init_parameters(['uniform', -1., 1.])

    input_ = np.random.rand(1, net.n_inputs)

    tmp = cp.copy(input_)
    for i in range(len(net.layers)):

        if isinstance(net.layers[i], layer_conv.Conv):

            weights = net.layers[i].weights
            stride = net.layers[i].stride
            tmp = conv.conv_1d(tmp, weights, stride)
            tmp = act.dict_activations[net.layers[i].act](tmp)

        elif isinstance(net.layers[i], layer_dense.Dense):

            weights = net.layers[i].weights
            bias = net.layers[i].bias
            tmp = np.dot(tmp, weights) + bias
            tmp = act.dict_activations[net.layers[i].act](tmp)

    # after the loop, tmp holds the net's output computed layer by layer
    output_by_definition = tmp

    # calculate net's output
    net.activation(input_)
    output_by_calculation = cp.copy(net.output)
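
The excerpt ends here; presumably the test then asserts that the two outputs agree, along the lines of this one-line sketch (not shown in the original):

np.testing.assert_almost_equal(output_by_definition, output_by_calculation)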
Example #9
import numpy as np

import convolution as conv
import series_to_matrix as stm

if __name__ == '__main__':

    np.random.seed(43)  # the answer to everything, plus 1
    series = np.random.rand(1, 100)

    for k in range(1, 10):

        for s in range(1, 10):

            kernel = np.random.rand(1, k)

            # calculate the convolution for different (kernel, stride) pairs
            res = conv.conv_1d(series, kernel, striding=s)

            print("Series shape ", series.shape, "\nKernel shape ",
                  kernel.shape, "\nStride shape ", s, "\nResult shape ",
                  res.shape)
            print("#######\n")

            # case where striding is 1 and we have a numpy counterpart function
            if s == 1:

                # test convolution, should raise an exception if the convolution is wrong
                res_numpy = np.correlate(series[0, :], kernel[0, :])
                np.testing.assert_almost_equal(res[0, :], res_numpy)

                # test SIMD convolution, should raise an exception if the
                #  convolution is wrong