import numpycnn


def custom_conv2d(bottom_data, weights, bias):
    input = bottom_data.copy()
    shape = bottom_data.shape
    if len(shape) > 3:
        # Drop the (assumed singleton) batch dimension: (N, C, H, W) -> (C, H, W)
        input = input.reshape((shape[1], shape[2], shape[3]))

    # (C, H, W) -> (H, W, C): numpycnn expects channels-last images
    input = input.transpose((1, 2, 0))
    # (out, in, kH, kW) -> (out, kH, kW, in): channels-last filters as well
    weights = weights.transpose((0, 2, 3, 1))

    conv_result = numpycnn.conv(input, weights)
    # Add the per-output-channel bias to each feature map
    for bias_index in range(len(bias)):
        conv_result[:, :, bias_index] += bias[bias_index]
    return conv_result
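For reference, the weight transpose above maps Caffe's OIHW layout to the per-filter channels-last layout that numpycnn.conv works with. A minimal shape check (the sizes below are arbitrary, not taken from the snippet):

import numpy as np

# Arbitrary example sizes: 4 output channels, 3 input channels, 5x5 kernel.
w_oihw = np.zeros((4, 3, 5, 5))           # Caffe layout: (out, in, kH, kW)
w_ohwc = w_oihw.transpose((0, 2, 3, 1))   # -> (out, kH, kW, in), channels-last per filter
print(w_oihw.shape, "->", w_ohwc.shape)   # (4, 3, 5, 5) -> (4, 5, 5, 3)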
Example no. 2
import pickle

import numpy as np
import numpycnn
# fd (the dataset), filters (the filter bank) and testing() come from the surrounding project.

data = fd.data
y = fd.label
length = len(data)

# Load the trained synapse (weight) matrices
with open("syn0.pickle", "rb") as f:
    syn0 = pickle.load(f)
with open("syn1.pickle", "rb") as f:
    syn1 = pickle.load(f)

# Test the network and count the accuracy
epoch = length
benar = 0  # number of correct predictions
runn = 0   # number of samples evaluated
for i in range(epoch):
    ri = np.random.randint(length)
    # singleData = np.reshape(data[ri], (-1, 28)) # reshape into 28 x 28
    singleData = data[ri]
    l1_feature_map = numpycnn.conv(singleData, filters.filter)
    # ReLu layer
    l1_feature_map_relu = numpycnn.relu(l1_feature_map)
    # Pooling Layer
    l1_feature_map_relu_pool = numpycnn.pooling(l1_feature_map_relu, 2, 2)

    # Forward Propagation
    final = np.array([l1_feature_map_relu_pool.ravel()])
    print(testing(final.ravel(), y[ri]))
    # benar += testing(final.ravel(),y[ri])
    # runn += 1
    # print('process : ',round(i/epoch*100,2),'% truth rate : ', round(benar / runn * 100,2), '%')
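testing() is defined elsewhere in the project. A minimal sketch of what such a check could look like, assuming a two-layer fully connected network with sigmoid activations, the syn0/syn1 matrices loaded above, and an integer class label (all of these details are assumptions, not the project's actual code):

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def testing(flat_features, label):
    # Hypothetical forward pass: (4608,) -> (100,) -> (5,)
    l1 = sigmoid(np.dot(flat_features, syn0))
    l2 = sigmoid(np.dot(l1, syn1))
    return int(np.argmax(l2) == label)  # 1 if the prediction matches the label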
Example no. 3
import numpy
import skimage.color
import numpycnn
# img is loaded earlier in the script.

# Converting the image into gray.
img = skimage.color.rgb2gray(img)

# First conv layer
# l1_filter = numpy.random.rand(2, 7, 7) * 20  # Preparing the filters randomly.
l1_filter = numpy.zeros((2, 3, 3))
# Vertical edge detector
l1_filter[0, :, :] = numpy.array([[-1, 0, 1],
                                  [-1, 0, 1],
                                  [-1, 0, 1]])
# Horizontal edge detector
l1_filter[1, :, :] = numpy.array([[1,  1,  1],
                                  [0,  0,  0],
                                  [-1, -1, -1]])

print("\n**Working with conv layer 1**")
l1_feature_map = numpycnn.conv(img, l1_filter)
print("\n**ReLU**")
l1_feature_map_relu = numpycnn.relu(l1_feature_map)
print("\n**Pooling**")
l1_feature_map_relu_pool = numpycnn.pooling(l1_feature_map_relu, 2, 2)
print("**End of conv layer 1**\n")

# Second conv layer
l2_filter = numpy.random.rand(3, 5, 5, l1_feature_map_relu_pool.shape[-1])
print("\n**Working with conv layer 2**")
l2_feature_map = numpycnn.conv(l1_feature_map_relu_pool, l2_filter)
print("\n**ReLU**")
l2_feature_map_relu = numpycnn.relu(l2_feature_map)
print("\n**Pooling**")
l2_feature_map_relu_pool = numpycnn.pooling(l2_feature_map_relu, 2, 2)
print("**End of conv layer 2**\n")
Example no. 4
import numpy as np
import numpycnn
import cv2
# data (the dataset) and filters (the filter bank) come from the surrounding project.

np.random.seed(1)
# Synapse (weight) matrices, initialised uniformly in [-1, 1)
syn0 = 2 * np.random.random((4608, 100)) - 1
syn1 = 2 * np.random.random((100, 5)) - 1

length = len(data)
print(length)
epoch = 1 * length
for j in range(1):  # a single sample for inspection; use range(epoch) for a full pass
    # print(j)
    ri = np.random.randint(length)
    # Convolution layer
    # singleData = np.reshape(data[ri], (-1, 28))  # reshape into 28 x 28, only for MNIST
    singleData = data[ri] / 255  # scale pixel values to [0, 1]
    print(np.amax(singleData), np.amin(singleData))
    l1_feature_map = numpycnn.conv(singleData, filters.filter1)
    l1_feature_map_relu = numpycnn.relu(l1_feature_map)
    l1_feature_map_relu_pool = numpycnn.pooling(l1_feature_map_relu, 2, 2)
    print(l1_feature_map_relu_pool.shape, np.amax(l1_feature_map_relu_pool),
          np.amin(l1_feature_map_relu_pool))

    feature_input = []
    for in2, conv2 in enumerate(l1_feature_map_relu_pool.T):
        l2_feature_map = numpycnn.conv(conv2, filters.filter2)
        l2_feature_map_relu = numpycnn.relu(l2_feature_map)
        l2_feature_map_relu_pool = numpycnn.pooling(l2_feature_map_relu, 2, 2)
        cv2.imwrite('conv1_' + str(in2) + '.jpg', conv2 * 255)
        print(l2_feature_map_relu_pool.shape,
              np.amax(l2_feature_map_relu_pool),
              np.amin(l2_feature_map_relu_pool))
def custom_pool(bottom_data, size, stride):
    # Pool the channels-last feature maps with the given window size and stride.
    # return numpycnn.pooling(bottom_data, size, stride)
    return numpycnn.pooling_new(bottom_data, size, stride)
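Assuming custom_conv2d and custom_pool live in the same module, a possible way to chain them on a Caffe-style blob (the shapes and random data below are illustrative only):

import numpy as np

# One 3-channel 32x32 image in NCHW, 8 filters of size 3x3 in OIHW, one bias per filter.
blob = np.random.rand(1, 3, 32, 32)
w = np.random.rand(8, 3, 3, 3)
b = np.zeros(8)

feature_maps = custom_conv2d(blob, w, b)  # channels-last feature maps, roughly (30, 30, 8)
pooled = custom_pool(feature_maps, 2, 2)  # 2x2 pooling with stride 2
print(feature_maps.shape, pooled.shape)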