Example #1
def classify(img, label, params, conv_s, pool_f, pool_s):

    # note: the params argument is discarded; trained weights are loaded
    # from the pickle file below instead
    params = []
    with open(
            'C:\\Users\\Admin\\Documents\\GitHub\\CNN-TinyImagenet\\Presentation\\test.pkl',
            "rb") as openfile:
        while True:
            try:
                params.append(pickle.load(openfile))
            except EOFError:
                break

    [f1, f2, f3, f4, f5, w6, w7, b1, b2, b3, b4, b5, b6, b7] = params[0]

    #forward operations

    conv1 = cnn.conv(img, f1, b1, conv_s)
    conv1 = cnn.relu(conv1)

    pooled1 = sm.maxpool(conv1, pool_f, pool_s)

    conv2 = cnn.conv(pooled1, f2, b2, conv_s)
    conv2 = cnn.relu(conv2)

    pooled2 = sm.maxpool(conv2, pool_f, pool_s)
    # print("pooled2", pooled2.shape)

    conv3 = cnn.conv(pooled2, f3, b3, conv_s)
    conv3 = cnn.relu(conv3)

    conv4 = cnn.conv(conv3, f4, b4, conv_s)
    conv4 = cnn.relu(conv4)

    conv5 = cnn.conv(conv4, f5, b5, conv_s)
    conv5 = cnn.relu(conv5)
    # print("conv5: ", conv5.shape)
    pooled3 = sm.maxpool(conv5, pool_f, pool_s)

    (nf2, dim2, _) = pooled3.shape
    # print("params: ",nf2, dim2)
    fc = pooled3.reshape((nf2 * dim2 * dim2, 1))

    # print(fc.shape, w6.shape, b6.shape)
    z = w6.dot(fc) + b6
    z = cnn.relu(z)
    out = w7.dot(z) + b7

    probs = sm.softmax(out)

    print(probs)

    return probs
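
The while True / EOFError loop in classify is the standard idiom for reading a file that may contain several objects pickled back to back. A reusable sketch of the same pattern (the helper name load_all_pickles is ours, not part of the example's codebase):

import pickle

def load_all_pickles(path):
    # yield every object pickled into the file, stopping cleanly at end-of-file
    with open(path, "rb") as f:
        while True:
            try:
                yield pickle.load(f)
            except EOFError:
                break

# usage: params = list(load_all_pickles('test.pkl'))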
Example #2
def load_sequence(folder):
    sequence_folder = glob.glob(os.path.join(folder, '*.dcm'))
    for filename in sequence_folder:
        print(filename)
        ds = pydicom.dcmread(filename)
        arr = ds.pixel_array
        arr_colorimage = apply_color_lut(arr, palette='PET')  # PET color-mapped copy (currently unused)
        # print(arr)
        # plt.imshow(arr,cmap=plt.cm.bone)
        # plt.show()
        # two hand-built 3x3 edge filters: the first responds to vertical
        # edges, the second to horizontal edges (Prewitt-style kernels)
        l1_filter = numpy.zeros((2, 3, 3))
        l1_filter[0, :, :] = numpy.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
        l1_filter[1, :, :] = numpy.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
        l1_feature_map = cnn.conv(arr, l1_filter)
        # plt.imshow(l1_feature_map[:, :, 0],cmap=plt.cm.bone)
        # plt.show()
        l1_feature_map_relu = cnn.relu(l1_feature_map)
        l1_feature_map_relu_pool = cnn.pooling(l1_feature_map_relu, 2, 2)
        data = asarray(l1_feature_map_relu_pool)

        print(data[0].shape)
        # save one 2-D slice of the pooled output to a CSV file
        # (note: 'data.csv' is overwritten on every loop iteration)
        savetxt('data.csv', data[0], delimiter=',')
Example #3
def feed_forward(img, label, params, conv_s, pool_f, pool_s):

    [f1, f2, f3, f4, f5, w6, w7, b1, b2, b3, b4, b5, b6, b7] = params

    #forward operations

    conv1 = cnn.conv(img, f1, b1, conv_s)
    conv1 = cnn.relu(conv1)

    pooled1 = sm.maxpool(conv1, pool_f, pool_s)

    conv2 = cnn.conv(pooled1, f2, b2, conv_s)
    conv2 = cnn.relu(conv2)

    pooled2 = sm.maxpool(conv2, pool_f, pool_s)
    # print("pooled2", pooled2.shape)

    conv3 = cnn.conv(pooled2, f3, b3, conv_s)
    conv3 = cnn.relu(conv3)

    conv4 = cnn.conv(conv3, f4, b4, conv_s)
    conv4 = cnn.relu(conv4)

    conv5 = cnn.conv(conv4, f5, b5, conv_s)
    conv5 = cnn.relu(conv5)
    # print("conv5: ", conv5.shape)
    pooled3 = sm.maxpool(conv5, pool_f, pool_s)

    (nf2, dim2, _) = pooled3.shape
    # print("params: ",nf2, dim2)
    fc = pooled3.reshape((nf2 * dim2 * dim2, 1))

    # print(fc.shape, w6.shape, b6.shape)
    z = w6.dot(fc) + b6
    z = cnn.relu(z)
    out = w7.dot(z) + b7

    probs = sm.softmax(out)

    return probs
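
Several examples here finish with sm.softmax(out), whose implementation is not shown. For reference, a numerically stable softmax looks like the sketch below (an assumption about sm's behavior, not the repository's actual code):

import numpy as np

def softmax(out):
    # shift by the max logit so exp() cannot overflow; the result is unchanged
    shifted = out - np.max(out)
    exp = np.exp(shifted)
    return exp / np.sum(exp)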
Example #4
def main_cnn():
    mnist_train = sio.loadmat('./mnist_train.mat')
    mnist_test = sio.loadmat('./mnist_test.mat')
    im_train, label_train = mnist_train['im_train'], mnist_train['label_train']
    im_test, label_test = mnist_test['im_test'], mnist_test['label_test']
    batch_size = 32
    im_train, im_test = im_train / 255.0, im_test / 255.0
    mini_batch_x, mini_batch_y = cnn.get_mini_batch(im_train, label_train,
                                                    batch_size)
    # learning_rates = [.14, .16, .18]
    # decay_rates = [.85, .9, .95]
    # for l in learning_rates:
    #     for d in decay_rates:
    w_conv, b_conv, w_fc, b_fc = cnn.train_cnn(mini_batch_x, mini_batch_y)
    sio.savemat('cnn.mat',
                mdict={
                    'w_conv': w_conv,
                    'b_conv': b_conv,
                    'w_fc': w_fc,
                    'b_fc': b_fc
                })
    # the following two lines can replace the two lines above if you only want to check results
    # data = sio.loadmat('cnn.mat')
    # w_conv, b_conv, w_fc, b_fc = data['w_conv'], data['b_conv'], data['w_fc'], data['b_fc']

    acc = 0
    confusion = np.zeros((10, 10))
    num_test = im_test.shape[1]
    for i in range(num_test):
        x = im_test[:, [i]].reshape((14, 14, 1), order='F')
        pred1 = cnn.conv(x, w_conv, b_conv)  # (14, 14, 3)
        pred2 = cnn.relu(pred1)  # (14, 14, 3)
        pred3, maxes = cnn.pool2x2(pred2)  # (7, 7, 3); maxes presumably records max locations for backprop (unused here)
        pred4 = cnn.flattening(pred3)  # (147, 1)
        y = cnn.fc(pred4, w_fc, b_fc)  # (10, 1)
        l_pred = np.argmax(y)
        confusion[l_pred, label_test[0, i]] += 1
        if l_pred == label_test[0, i]:
            acc += 1
    accuracy = acc / num_test
    # print("Learning rate:", l, "Decay rate:", d, "Accuracy:", accuracy)
    for i in range(10):
        confusion[:, i] = confusion[:, i] / np.sum(confusion[:, i])

    label_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    visualize_confusion_matrix(confusion, accuracy, label_classes,
                               'CNN Confusion Matrix')
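
Unlike the pool2x2 in Examples #5 and #7, the cnn.pool2x2 above returns a second value, maxes, presumably the locations of each 2x2 maximum so the backward pass can route gradients. A minimal sketch of that interface (our guess, not the assignment's implementation):

import numpy as np

def pool2x2(x):
    # x has shape (H, W, C) with H and W even
    h, w, c = x.shape
    blocks = x.reshape(h // 2, 2, w // 2, 2, c)
    pooled = blocks.max(axis=(1, 3))  # (H/2, W/2, C)
    # boolean mask marking, within each 2x2 window, where the maximum came from
    maxes = (blocks == pooled[:, None, :, None, :]).reshape(x.shape)
    return pooled, maxes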
Example #5
def main_cnn(retrain_tag):
    mnist_train = sio.loadmat('./mnist_train.mat')
    mnist_test = sio.loadmat('./mnist_test.mat')
    im_train, label_train = mnist_train['im_train'], mnist_train['label_train']
    im_test, label_test = mnist_test['im_test'], mnist_test['label_test']
    batch_size = 32
    im_train, im_test = im_train / 255.0, im_test / 255.0

    mini_batch_x, mini_batch_y = get_mini_batch(im_train, label_train,
                                                batch_size)

    if retrain_tag:
        w_conv, b_conv, w_fc, b_fc = train_cnn(mini_batch_x, mini_batch_y)
        sio.savemat('cnn.mat',
                    mdict={
                        'w_conv': w_conv,
                        'b_conv': b_conv,
                        'w_fc': w_fc,
                        'b_fc': b_fc
                    })
    else:
        data = sio.loadmat('cnn.mat')
        w_conv, b_conv = data['w_conv'], data['b_conv']
        w_fc, b_fc = data['w_fc'], data['b_fc']
    acc = 0
    confusion = np.zeros((10, 10))
    num_test = im_test.shape[1]
    for i in range(num_test):
        x = im_test[:, [i]].reshape((14, 14, 1), order='F')
        pred1 = conv(x, w_conv, b_conv)  # (14, 14, 3)
        pred2 = relu(pred1)  # (14, 14, 3)
        pred3 = pool2x2(pred2)  # (7, 7, 3)
        pred4 = flattening(pred3)  # (147, 1)
        y = fc(pred4, w_fc, b_fc)  # (10, 1)
        l_pred = np.argmax(y)
        confusion[l_pred, label_test[0, i]] += 1
        if l_pred == label_test[0, i]:
            acc += 1
    accuracy = acc / num_test
    for i in range(10):
        confusion[:, i] = confusion[:, i] / np.sum(confusion[:, i])

    label_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    visualize_confusion_matrix(confusion, accuracy, label_classes,
                               'CNN Confusion Matrix')
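
get_mini_batch is shared by Examples #4, #5, and #7 but never shown; the call sites imply it shuffles the training columns and returns parallel lists of image and label batches. A sketch consistent with that usage (the 10-class one-hot encoding is an assumption based on the (10, 1) fc output):

import numpy as np

def get_mini_batch(im_train, label_train, batch_size):
    # shuffle the sample columns, then slice them into batches
    n = im_train.shape[1]
    perm = np.random.permutation(n)
    mini_batch_x, mini_batch_y = [], []
    for start in range(0, n, batch_size):
        idx = perm[start:start + batch_size]
        mini_batch_x.append(im_train[:, idx])
        one_hot = np.zeros((10, idx.size))  # assumed: 10 classes, one-hot labels
        one_hot[label_train[0, idx], np.arange(idx.size)] = 1
        mini_batch_y.append(one_hot)
    return mini_batch_x, mini_batch_y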
Example #6
def vs_multilayer(input_batch, name, middle_layer_dim=1000, reuse=False):
    with tf.variable_scope(name):
        if reuse:
            print(name + " reuse variables")
            tf.get_variable_scope().reuse_variables()
        else:
            print(name + " doesn't reuse variables")

        layer1 = conv_relu('layer1',
                           input_batch,
                           kernel_size=1,
                           stride=1,
                           output_dim=middle_layer_dim)
        sim_score = conv('layer2',
                         layer1,
                         kernel_size=1,
                         stride=1,
                         output_dim=3)
    return sim_score
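
vs_multilayer builds a two-layer 1x1-convolution head with TensorFlow 1.x variable scopes; the conv and conv_relu helpers it calls are assumed. A plausible sketch of them under an NHWC 4-D input assumption (names and initializers are our choices, not the source repository's):

import tensorflow as tf

def conv(name, bottom, kernel_size, stride, output_dim):
    input_dim = bottom.get_shape().as_list()[-1]
    with tf.variable_scope(name):
        w = tf.get_variable('weights',
                            [kernel_size, kernel_size, input_dim, output_dim],
                            initializer=tf.random_normal_initializer(stddev=0.01))
        b = tf.get_variable('biases', [output_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.nn.bias_add(
            tf.nn.conv2d(bottom, w, [1, stride, stride, 1], 'SAME'), b)

def conv_relu(name, bottom, kernel_size, stride, output_dim):
    return tf.nn.relu(conv(name, bottom, kernel_size, stride, output_dim))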
Example #7
def main_cnn():
    mnist_train = sio.loadmat('./ReducedMNIST/mnist_train.mat')
    mnist_test = sio.loadmat('./ReducedMNIST/mnist_test.mat')
    im_train, label_train = mnist_train['im_train'], mnist_train['label_train']
    im_test, label_test = mnist_test['im_test'], mnist_test['label_test']
    batch_size = 32
    im_train, im_test = im_train / 255.0, im_test / 255.0
    mini_batch_x, mini_batch_y = get_mini_batch(im_train, label_train,
                                                batch_size)
    w_conv, b_conv, w_fc, b_fc = train_cnn(mini_batch_x, mini_batch_y)
    sio.savemat('cnn.mat',
                mdict={
                    'w_conv': w_conv,
                    'b_conv': b_conv,
                    'w_fc': w_fc,
                    'b_fc': b_fc
                })
    # the following two lines can replace the two lines above if you only want to check results
    # data = sio.loadmat('cnn.mat')
    # w_conv, b_conv, w_fc, b_fc = data['w_conv'], data['b_conv'], data['w_fc'], data['b_fc']

    acc = 0
    confusion = np.zeros((10, 10))
    num_test = im_test.shape[1]
    for i in range(num_test):
        x = im_test[:, [i]].reshape((14, 14, 1), order='F')
        pred1 = conv(x, w_conv, b_conv)  # (14, 14, 3)
        pred2 = relu(pred1)  # (14, 14, 3)
        pred3 = pool2x2(pred2)  # (7, 7, 3)
        pred4 = flattening(pred3)  # (147, 1)
        y = fc(pred4, w_fc, b_fc)  # (10, 1)
        l_pred = np.argmax(y)
        confusion[l_pred, label_test[0, i]] += 1
        if l_pred == label_test[0, i]:
            acc += 1
    accuracy = acc / num_test
    for i in range(10):
        confusion[:, i] = confusion[:, i] / np.sum(confusion[:, i])

    return confusion, accuracy
Example #8
def main_cnn():
    mnist_train = sio.loadmat('./mnist_train.mat')
    mnist_test = sio.loadmat('./mnist_test.mat')
    im_train, label_train = mnist_train['im_train'], mnist_train['label_train']
    im_test, label_test = mnist_test['im_test'], mnist_test['label_test']
    batch_size = 32
    im_train, im_test = im_train / 255.0, im_test / 255.0
    # plt.imshow(mnist_train['im_train'][:, 0].reshape((14, 14), order='F'), cmap='gray')
    # plt.show()
    # x = im_train[:, 0].reshape((14, 14, 1), order='F')
    # y = pool2x2(x)
    # dl_dy = np.random.rand(7, 7, 1)
    # dl_dx = pool2x2_backward(dl_dy, x, y)
    # plt.imshow(x[:, :, 0], cmap='gray')
    # plt.show()
    # plt.imshow(y[:, :, 0], cmap='gray')
    # plt.show()
    # plt.imshow(dl_dy[:, :, 0], cmap='gray')
    # plt.show()
    # plt.imshow(dl_dx[:, :, 0], cmap='gray')
    # plt.show()
    # x = np.arange(25).reshape((5, 5, 1))
    # w_conv = np.arange(27).reshape((3, 3, 1, 3))
    # b_conv = np.arange(3).reshape((3, 1))
    # y = conv(x, w_conv, b_conv)
    # dl_dy = np.random.random((5, 5, 3))
    # dl_dw, dl_db = conv_backward(dl_dy, x, w_conv, b_conv, y)
    # print(x)
    # print(w_conv)
    # print(b_conv)
    # print(y)
    # print(dl_dw.shape)
    # print(dl_db)
    # exit(-1)
    mini_batches_x, mini_batches_y = get_mini_batch(im_train, label_train,
                                                    batch_size)
    w_conv, b_conv, w_fc, b_fc = train_cnn(mini_batches_x, mini_batches_y
                                           # , im_test, label_test
                                           )
    sio.savemat('cnn.mat',
                mdict={
                    'w_conv': w_conv,
                    'b_conv': b_conv,
                    'w_fc': w_fc,
                    'b_fc': b_fc
                })
    # the following two lines can replace the two lines above if you only want to check results
    # data = sio.loadmat('cnn.mat')
    # w_conv, b_conv, w_fc, b_fc = data['w_conv'], data['b_conv'], data['w_fc'], data['b_fc']

    acc = 0
    confusion = np.zeros((10, 10))
    num_test = im_test.shape[1]
    for i in range(num_test):
        print('Test # {}/{}: \r'.format(i + 1, num_test), end='')
        x = im_test[:, [i]].reshape((14, 14, 1), order='F')
        pred1 = conv(x, w_conv, b_conv)  # (14, 14, 3)
        pred2 = relu(pred1)  # (14, 14, 3)
        pred3 = pool2x2(pred2)  # (7, 7, 3)
        pred4 = flattening(pred3)  # (147, 1)
        y = fc(pred4, w_fc, b_fc)  # (10, 1)
        l_pred = np.argmax(y)
        confusion[l_pred, label_test[0, i]] += 1
        if l_pred == label_test[0, i]:
            acc += 1
    accuracy = acc / num_test
    print(accuracy)
    for i in range(10):
        confusion[:, i] = confusion[:, i] / np.sum(confusion[:, i])

    label_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    visualize_confusion_matrix(confusion, accuracy, label_classes,
                               'CNN Confusion Matrix')
Example #9
import skimage.data
import numpy
import matplotlib.pyplot
import cnn

img = skimage.data.coffee()

# First conv layer
num_filters = 2
depth = img.shape[-1]  # input channels (3 for the RGB coffee image)
stride = 2  # note: unused below; the pooling call hardcodes its window and stride
l1_filter = numpy.random.rand(num_filters, 3, 3, depth)

print("\n**Working with conv layer 1**")
l1_feature_map = cnn.conv(img, l1_filter)
print("\n**ReLU**")
l1_feature_map_relu = cnn.relu(l1_feature_map)
print("\n**Pooling**")
l1_feature_map_relu_pool = cnn.pooling(l1_feature_map_relu, 2, 2)
print("\n**Fully connected**")
# note: the fully connected layer here consumes the pre-pooling feature map,
# so its weight matrix is sized from l1_feature_map rather than the pooled output
l1_fc1_weights = numpy.random.rand(10, numpy.prod(l1_feature_map.shape))
l1_feature_map_fc = cnn.fc(l1_feature_map, l1_fc1_weights)
print("**End of conv layer 1**\n")

# Graphing results
fig0, ax0 = matplotlib.pyplot.subplots(nrows=1, ncols=1)
ax0.imshow(img).set_cmap("gray")
ax0.set_title("Input Image")
ax0.get_xaxis().set_ticks([])
ax0.get_yaxis().set_ticks([])
matplotlib.pyplot.savefig("in_img.png", bbox_inches="tight")
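
The graphing section stops after saving the input image. A hypothetical continuation in the same style, plotting both pooled feature maps; the channels-last indexing follows the commented-out plt.imshow(l1_feature_map[:, :, 0], ...) line in Example #2:

fig1, ax1 = matplotlib.pyplot.subplots(nrows=1, ncols=num_filters)
for ch in range(num_filters):
    ax1[ch].imshow(l1_feature_map_relu_pool[:, :, ch]).set_cmap("gray")
    ax1[ch].set_title("Filter {}".format(ch))
    ax1[ch].get_xaxis().set_ticks([])
    ax1[ch].get_yaxis().set_ticks([])
matplotlib.pyplot.savefig("l1_pool.png", bbox_inches="tight")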
Example #10
def make_network(img, label, params, conv_s, pool_f, pool_s):
    img = img.reshape(3, 64, 64)  # channels-first 3x64x64 input
    [f1, f2, f3, f4, f5, w6, w7, b1, b2, b3, b4, b5, b6, b7] = params

    #forward operations

    conv1 = cnn.conv(img, f1, b1, conv_s)
    conv1 = cnn.relu(conv1)

    pooled1 = sm.maxpool(conv1, pool_f, pool_s)

    conv2 = cnn.conv(pooled1, f2, b2, conv_s)
    conv2 = cnn.relu(conv2)

    pooled2 = sm.maxpool(conv2, pool_f, pool_s)
    # print("pooled2", pooled2.shape)

    conv3 = cnn.conv(pooled2, f3, b3, conv_s)
    conv3 = cnn.relu(conv3)

    conv4 = cnn.conv(conv3, f4, b4, conv_s)
    conv4 = cnn.relu(conv4)

    conv5 = cnn.conv(conv4, f5, b5, conv_s)
    conv5 = cnn.relu(conv5)
    # print("conv5: ", conv5.shape)
    pooled3 = sm.maxpool(conv5, pool_f, pool_s)

    (nf2, dim2, _) = pooled3.shape
    # print("params: ",nf2, dim2)
    fc = pooled3.reshape((nf2 * dim2 * dim2, 1))

    # print(fc.shape, w6.shape, b6.shape)
    z = w6.dot(fc) + b6
    z = cnn.relu(z)
    out = w7.dot(z) + b7

    probs = sm.softmax(out)

    loss = cnn.cross_entropy(probs, label)

    # conv1+relu > pooled1 > conv2+relu > pooled2 > conv3+relu > conv4+relu > conv5+relu > pooled3 > fc+relu > fc
    #backward operations
    dout = probs - label  # derivative of softmax cross-entropy w.r.t. the logits

    dw7 = dout.dot(z.T)  # loss gradient of final dense layer weights
    db7 = np.sum(dout, axis=1).reshape(b7.shape)  # loss gradient of final dense layer biases

    dz = w7.T.dot(dout)  # loss gradient of first dense layer outputs
    dz[z <= 0] = 0  # backpropagate through ReLU
    dw6 = dz.dot(fc.T)
    db6 = np.sum(dz, axis=1).reshape(b6.shape)

    dfc = w6.T.dot(dz)  # loss gradient of the flattened pooling output
    dpool3 = dfc.reshape(pooled3.shape)  # restore the pooling layer's dimensions

    # backprop through max-pooling: only the neuron with the highest
    # activation in each window receives gradient
    dconv5 = cnn.pool_back(dpool3, conv5, pool_f, pool_s)
    dconv5[conv5 <= 0] = 0  # backpropagate through ReLU

    # backpropagate through the fifth convolutional layer
    dconv4, df5, db5 = cnn.conv_back(dconv5, conv4, f5, conv_s)
    dconv4[conv4 <= 0] = 0  # backpropagate through ReLU

    # backpropagate through the fourth convolutional layer
    dconv3, df4, db4 = cnn.conv_back(dconv4, conv3, f4, conv_s)
    dconv3[conv3 <= 0] = 0  # backpropagate through ReLU

    # backpropagate through the third convolutional layer
    dpool2, df3, db3 = cnn.conv_back(dconv3, pooled2, f3, conv_s)
    # print("pooled2", pooled2.shape, "dpool2: ", dpool2.shape)
    dpool2[pooled2 <= 0] = 0  # backpropagate through ReLU

    # backprop through max-pooling: only the max-activation neurons get gradient
    dconv2 = cnn.pool_back(dpool2, conv2, pool_f, pool_s)
    dconv2[conv2 <= 0] = 0  # backpropagate through ReLU

    # backpropagate through the second convolutional layer
    dpool1, df2, db2 = cnn.conv_back(dconv2, pooled1, f2, conv_s)
    dpool1[pooled1 <= 0] = 0  # backpropagate through ReLU

    # backprop through max-pooling: only the max-activation neurons get gradient
    dconv1 = cnn.pool_back(dpool1, conv1, pool_f, pool_s)
    dconv1[conv1 <= 0] = 0  # backpropagate through ReLU

    # backpropagate through the first convolutional layer
    dimage, df1, db1 = cnn.conv_back(dconv1, img, f1, conv_s)

    grads = [
        df1, df2, df3, df4, df5, dw6, dw7, db1, db2, db3, db4, db5, db6, db7
    ]

    return grads, loss
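
make_network returns grads in the same order as params, so a training step can simply zip the two lists. A bare SGD update as a sketch (the learning rate lr is an assumed hyperparameter; the original training loop may well use momentum or Adam instead):

def sgd_step(params, grads, lr=0.01):
    # element-wise update; params and grads share the same ordering
    return [p - lr * g for p, g in zip(params, grads)]

# usage:
# grads, loss = make_network(img, label, params, conv_s, pool_f, pool_s)
# params = sgd_step(params, grads)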