Example No. 1
import numpy as np


class Softmax:
    # A standard fully-connected layer with softmax activation.

    def __init__(self, input_len, nodes):
        # We divide by input_len to reduce the variance of our initial values.
        self.weights = np.random.randn(input_len, nodes) / input_len
        self.biases = np.zeros(nodes)

    def forward(self, input):
        '''
        Performs a forward pass of the softmax layer using the given input.
        Returns a 1d numpy array containing the respective probability values.
        - input can be any array with any dimensions.
        '''
        input = input.flatten()

        input_len, nodes = self.weights.shape

        totals = np.dot(input, self.weights) + self.biases
        exp = np.exp(totals)
        return exp / np.sum(exp, axis=0)
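

# A numerically stable variant (our addition, not part of the original
# snippet): np.exp overflows for large totals, and subtracting the max logit
# first yields the same probabilities without overflow.
def stable_softmax(totals):
    # Shift so the largest exponent is exp(0) = 1.
    shifted = totals - np.max(totals)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=0)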


# In[ ]:


import mnist
import numpy as np
from conv import Conv3x3
from maxpool import MaxPool2
from softmax import Softmax

# We only use the first 1k testing examples (out of 10k total)
# in the interest of time. Feel free to change this if you want.
test_images = mnist.test_images()[:1000]
test_labels = mnist.test_labels()[:1000]

conv = Conv3x3(8)                  # 28x28x1 -> 26x26x8
pool = MaxPool2()                  # 26x26x8 -> 13x13x8
softmax = Softmax(13 * 13 * 8, 10) # 13x13x8 -> 10

def forward(image, label):
    '''
    Completes a forward pass of the CNN and calculates the accuracy and
    cross-entropy loss.
    - image is a 2d numpy array
    - label is a digit
    '''
    # We transform the image from [0, 255] to [-0.5, 0.5] to make it easier
    # to work with. This is standard practice.
    out = conv.forward((image / 255) - 0.5)
    out = pool.forward(out)
    out = softmax.forward(out)

    # Calculate cross-entropy loss and accuracy. np.log() is the natural log.
    loss = -np.log(out[label])
    acc = 1 if np.argmax(out) == label else 0

    return out, loss, acc
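
# A quick usage sketch (our addition, reusing the mnist slices above): run one
# test image through the pipeline and inspect the outputs.
out, loss, acc = forward(test_images[0], test_labels[0])
print(out.shape, loss, acc)  # (10,), the cross-entropy loss, and 0 or 1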
Example No. 2
# Disabled alternative: scikit-learn-style splits over one-hot and
# categorical labels.
"""
y_hot = oh.transform(y)
y_cat = to_categorical(y)

train_images, test_images, train_labels, test_labels = train_test_split(X, y_hot, test_size=.2, random_state=42)
x_train, x_test, y_cat_train, y_cat_test = train_test_split(X, y_cat, test_size=.2, random_state=42)
"""

import numpy as np
import tensorflow as tf
from conv import Conv3x3
from maxpool import MaxPool2
from softmax import Softmax

# We only use the first 1k examples of each set in the interest of time.
# Feel free to change this if you want.
(train_X, train_Y), (test_X, test_Y) = tf.keras.datasets.mnist.load_data()
train_images = train_X[:1000]
train_labels = train_Y[:1000]
test_images = test_X[:1000]
test_labels = test_Y[:1000]

conv = Conv3x3(8)  # 28x28x1 -> 26x26x8
pool = MaxPool2()  # 26x26x8 -> 13x13x8
softmax = Softmax(13 * 13 * 8, 10)  # 13x13x8 -> 10


def forward(image, label):
    """
    Completes a forward pass of the CNN and calculates the accuracy and
    cross-entropy loss.
    - image is a 2d numpy array
    - label is a digit
    """
    # We transform the image from [0, 255] to [-0.5, 0.5] to make it easier
    # to work with. This is standard practice.
    out = conv.forward((image / 255) - 0.5)
    out = pool.forward(out)
    out = softmax.forward(out)

    # Calculate cross-entropy loss and accuracy. np.log() is the natural log.
    loss = -np.log(out[label])
    acc = 1 if np.argmax(out) == label else 0

    return out, loss, acc
Example No. 3
import mnist
import numpy as np
from PIL import Image
from conv import Conv3x3
from maxpool import MaxPool2
from softmax import Softmax

train_images = mnist.train_images()[:100]
train_labels = mnist.train_labels()[:100]
test_images = mnist.test_images()[:1000]
test_labels = mnist.test_labels()[:1000]

conv = Conv3x3(8)
pool = MaxPool2()
softmax = Softmax(13 * 13 * 8, 10)


def forward(image, label):
    out = conv.forward((image / 255) - 0.5)
    out = pool.forward(out)
    out = softmax.forward(out)

    loss = -np.log(out[label])
    acc = 1 if np.argmax(out) == label else 0

    return out, loss, acc


def train(image, label, lr=.005):
    out, loss, acc = forward(image, label)

    # Calculate the initial gradient of the loss w.r.t. the softmax output.
    gradient = np.zeros(10)
    gradient[label] = -1 / out[label]

    # Backprop through the layers in reverse order (cf. Example No. 6).
    gradient = softmax.backprop(gradient, lr)
    gradient = pool.backprop(gradient)
    gradient = conv.backprop(gradient, lr)

    return loss, acc
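

# A minimal driver sketch (our addition): shuffle the training slice each
# epoch and report running stats, mirroring the evaluation loop in Example
# No. 5. The epoch count of 3 is an arbitrary choice.
for epoch in range(3):
    print('--- Epoch %d ---' % (epoch + 1))
    permutation = np.random.permutation(len(train_images))
    loss, num_correct = 0, 0
    for i, (im, label) in enumerate(zip(train_images[permutation],
                                        train_labels[permutation])):
        l, acc = train(im, label)
        loss += l
        num_correct += acc
        if i % 100 == 99:
            print('[Step %d] Past 100 steps: Average Loss %.3f | Accuracy: %d%%' %
                  (i + 1, loss / 100, num_correct))
            loss, num_correct = 0, 0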
Example No. 4
import os
import time

import mnist
import numpy as np

test_labels = mnist.test_labels()[:300]

permutation = np.random.permutation(len(train_images))

train_images = train_images[permutation]
train_labels = train_labels[permutation]
num_class = np.max(train_labels) + 1

if len(train_images[0].shape) == 2:
    h, w = train_images[0].shape
    image_channels = 1
else:
    h, w, image_channels = train_images[0].shape

## model initialization
conv = Conv3x3(num_filters, filter_h, filter_w, image_channels, rank)
pool = MaxPool2()
softmax = Softmax(int((h - 2) / 2) * int((w - 2) / 2) * num_filters, num_class)
##### Uncomment this block to check the 2-layer CPAC-CNN #####
#conv2 = Conv3x3(num_filters, filter_h, filter_w, num_filters, rank)  # 13x13x8 -> 11x11x8
#softmax = Softmax(int(((h - 2) / 2 - 2)) * int(((w - 2) / 2 - 2)) * num_filters, num_class)
###############################################################
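
# Why these sizes (our note): a valid 3x3 conv shrinks each spatial dimension
# by 2 (h -> h - 2) and the 2x2 max pool halves the result, so the flattened
# softmax input has int((h - 2) / 2) * int((w - 2) / 2) * num_filters entries;
# for 28x28 MNIST that is 13 * 13 * num_filters. The 2-layer variant above
# convolves once more before flattening, hence the extra "- 2" per dimension.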

##result directory
par_root = "./result"
file_root = par_root + '/' + 'mnist_single_' + str(num_filters) + '_' + str(
    rank) + '_' + str(num_epoch) + '_' + time.strftime("%Y%m%d-%H%M%S")
os.mkdir(file_root)
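# Note (our addition): os.mkdir raises FileNotFoundError if ./result does not
# exist yet and FileExistsError on reruns; a more forgiving alternative is
#     os.makedirs(file_root, exist_ok=True)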

##log file
file_name = 'mnist_single_' + str(num_filters) + '_' + str(rank) + '_' + str(
    num_epoch) + '_' + time.strftime("%Y%m%d-%H%M%S")
Example No. 5
import numpy as np


class Conv3x3:
    # A Convolution layer using 3x3 filters.

    def __init__(self, num_filters):
        self.num_filters = num_filters
        # filters is a 3d array with dimensions (num_filters, 3, 3).
        # We divide by 9 to reduce the variance of our initial values.
        self.filters = np.random.randn(num_filters, 3, 3) / 9

    def iterate_regions(self, image):
        '''
        Generates all possible 3x3 image regions using valid padding.
        - image is a 2d numpy array
        '''
        h, w = image.shape

        for i in range(h - 2):
            for j in range(w - 2):
                im_region = image[i:(i + 3), j:(j + 3)]
                yield im_region, i, j

    def forward(self, input):
        '''
        Performs a forward pass of the conv layer using the given input.
        Returns a 3d numpy array with dimensions (h - 2, w - 2, num_filters).
        - input is a 2d numpy array
        '''
        h, w = input.shape
        output = np.zeros((h - 2, w - 2, self.num_filters))

        for im_region, i, j in self.iterate_regions(input):
            output[i, j] = np.sum(im_region * self.filters, axis=(1, 2))

        return output
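

# Sanity-check sketch (our addition): Conv3x3.forward computes a valid-mode
# cross-correlation per filter, so it can be verified against
# scipy.signal.correlate2d, assuming scipy is available:
#
#     from scipy.signal import correlate2d
#     conv = Conv3x3(8)
#     x = np.random.randn(28, 28)
#     ref = np.stack([correlate2d(x, f, mode='valid')
#                     for f in conv.filters], axis=-1)
#     assert np.allclose(conv.forward(x), ref)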


# In[ ]:


import mnist
from conv import Conv3x3

# The mnist package handles the MNIST dataset for us!
# Learn more at https://github.com/datapythonista/mnist
train_images = mnist.train_images()
train_labels = mnist.train_labels()

conv = Conv3x3(8)
output = conv.forward(train_images[0])
print(output.shape) # (26, 26, 8)


# In[ ]:


# POOLING

class MaxPool2:
    # A Max Pooling layer using a pool size of 2.

    def iterate_regions(self, image):
        '''
        Generates non-overlapping 2x2 image regions to pool over.
        - image is a 3d numpy array with dimensions (h, w, num_filters)
        '''
        h, w, _ = image.shape
        new_h = h // 2
        new_w = w // 2

        for i in range(new_h):
            for j in range(new_w):
                im_region = image[(i * 2):(i * 2 + 2), (j * 2):(j * 2 + 2)]
                yield im_region, i, j

    def forward(self, input):
        '''
        Performs a forward pass of the maxpool layer using the given input.
        Returns a 3d numpy array with dimensions (h / 2, w / 2, num_filters).
        - input is a 3d numpy array with dimensions (h, w, num_filters)
        '''
        h, w, num_filters = input.shape
        output = np.zeros((h // 2, w // 2, num_filters))

        for im_region, i, j in self.iterate_regions(input):
            output[i, j] = np.amax(im_region, axis=(0, 1))

        return output
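

# A tiny usage sketch (our addition): pooling a 4x4x1 volume yields a 2x2x1
# volume holding the max of each non-overlapping 2x2 block.
demo = np.arange(16, dtype=float).reshape(4, 4, 1)
print(MaxPool2().forward(demo)[:, :, 0])  # [[ 5.  7.] [13. 15.]]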


# In[ ]:


# SOFTMAX

class Softmax:
    # A standard fully-connected layer with softmax activation.

    def __init__(self, input_len, nodes):
        # We divide by input_len to reduce the variance of our initial values.
        self.weights = np.random.randn(input_len, nodes) / input_len
        self.biases = np.zeros(nodes)

    def forward(self, input):
        '''
        Performs a forward pass of the softmax layer using the given input.
        Returns a 1d numpy array containing the respective probability values.
        - input can be any array with any dimensions.
        '''
        input = input.flatten()

        input_len, nodes = self.weights.shape

        totals = np.dot(input, self.weights) + self.biases
        exp = np.exp(totals)
        return exp / np.sum(exp, axis=0)
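

# Quick sanity check (our addition): the outputs form a probability
# distribution, so they should sum to 1 for any input.
probs = Softmax(4, 10).forward(np.ones(4))
print(probs.shape, probs.sum())  # (10,) and 1.0 up to floating-point error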


# In[ ]:


import mnist
import numpy as np
from conv import Conv3x3
from maxpool import MaxPool2
from softmax import Softmax

# We only use the first 1k testing examples (out of 10k total)
# in the interest of time. Feel free to change this if you want.
test_images = mnist.test_images()[:1000]
test_labels = mnist.test_labels()[:1000]

conv = Conv3x3(8)                  # 28x28x1 -> 26x26x8
pool = MaxPool2()                  # 26x26x8 -> 13x13x8
softmax = Softmax(13 * 13 * 8, 10) # 13x13x8 -> 10

def forward(image, label):
    '''
    Completes a forward pass of the CNN and calculates the accuracy and
    cross-entropy loss.
    - image is a 2d numpy array
    - label is a digit
    '''
    # We transform the image from [0, 255] to [-0.5, 0.5] to make it easier
    # to work with. This is standard practice.
    out = conv.forward((image / 255) - 0.5)
    out = pool.forward(out)
    out = softmax.forward(out)

    # Calculate cross-entropy loss and accuracy. np.log() is the natural log.
    loss = -np.log(out[label])
    acc = 1 if np.argmax(out) == label else 0

    return out, loss, acc

print('MNIST CNN initialized!')

loss = 0
num_correct = 0
for i, (im, label) in enumerate(zip(test_images, test_labels)):
    # Do a forward pass.
    _, l, acc = forward(im, label)
    loss += l
    num_correct += acc

    # Print stats every 100 steps.
    if i % 100 == 99:
        print(
            '[Step %d] Past 100 steps: Average Loss %.3f | Accuracy: %d%%' %
            (i + 1, loss / 100, num_correct)
        )
        loss = 0
        num_correct = 0
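
# To report an overall figure as well (our addition), accumulate separate
# totals that are never reset inside the loop:
#
#     total_loss += l
#     total_correct += acc
#
# and print total_loss / len(test_images) and total_correct / len(test_images)
# once the loop finishes.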
Example No. 6
import sys

import numpy as np


def debug_cnn(n_iter, version, learn_rate):

#    from importlib import reload 
    path = "/home/konstantin/Documents/master_arbeit/"
    sys.path.append(path)
    import functions as fun
    #import os
    
    if version == "changed":
        print("changed blog version is being used")
        path_blog_changed = "/home/konstantin/Documents/master_arbeit/cnn_python/cnn-from-scratch-changed/"
        sys.path.append(path_blog_changed)
        #os.listdir(path_blog_changed)    
        from conv import Conv3x3
        from maxpool import MaxPool2
        from softmax import Softmax
    
    if version == "original":
        print("original version is being used")
        path_blog_original = "/home/konstantin/Documents/master_arbeit/cnn_python/original_blog/cnn-from-scratch"
        sys.path.append(path_blog_original)
        from conv import Conv3x3
        from maxpool import MaxPool2
        from softmax import Softmax
#        Conv3x3 = reload(Conv3x3)
#        MaxPool2 = reload(MaxPool2)
#        Softmax = reload(Softmax)
 
        
        
    num_filters = 8
    np.random.seed(seed=444)
    own_filter_conv = np.random.randn(num_filters, 3, 3) / 9
    own_filter_conv = np.round(own_filter_conv)

    dim_maxpool = 13 * 13 * 8
    np.random.seed(seed=666)
    own_weight_soft = np.random.randn(dim_maxpool, 10) / dim_maxpool
    own_bias_soft = np.zeros(10)
    
    conv = Conv3x3(8)  # 28x28x1 -> 26x26x8
    pool = MaxPool2()  # 26x26x8 -> 13x13x8
    dim_maxpool = 13 * 13 * 8
    softmax = Softmax(dim_maxpool, 10)
    
    for i in range(n_iter):

        image = test_images[i] / 255 - 0.5
        label = test_labels[i]
        
        own_feature_map, own_filter_conv = fun.convolute(image=image, filter_matrix=own_filter_conv)
        own_maxpool_map = fun.maxpool(feature_map=own_feature_map)
        own_probs, own_inter_soft = fun.softmax(own_maxpool_map, 
                                        weight_matrix=own_weight_soft, 
                                        bias_vector=own_bias_soft)
        own_weight_soft, own_bias_soft, own_gradient_soft = fun.backprop_softmax(
            inter_soft=own_inter_soft,
            probabilities=own_probs,
            label=label,
            learn_rate=learn_rate)
        own_gradient_max = fun.backprop_maxpool(feature_map=own_feature_map, 
                                                gradient=own_gradient_soft)
        own_filter_conv = fun.backprop_conv(image=image, filter_conv=own_filter_conv,
                                        gradient=own_gradient_max, learn_rate=learn_rate)
     
       
    # run model from blog with same data

        blog_out_conv = conv.forward(image)
        #print(out_conv)   
        blog_out_max = pool.forward(blog_out_conv)
        blog_out_soft = softmax.forward(blog_out_max) #
        #print(blog_out_soft)
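        # Note (our addition): for loss = -ln(out[label]), d(loss)/d(out_k)
        # is -1/out[label] when k == label and 0 otherwise; gradient_L below
        # holds exactly that initial gradient.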
        gradient_L = np.zeros(10)
        gradient_L[label] = -1 / blog_out_soft[label]
        blog_gradient_soft = softmax.backprop(
                gradient_L, learn_rate)
        blog_gradient_max = pool.backprop(blog_gradient_soft)
        conv.backprop(blog_gradient_max, learn_rate)
        
        
        
        ################## compare feedforward ####################################
        ###########################################################################
        print("This is iteration", i)
        if np.sum(own_feature_map == blog_out_conv) == np.prod(own_feature_map.shape):
            print("YEAAAH! FeatureMaps are the same")
        else:
            print("NOOOO! featuremaps are not the same")
            
       # conv.filters == filter_conv # 
        # after first iteration these are not the same anymore,
        # since they get updated
        if np.sum(own_maxpool_map == blog_out_max) == np.prod(blog_out_max.shape):
            print("YEAHHH! maxpool is the same")
        else:
            print("NOOOO! maxpool is not the same")
        if np.sum(own_probs == blog_out_soft) == np.prod(blog_out_soft.shape):
            print("YEAAAH! predicted probabilities are the same")
        else:
            print("NOOOO! predicted probabilities are not the same")
            print("Own probabilities")
            print(own_probs)
            print("Blog probabilities")
            print(blog_out_soft)
         #   break
        ######################### compare backprop #################################
        ############################################################################
         
        ######## softmax: gradients:    
        if np.sum(own_gradient_soft == blog_gradient_soft) == np.prod(blog_gradient_soft.shape):
            print("YEAHHHH! gradients softmax are the same")
        else:
            print("NOOOO! gradients softmax are not the same")
            
        ## weight updates weight matrix softmax layer
#        if np.sum(own_weight_soft == blog_weights_updated) == np.prod(own_weight_soft.shape):
#            print("Yeaaah! updated weightmatrix softmax is the same")
#        else:
#            print("updated weightmatrix softmax is not the same")
#        ## weight updates bias vector
#        if np.sum(own_bias_soft == blog_biases_updated) == np.prod(blog_biases_updated.shape):
#            print("Yeaaah! Updated bias vector softmax is the same")
#        else:
#            print("updated bias vector is not the same")
        #### maxpool
        if np.sum(own_gradient_max == blog_gradient_max) == np.prod(blog_gradient_max.shape):
            print("YEAHHHH! gradients maxpool layer are the same")
        else:
            print("NOOOO! updated gradients maxpool are not the same")
        ## conv
#        if np.sum(own_filter_conv == blog_filter_update) == np.prod(own_filter_conv.shape):
#            print("YEAAAHHH! updated filter convlayer are the same")
#        else:
#            print("NOOOOO! updated filter conv layer is not the same")
#            
            
        # So! After two runs the predicted probabilities are already different, why?
        
        
    return None
    
Example No. 7
from getdata import getMnistData
from conv import Conv3x3
from maxpool import MaxPool2
from softmax import Softmax
import numpy as np

train_images, train_labels, test_images, test_labels = getMnistData(
    reshaped=False)
train_images = train_images[:3000]
train_labels = train_labels[:3000]
test_images = test_images[:1000]
test_labels = test_labels[:1000]

conv_layer = Conv3x3(num_filters=8)  # 28x28x1 => 26x26x8
pooling_layer = MaxPool2()  # 26x26x8 => 13x13x8
softmax_layer = Softmax(13 * 13 * 8, 10)  # 13x13x8 => 10


def forward(image, label):
    '''
    Completes a forward pass of the CNN and
    calculates the accuracy and cross-entropy loss.
    - image is a 2-d numpy array
    - label is a digit
    '''

    # Converting image from [0, 255] => [-0.5, 0.5]

    out = conv_layer.forward((image / 255) - 0.5)
    out = pooling_layer.forward(out)
    out = softmax_layer.forward(out)

    # Calculate cross-entropy loss and accuracy. np.log() is the natural log.
    loss = -np.log(out[label])
    acc = 1 if np.argmax(out) == label else 0

    return out, loss, acc