Example #1
0
def main():
    """Extract Saab features for the train and test sets and save them to feat.pkl."""
    # load the pre-trained PCA/Saab parameters
    # NOTE(review): pickle.load on an untrusted file is unsafe — only load trusted params.
    with open('pca_params.pkl', 'rb') as fr:
        pca_params = pickle.load(fr, encoding='latin1')

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    feat = {}

    # Training: run the Saab feature-extraction subnet on the training images
    print('--------Training--------')
    feature = saab.initialize(train_images, pca_params)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat['training_feature'] = feature

    # Testing: same subnet applied to the test images
    print('--------Testing--------')
    feature = saab.initialize(test_images, pca_params)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat['testing_feature'] = feature

    # save both feature sets; `with` guarantees the handle is closed on error
    with open('feat.pkl', 'wb') as fw:
        pickle.dump(feat, fw)
Example #2
0
def main():
    """Extract E5S5 Saab features for the training set in batches and save them."""

    # load the pre-trained PCA/Saab parameters
    with open('pca_params_E5S5.pkl', 'rb') as fr:
        pca_params = pickle.load(fr)

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    # process the training set in fixed-size batches to bound memory use;
    # any trailing remainder smaller than batch_size is skipped (floor division)
    batch_size = 100
    num_samples = int(len(train_images) / batch_size)

    # Training
    print('--------Training--------')
    features = []
    for i in range(num_samples):
        trn_images = train_images[i * batch_size:(i + 1) * batch_size, :]
        feature = saab.initialize(trn_images, pca_params)
        # flatten each batch to (batch_size, num_features)
        features.append(feature.reshape(feature.shape[0], -1))
    feature = np.vstack(features)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {'feature': feature}

    # save data
    with open('feat_E5S5.pkl', 'wb') as fw:
        pickle.dump(feat, fw)
def main():
    """Extract Saab features for four local PNG images and save them, plus the
    feature-map shape, to disk."""
    # load the pre-trained PCA/Saab parameters
    with open('pca_params.pkl', 'rb') as fr:
        pca_params = pickle.load(fr)

    # read data: four 32x32 grayscale images stacked into (4, 32, 32, 1)
    #train_images, train_labels, test_images, test_labels, class_list = data.import_data("0-9")
    path = '/Users/brinalbheda/Desktop/HW6/%d.png'
    train_images = np.stack([
        misc.imread(path % k).reshape(32, 32, 1) for k in range(1, 5)
    ]).astype('float32')

    print('Training image size:', train_images.shape)
    #print('Testing_image size:', test_images.shape)

    # Training
    print('--------Training--------')
    feature = saab.initialize(train_images, pca_params)
    # persist the (unflattened) feature-map shape for later use
    np.save('fs', feature.shape)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {'feature': feature}

    # save data
    with open('feat.pkl', 'wb') as fw:
        pickle.dump(feat, fw)
Example #4
0
def main():
    """Run the trained LLSR classifier on E5S5 Saab test features and report accuracy."""

    # load the pre-trained PCA/Saab parameters
    # NOTE(review): pickle.load on untrusted files is unsafe — only load trusted params.
    with open('pca_params_E5S5.pkl', 'rb') as fr:
        pca_params = pickle.load(fr, encoding='latin1')

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Testing_image size:', test_images.shape)

    # testing: extract Saab features and flatten to (num_samples, num_features)
    print('--------Testing--------')
    feature = saab.initialize(test_images, pca_params)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')

    # feature normalization (disabled)
    # std_var=(np.std(feature, axis=0)).reshape(1,-1)
    # feature=feature/std_var

    num_clusters = [120, 80, 10]
    use_classes = 10

    # load the trained least-squares-regression weights and biases per layer
    with open('llsr_weights_E5S5.pkl', 'rb') as fr:
        weights = pickle.load(fr, encoding='latin1')
    with open('llsr_bias_E5S5.pkl', 'rb') as fr:
        biases = pickle.load(fr, encoding='latin1')

    for k in range(len(num_clusters)):

        # least square regression: affine map to the next layer's width
        weight = weights['%d LLSR weight' % k]
        bias = biases['%d LLSR bias' % k]
        feature = np.matmul(feature, weight) + bias
        print(k, ' layer LSR weight shape:', weight.shape)
        print(k, ' layer LSR bias shape:', bias.shape)
        print(k, ' layer LSR output shape:', feature.shape)

        if k != len(num_clusters) - 1:
            # ReLU: vectorized clamp replaces the original O(n*m) Python double loop
            feature = np.maximum(feature, 0)
        else:
            # final layer: class scores -> predicted labels -> accuracy
            pred_labels = np.argmax(feature, axis=1)
            acc_test = sklearn.metrics.accuracy_score(test_labels, pred_labels)
            print('testing acc is {}'.format(acc_test))

    # save the final-layer outputs (class scores)
    with open('test_pred_E5S5.pkl', 'wb') as fw:
        pickle.dump(feature, fw)
Example #5
0
def main():
    """Filter training images with a Laws-style kernel, extract Saab features
    for the first 3000 samples, and save them to feat_10.pkl."""
    # load the pre-trained PCA/Saab parameters
    with open('pca_params_10.pkl', 'rb') as fr:
        pca_params = pickle.load(fr)

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    # Laws 1-D filter bank
    Edge_filter = np.array([-1, -2, 0, 2, 1])
    Level_filter = np.array([1, 4, 6, 4, 1])
    Spot_filter = np.array([-1, 0, 2, 0, -1])
    Wave_filter = np.array([-1, 2, 0, -2, 1])
    Ripple_filter = np.array([1, -4, 6, -4, 1])

    # NOTE(review): .T is a no-op on a 1-D array, so this is an element-wise
    # 1-D product, not the 2-D Laws kernel np.outer(Wave_filter, Ripple_filter).
    # Kept as-is because pca_params_10.pkl was presumably fit with this same
    # kernel — confirm against the training pipeline before changing.
    Filter = Wave_filter.T * Ripple_filter
    #Filter2=Level_filter.T*Edge_filter
    #Filter1=Level_filter.T*Level_filter

    # apply the filter to channel 0 of every training image
    altered_train = train_images.copy()
    for i in range(train_images.shape[0]):
        altered_train[i, :, :, 0] = cv2.filter2D(train_images[i, :, :, 0], -1,
                                                 Filter)
    train_images = altered_train

    # Training
    print('--------Training--------')
    train_images = train_images[0:3000, :, :, :]  # Taking only 3000 samples
    train_labels = train_labels[0:3000]
    feature = saab.initialize(train_images, pca_params)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {'feature': feature}

    # save data
    with open('feat_10.pkl', 'wb') as fw:
        pickle.dump(feat, fw)
Example #6
0
def main():
    """Extract Saab features for four local PNG images (scaled to [0, 1]) and
    save them, plus the feature-map shape, to disk."""
    # load the pre-trained PCA/Saab parameters
    with open('pca_params.pkl', 'rb') as fr:
        pca_params = pickle.load(fr)

    # read data: four 32x32 grayscale images, scaled to [0, 1]
    images = [
        misc.imread('%d.png' % k).reshape(32, 32, 1) / 255. for k in range(1, 5)
    ]
    train_images = np.stack(images).astype('float32')
    #	train_images, train_labels, test_images, test_labels, class_list = data.import_data("0-9")
    print('Training image size:', train_images.shape)
    #	print('Testing_image size:', test_images.shape)

    # Training
    print('--------Training--------')
    feature = saab.initialize(train_images, pca_params)
    # persist the (unflattened) feature-map shape for later use
    np.save('fs', feature.shape)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {'feature': feature}

    # save data
    with open('feat.pkl', 'wb') as fw:
        pickle.dump(feat, fw)
def main():
    """Evaluate the trained FF-CNN/LLSR classifier on filtered test images
    (samples 3000-5999) and report per-layer and final accuracy."""
    # load the pre-trained PCA/Saab parameters
    with open('pca_params_10.pkl', 'rb') as fr:
        pca_params = pickle.load(fr, encoding='latin1')

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data("0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    # Laws 1-D filter bank
    Level_filter = np.array([1, 4, 6, 4, 1])
    Edge_filter = np.array([-1, -2, 0, 2, 1])
    Spot_filter = np.array([-1, 0, 2, 0, -1])
    Wave_filter = np.array([-1, 2, 0, -2, 1])
    Ripple_filter = np.array([1, -4, 6, -4, 1])

    # NOTE(review): .T is a no-op on a 1-D array, so this is an element-wise
    # 1-D product, not the 2-D Laws kernel np.outer(Wave_filter, Ripple_filter).
    # Kept as-is for consistency with the saved pca_params/llsr weights.
    Filter = Wave_filter.T * Ripple_filter

    # BUG FIX: the filtered-test buffer was initialized from train_images,
    # which gave it the training set's length and silently carried stale
    # training pixels into test_images beyond the filled range.
    altered_test = test_images.copy()
    for i in range(test_images.shape[0]):
        altered_test[i, :, :, 0] = cv2.filter2D(test_images[i, :, :, 0], -1, Filter)
    test_images = altered_test

    # evaluate on test samples 3000..5999 only
    test_images = test_images[3000:6000, :, :, :]
    test_labels = test_labels[3000:6000]

    # testing: extract Saab features and flatten to (num_samples, num_features)
    print('--------Testing--------')
    feature = saab.initialize(test_images, pca_params)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')

    # feature normalization (disabled)
    #std_var=(np.std(feature, axis=0)).reshape(1,-1)
    #feature=feature/std_var

    num_clusters = [120, 80, 10]
    use_classes = 10
    # load the trained least-squares-regression weights and biases per layer
    with open('llsr_weights_10.pkl', 'rb') as fr:
        weights = pickle.load(fr)
    with open('llsr_bias_10.pkl', 'rb') as fr:
        biases = pickle.load(fr)

    for k in range(len(num_clusters)):
        # least square regression: affine map to the next layer's width
        weight = weights['%d LLSR weight' % k]
        bias = biases['%d LLSR bias' % k]
        feature = np.matmul(feature, weight) + bias
        print(k, ' layer LSR weight shape:', weight.shape)
        print(k, ' layer LSR bias shape:', bias.shape)
        print(k, ' layer LSR output shape:', feature.shape)

        if k != len(num_clusters) - 1:
            # per-cluster purity accuracy on this intermediate layer
            pred_labels = np.argmax(feature, axis=1)
            num_clas = np.zeros((num_clusters[k], use_classes))
            for i in range(num_clusters[k]):
                for t in range(use_classes):
                    for j in range(feature.shape[0]):
                        # BUG FIX: these are test samples, so compare against
                        # test_labels (the original indexed train_labels).
                        if pred_labels[j] == i and test_labels[j] == t:
                            num_clas[i, t] += 1
            acc_train = np.sum(np.amax(num_clas, axis=1)) / feature.shape[0]
            print(k, ' layer LSR testing acc is {}'.format(acc_train))

            # ReLU: vectorized clamp replaces the elementwise double loop
            feature = np.maximum(feature, 0)
        else:
            # final layer: class scores -> predicted labels -> accuracy
            pred_labels = np.argmax(feature, axis=1)
            acc_train = sklearn.metrics.accuracy_score(test_labels, pred_labels)
            print('testing acc is {}'.format(acc_train))

    # save the final-layer outputs (class scores)
    with open('ffcnn_feature_test10.pkl', 'wb') as fw:
        pickle.dump(feature, fw)
def main():
    """Extract Saab features from one image, reconstruct the image by inverting
    the two-layer Saab transform, and report the reconstruction PSNR."""
    # load the pre-trained PCA/Saab parameters
    with open('pca_params.pkl', 'rb') as fr:
        pca_params = pickle.load(fr)

    # read the input as a 2-D grayscale image, then shape to (1, h, w, 1)
    image = cv2.imread(r'C:\Users\lenovo\Documents\4.png', 0)
    # BUG FIX: the original read `img.shape` before `img` existed (NameError).
    h, w = image.shape
    img = image.reshape(1, h, w, 1)

    print('--------Training--------')
    feature, pca_params = saab.initialize(img, pca_params)
    n, h, w, c = feature.shape
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {}
    feat['feature'] = feature

    #print(feat)
    print(pca_params)
    print(pca_params['Layer_1/kernel'].shape)

    # number of Saab channels per layer
    num_channels = [8, 16]

    # Reconstruction: invert the Saab transform layer by layer (1, then 0)
    feature = feature.reshape(n, h, w, c)
    transformed = feature.reshape(h * w, -1)

    for i in range(1, -1, -1):
        feature_expectation = pca_params['Layer_%d/feature_expectation' % i]
        dc = pca_params['Layer_%d/dc' % i]
        kernels = pca_params['Layer_%d/kernel' % i]

        if i == 1:
            # layer 1 carries a DC bias on the first kernel; undo it before
            # inverting the (transposed) kernel matrix
            bias = pca_params['Layer_%d/bias' % i]
            e = np.zeros((1, kernels.shape[0]))
            e[0, 0] = 1
            transformed += e * bias
            sample_patches_centered_with_bias = np.matmul(
                transformed, np.linalg.pinv(np.transpose(kernels)))
            sample_patches_centered = (sample_patches_centered_with_bias -
                                       np.sqrt(16 * num_channels[0]) * bias)
        else:
            transformed = transformed.reshape(64, num_channels[0])
            sample_patches_centered = np.matmul(
                transformed, np.linalg.pinv(np.transpose(kernels)))

        # add the per-patch mean back and restore the spatial tiling
        sample_patches = sample_patches_centered + feature_expectation
        ac = int(np.sqrt(sample_patches.shape[0]))
        sample_patches = sample_patches.reshape(1, ac, ac, -1)
        h, w = ac, ac
        # un-tile 4x4 patches back into the spatial grid
        if i == 1:
            sample_patches = sample_patches.reshape(1, 2, 2, 1, 1, 4, 4, num_channels[0])
            transformed = sample_patches.transpose(0, 1, 3, 5, 2, 4, 6, 7).reshape(
                1, 8, 8, num_channels[0])
        else:
            sample_patches = sample_patches.reshape(1, 8, 8, 1, 1, 4, 4, 1)
            transformed = sample_patches.transpose(0, 1, 3, 5, 2, 4, 6, 7).reshape(
                1, 32, 32, 1)

    transformed = transformed.reshape(32, 32)
    plt.imshow(transformed, cmap='gray')

    # PSNR between reconstruction and original (peak value 255).
    # BUG FIX: `image` is 2-D here, so the original image[0, :, :, 0] raised
    # IndexError; compare directly against the 2-D grayscale image.
    mse = np.mean((transformed - image) ** 2)
    PSNR = 10 * math.log10(255 ** 2 / mse)
    print(PSNR)

    # save data
    with open('feat.pkl', 'wb') as fw:
        pickle.dump(feat, fw)
Example #9
0
t2 = t2/255.
test_images.append(t2)

t3 = np.array(t3.getdata())
t3 = t3.reshape(-1,32,32,1)
t3 = t3/255.
test_images.append(t3)

t4 = np.array(t4.getdata())
t4 = t4.reshape(-1,32,32,1)
t4 = t4/255.
test_images.append(t4)

test_images= np.vstack(test_images)
test_labels = [9,0,5,3]
f = saab.initialize(test_images,pca_params)

num_layers=pca_params['num_layers']
kernel_sizes=pca_params['kernel_size']

trans= f
for i in range(num_layers-1,-1,-1):
    print('--------stage %d --------'%i)
    feature_expectation = pca_params['Layer_%d/feature_expectation'%i]
    kernels = pca_params['Layer_%d/kernel'%i]
    mean = pca_params['Layer_%d/pca_mean'%i]

    if i==0:
        # Transform to get data for the previous stage
        sample_patches_centered=np.matmul(trans, np.linalg.pinv(np.transpose(kernels))
    else:
Example #10
0
def main():
    """Extract Saab features from 4.png, reconstruct the image by inverting the
    two-layer Saab transform, and report the reconstruction PSNR."""
    # load the pre-trained PCA/Saab parameters
    with open('pca_params.pkl', 'rb') as fr:
        pca_params = pickle.load(fr)

    # read data: grayscale 32x32 images; only image 4 is used downstream
    #train_images, train_labels, class_list = data.import_data("0-9")
    train_img1 = color.rgb2gray(io.imread('1.png'))
    train_img2 = color.rgb2gray(io.imread('2.png'))
    train_img3 = color.rgb2gray(io.imread('3.png'))
    train_img4 = color.rgb2gray(io.imread('4.png'))
    train_images = train_img4
    #train_images  = train_img1;
    train_images = train_images.reshape(1, 32, 32, 1)

    print('Training image size: dtype: ', train_images.shape,
          train_images.dtype)
    #print('Testing_image size:', test_images.shape)

    # Training
    print('--------Training--------')
    feature, pca_params = saab.initialize(train_images, pca_params)

    n, h, w, c = np.shape(feature)

    # flatten to (num_samples, num_features)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    print('feature.dtype: {}'.format(feature.dtype))
    feat = {'feature': feature}

    # Reconstructing the image: invert layer 1, then layer 0
    feature = np.reshape(feature, (n, h, w, c))
    trans = np.reshape(feature, (h * w, -1))

    for i in (1, 0):
        DC_comp = pca_params['Layer_%d/dc' % i]
        f_expect = pca_params['Layer_%d/feature_expectation' % i]
        kernels = pca_params['Layer_%d/kernel' % i]
        kernelst_inv = np.linalg.pinv(np.transpose(kernels))

        if i == 0:
            trans = np.reshape(trans, (64, 8))
            spc = np.matmul(trans, kernelst_inv)
        else:
            # layer 1 carries a DC bias on the first kernel; undo it before
            # inverting the (transposed) kernel matrix
            e = np.zeros((1, np.shape(kernels)[0]))
            e[0, 0] = 1
            bias_factor = pca_params['Layer_%d/bias' % i]

            trans += e * bias_factor
            spcwb = np.matmul(trans, kernelst_inv)
            spc = spcwb - np.sqrt(128) * bias_factor

        # add the per-patch mean back and restore the spatial tiling
        s_p = spc + f_expect
        side = int(np.sqrt(np.shape(s_p)[0]))
        h = w = side
        s_p = s_p.reshape(1, side, side, -1)

        # un-tile 4x4 patches back into the spatial grid
        s_p_t = [0, 1, 3, 5, 2, 4, 6, 7]
        if i == 0:
            s_p = np.reshape(s_p, (1, 8, 8, 1, 1, 4, 4, 1))
            trans = s_p.transpose(s_p_t).reshape(1, 32, 32, 1)
        else:
            s_p = np.reshape(s_p, (1, 2, 2, 1, 1, 4, 4, 8))
            trans = s_p.transpose(s_p_t).reshape(1, 8, 8, 8)

    trans = np.reshape(trans, (32, 32))

    plt.imshow(trans, cmap='gray')

    # PSNR calculation (peak value 255)
    def PSNR_cal(I_image, y_image):
        """Peak signal-to-noise ratio between two equal-sized 2-D images."""
        # vectorized MSE replaces the original per-pixel double loop
        mse = np.mean((np.asarray(y_image) - np.asarray(I_image)) ** 2)
        return 10 * np.log10((255 * 255) / mse)

    psnr = PSNR_cal(train_img4, trans)

    print("The psnr is", psnr)
    # (a stale commented-out duplicate of the reconstruction code was removed)

    # save data
    with open('feat.pkl', 'wb') as fw:
        pickle.dump(feat, fw)