Example #1
def main():

    # load data
    fr = open('pca_params_E5S5.pkl', 'rb')
    pca_params = pickle.load(fr)
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    batch_size = 100
    num_samples = int(len(train_images) / batch_size)

    # Training
    print('--------Training--------')
    features = []
    for i in range(num_samples):
        trn_images = train_images[i * batch_size:i * batch_size +
                                  batch_size, :]
        feature = saab.initialize(trn_images, pca_params)
        feature = feature.reshape(feature.shape[0], -1)
        features.append(feature)
    feature = np.vstack(features)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {}
    feat['feature'] = feature

    # save data
    fw = open('feat_E5S5.pkl', 'wb')
    pickle.dump(feat, fw)
    fw.close()
Example #2
def main():
    # load data
    fr = open('pca_params_compact.pkl', 'rb')
    pca_params = pickle.load(fr)
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data("0-9")
    print('Training image size:', train_images.shape, 'dtype:', train_images.dtype)
    print('Testing_image size:', test_images.shape)

    # Training
    print('--------Training--------')
    train_images=np.moveaxis(train_images, 3, 1)
    feature = saab.initialize(train_images, pca_params)
    # 60000x400 (16*5*5)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    print('feature.dtype: {}'.format(feature.dtype))
    feat = {}
    feat['feature'] = feature

    # save data
    fw = open('feat_compact.pkl', 'wb')
    pickle.dump(feat, fw)
    fw.close()
Example #3
def main():
    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        FLAGS.use_classes)
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    kernel_sizes = saab.parse_list_string(FLAGS.kernel_sizes)
    stride = FLAGS.stride
    if FLAGS.num_kernels:
        num_kernels = saab.parse_list_string(FLAGS.num_kernels)
    else:
        num_kernels = None
    energy_percent = FLAGS.energy_percent
    use_num_images = FLAGS.use_num_images
    print('Parameters:')
    print('use_classes:', class_list)
    print('Kernel_sizes:', kernel_sizes)
    print('Stride:', stride)
    print('Number_kernels:', num_kernels)
    print('Energy_percent:', energy_percent)
    print('Number_use_images:', use_num_images)

    pca_params = saab.multi_Saab_transform(train_images, train_labels,
                                           kernel_sizes=kernel_sizes,
                                           stride=stride,
                                           num_kernels=num_kernels,
                                           energy_percent=energy_percent,
                                           use_num_images=use_num_images,
                                           use_classes=class_list)

    # save data
    fw = open('pca_params.pkl', 'wb')
    pickle.dump(pca_params, fw)
    fw.close()
Example #4
def main():
    # load data
    fr=open('pca_params33.pkl','rb')  
    pca_params=pickle.load(fr)
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data("0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)
    #print('pca_params:',pca_params)
	
    # Training

    f=[]
    feature=[]
    for i in range(0,60000,200):
        print('--------Training--------')
        f=saab3.initialize(train_images[i:i+200], pca_params) 
        f=f.reshape(f.shape[0],-1)
        feature.append(f)
        
    feature= np.vstack(feature)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat={}
    feat['feature']=feature
	
    # save data
    fw=open('feat33.pkl','wb')    
    pickle.dump(feat, fw)    
    fw.close()
Example #5
def main():
    # load data
    fr = open('pca_params.pkl', 'rb')
    pca_params = pickle.load(fr, encoding='latin1')
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    feat = {}
    # Training
    print('--------Training--------')
    feature = saab.initialize(train_images, pca_params)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat['training_feature'] = feature

    print('--------Testing--------')
    feature = saab.initialize(test_images, pca_params)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat['testing_feature'] = feature

    # save data
    fw = open('feat.pkl', 'wb')
    pickle.dump(feat, fw)
    fw.close()
Example #6
def main():

    # load data
    fr = open('pca_params_E5S5.pkl', 'rb')
    pca_params = pickle.load(fr, encoding='latin1')
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Testing_image size:', test_images.shape)

    # testing
    print('--------Testing--------')
    feature = saab.initialize(test_images, pca_params)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')

    # feature normalization
    # std_var=(np.std(feature, axis=0)).reshape(1,-1)
    # feature=feature/std_var

    num_clusters = [120, 80, 10]
    use_classes = 10

    fr = open('llsr_weights_E5S5.pkl', 'rb')
    weights = pickle.load(fr, encoding='latin1')
    fr.close()

    fr = open('llsr_bias_E5S5.pkl', 'rb')
    biases = pickle.load(fr, encoding='latin1')
    fr.close()

    for k in range(len(num_clusters)):

        # least square regression
        weight = weights['%d LLSR weight' % k]
        bias = biases['%d LLSR bias' % k]
        feature = np.matmul(feature, weight) + bias
        print(k, ' layer LSR weight shape:', weight.shape)
        print(k, ' layer LSR bias shape:', bias.shape)
        print(k, ' layer LSR output shape:', feature.shape)

        if k != len(num_clusters) - 1:
            # Relu
            for i in range(feature.shape[0]):
                for j in range(feature.shape[1]):
                    if feature[i, j] < 0:
                        feature[i, j] = 0
        else:
            pred_labels = np.argmax(feature, axis=1)
            acc_test = sklearn.metrics.accuracy_score(test_labels, pred_labels)
            print('testing acc is {}'.format(acc_test))

    fw = open('test_pred_E5S5.pkl', 'wb')
    pickle.dump(feature, fw)
    fw.close()
Example #7
def run():
    v_df = import_data()

    real_front = [34.575, 39.075, 41.775, 39.625, 39.725]
    real_back = [33.075, 36.225, 38.675, 37.375, 37.725]

    real = pd.DataFrame(data={'front': real_front, 'back': real_back})
    result = train(v_df, real)

    print(result)
Example #8
def main():
    # read data
    dataset = sys.argv[1]
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        dataset, "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)
    myCNN = InterpretableCNN.InterpretableCNN(train_images, train_labels,
                                              test_images, test_labels,
                                              class_list)
    myCNN.getKernel()
    myCNN.getFeature()
    myCNN.getWeight()
Example #9
def main():
    fr = open('llsr_weights.pkl', 'rb')
    weights = pickle.load(fr, encoding='latin1')
    fr.close()
    fr = open('llsr_bias.pkl', 'rb')
    biases = pickle.load(fr, encoding='latin1')
    fr.close()
    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    # load feature
    fr = open('feat.pkl', 'rb')
    feat = pickle.load(fr, encoding='latin1')
    fr.close()
    feature = feat['testing_feature']
    test_labels = feat['testing_labels']
    feature = np.absolute(feature)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')

    # feature normalization
    std_var = (np.std(feature, axis=0)).reshape(1, -1)
    feature = feature / std_var
    # relu
    for i in range(feature.shape[0]):
        for j in range(feature.shape[1]):
            if feature[i, j] < 0:
                feature[i, j] = 0

    num_clusters = [200, 100, 10]
    use_classes = 10
    for k in range(len(num_clusters)):
        weight = weights['%d LLSR weight' % k]
        bias = biases['%d LLSR bias' % k]
        feature = np.matmul(feature, weight) + bias
        print(k, ' layer LSR weight shape:', weight.shape)
        print(k, ' layer LSR output shape:', feature.shape)
        if k != len(num_clusters) - 1:
            # Relu
            for i in range(feature.shape[0]):
                for j in range(feature.shape[1]):
                    if feature[i, j] < 0:
                        feature[i, j] = 0
        else:
            pred_labels = np.argmax(feature, axis=1)
            acc_test = sklearn.metrics.accuracy_score(test_labels, pred_labels)
            print('testing acc is {}'.format(acc_test))
Example #10
def main():
    # load data
    fr = open('pca_params_10.pkl', 'rb')
    pca_params = pickle.load(fr)
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    Edge_filter = np.array([-1, -2, 0, 2, 1])
    Level_filter = np.array([1, 4, 6, 4, 1])

    Spot_filter = np.array([-1, 0, 2, 0, -1])
    Wave_filter = np.array([-1, 2, 0, -2, 1])
    Ripple_filter = np.array([1, -4, 6, -4, 1])

    # 2-D W5R5 Laws kernel (outer product of the 1-D filters)
    Filter = np.outer(Wave_filter, Ripple_filter)
    #Filter2=Level_filter.T*Edge_filter
    #Filter1=Level_filter.T*Level_filter
    altered_train = train_images.copy()

    for i in range(train_images.shape[0]):
        altered_train[i, :, :, 0] = cv2.filter2D(train_images[i, :, :, 0], -1,
                                                 Filter)
    train_images = altered_train.copy()

    # Training
    print('--------Training--------')
    train_images = train_images[0:3000, :, :, :]  #Taking only 3000 samples
    train_labels = train_labels[0:3000]
    feature = saab.initialize(train_images, pca_params)
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {}
    feat['feature'] = feature

    # save data
    fw = open('feat_10.pkl', 'wb')
    pickle.dump(feat, fw)
    fw.close()
Example #11
def main():
    num_sample = 300
    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        FLAGS.use_classes)

    train_images = train_images[:num_sample]
    train_labels = train_labels[:num_sample]
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)
    print('Training images.dtype ', train_images.dtype)

    kernel_sizes = saab.parse_list_string(FLAGS.kernel_sizes)
    if FLAGS.num_kernels:
        num_kernels = saab.parse_list_string(FLAGS.num_kernels)
    else:
        num_kernels = None
    energy_percent = FLAGS.energy_percent
    use_num_images = FLAGS.use_num_images
    print('Parameters:')
    print('use_classes:', class_list)
    print('Kernel_sizes:', kernel_sizes)
    print('Number_kernels:', num_kernels)
    print('Energy_percent:', energy_percent)
    print('Number_use_images:', use_num_images)
    train_images = np.moveaxis(train_images, 3, 1)
    pca_params = saab.multi_Saab_transform(train_images,
                                           train_labels,
                                           kernel_sizes=kernel_sizes,
                                           num_kernels=num_kernels,
                                           energy_percent=energy_percent,
                                           use_num_images=use_num_images,
                                           use_classes=class_list)
    # save data
    fw = open('pca_params_compact.pkl', 'wb')
    pickle.dump(pca_params, fw)
    fw.close()

    # load data
    fr = open('pca_params_compact.pkl', 'rb')
    data1 = pickle.load(fr)
    # print(data1)
    fr.close()
Example #12
def main():
    # load data
    fr = open('pca_params55.pkl', 'rb')
    pca_params = pickle.load(fr)
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)
    #print('pca_params:',pca_params)

    train = np.zeros((10000, 32, 32, 1))
    t = []
    #Apply laws filter
    for im in train_images[0:10000]:
        t.append(laws(im))

    t = np.vstack(t)
    train = t
    train = train.reshape(10000, 32, 32, 1)
    feature = []
    for i in range(0, 10000, 200):
        print('i', i)
        print('--------Training--------')

        print('shape', train.shape)
        f = saab3.initialize(train[i:i + 200], pca_params)
        f = f.reshape(f.shape[0], -1)
        feature.append(f)

    feature = np.vstack(feature)
    #print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')
    feat = {}
    feat['feature'] = feature

    # save data
    fw = open('feat33.pkl', 'wb')
    pickle.dump(feat, fw)
    fw.close()
Example #13
def main():
    # load data
    fr=open('pca_params_10.pkl','rb')  
    pca_params=pickle.load(fr, encoding='latin1')
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data("0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)
    
    Level_filter=np.array([1,4,6,4,1])
    Edge_filter=np.array([-1,-2, 0 ,2, 1])
    #Filter1=Level_filter.T*Level_filter
    Spot_filter=np.array([-1, 0, 2, 0, -1])
    Wave_filter=np.array([-1, 2, 0, -2, 1])
    Ripple_filter=np.array([1 ,-4, 6 ,-4, 1])
    #Filter1=Level_filter.T*Level_filter

    # 2-D W5R5 Laws kernel (outer product of the 1-D filters)
    Filter=np.outer(Wave_filter,Ripple_filter)
    altered_train=train_images.copy()
    altered_test=test_images.copy()
    
    for i in range(test_images.shape[0]):
        altered_test[i,:,:,0] = cv2.filter2D(test_images[i,:,:,0],-1,Filter)
    test_images = altered_test.copy() 
    
    test_images = test_images[3000:6000,:,:,:]
    test_labels = test_labels[3000:6000]
    
    # testing
    print('--------Testing--------')
    feature=saab.initialize(test_images, pca_params)
    feature=feature.reshape(feature.shape[0],-1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')

    # feature normalization
    #std_var=(np.std(feature, axis=0)).reshape(1,-1)
    #feature=feature/std_var
    
    num_clusters=[120, 80, 10]
    use_classes=10
    fr=open('llsr_weights_10.pkl','rb')  
    weights=pickle.load(fr)
    fr.close()
    fr=open('llsr_bias_10.pkl','rb')  
    biases=pickle.load(fr)
    fr.close()

    for k in range(len(num_clusters)):
        # least square regression
        weight=weights['%d LLSR weight'%k]
        bias=biases['%d LLSR bias'%k]
        feature=np.matmul(feature,weight)
        feature=feature+bias
        print(k,' layer LSR weight shape:', weight.shape)
        print(k,' layer LSR bias shape:', bias.shape)
        print(k,' layer LSR output shape:', feature.shape)
        
        if k!=len(num_clusters)-1:
            pred_labels=np.argmax(feature, axis=1)
            num_clas=np.zeros((num_clusters[k],use_classes))
            for i in range(num_clusters[k]):
                for t in range(use_classes):
                    for j in range(feature.shape[0]):
                        if pred_labels[j]==i and test_labels[j]==t:
                            num_clas[i,t]+=1
            acc_test=np.sum(np.amax(num_clas, axis=1))/feature.shape[0]
            print(k,' layer LSR testing acc is {}'.format(acc_test))

            # Relu
            for i in range(feature.shape[0]):
                for j in range(feature.shape[1]):
                    if feature[i,j]<0:
                        feature[i,j]=0
        else:
            pred_labels=np.argmax(feature, axis=1)
            acc_train=sklearn.metrics.accuracy_score(test_labels,pred_labels)
            print('testing acc is {}'.format(acc_train))

    fw=open('ffcnn_feature_test10.pkl','wb')
    pickle.dump(feature, fw)    
    fw.close()
Example #14
import data
import figures
import numerical_results

#############
# IMPORT DATA
#############

dat = data.import_data()

#####################################
# GENERATE AND SAVE NUMERICAL RESULTS
#####################################

numerical_results.write_numerical_results(dat)

###########################
# GENERATE AND SAVE FIGURES
###########################

figures.fig2(dat)
figures.fig3(dat)


Example #15
def main():
    # load data
    fr=open('pca_params.pkl','rb')  
    pca_params=pickle.load(fr, encoding='latin1')
    fr.close()

    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data("0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)
    
    train_images = train_images[:6000]
    train_labels = train_labels[:6000]
    # load feature
    fr=open('feat.pkl','rb')  
    feat=pickle.load(fr, encoding='latin1')
    fr.close()
    feature=feat['feature']
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')

    # feature normalization
    #std_var=(np.std(feature, axis=0)).reshape(1,-1)
    #feature=feature/std_var

    num_clusters=[120, 84, 10]
    use_classes=10
    weights={}
    bias={}
    for k in range(len(num_clusters)):
        if k!=len(num_clusters)-1:
            # Kmeans_Mixed_Class (too slow for CIFAR, changed into Fixed Class)
            kmeans=KMeans(n_clusters=num_clusters[k]).fit(feature)
            pred_labels=kmeans.labels_
            num_clas=np.zeros((num_clusters[k],use_classes))
            for i in range(num_clusters[k]):
                for t in range(use_classes):
                    for j in range(feature.shape[0]):
                        if pred_labels[j]==i and train_labels[j]==t:
                            num_clas[i,t]+=1
            acc_train=np.sum(np.amax(num_clas, axis=1))/feature.shape[0]
            print(k,' layer Kmean (just ref) training acc is {}'.format(acc_train))

            # Compute centroids
            clus_labels=np.argmax(num_clas, axis=1)
            centroid=np.zeros((num_clusters[k], feature.shape[1]))
            for i in range(num_clusters[k]):
                t=0
                for j in range(feature.shape[0]):
                    if pred_labels[j]==i and clus_labels[i]==train_labels[j]:
                        if t==0:
                            feature_test=feature[j].reshape(1,-1)
                        else:
                            feature_test=np.concatenate((feature_test, feature[j].reshape(1,-1)), axis=0)
                        t+=1
                centroid[i]=np.mean(feature_test, axis=0, keepdims=True)

            # Compute one hot vector
            t=0
            labels=np.zeros((feature.shape[0], num_clusters[k]))
            for i in range(feature.shape[0]):
                if clus_labels[pred_labels[i]]==train_labels[i]:
                    labels[i,pred_labels[i]]=1
                else:
                    distance_assigned=euclidean_distances(feature[i].reshape(1,-1), centroid[pred_labels[i]].reshape(1,-1))
                    cluster_special=[j for j in range(num_clusters[k]) if clus_labels[j]==train_labels[i]]
                    distance=np.zeros(len(cluster_special))
                    for j in range(len(cluster_special)):
                        distance[j]=euclidean_distances(feature[i].reshape(1,-1), centroid[cluster_special[j]].reshape(1,-1))
                    labels[i, cluster_special[np.argmin(distance)]]=1

            # least square regression
            A=np.ones((feature.shape[0],1))
            feature=np.concatenate((A,feature),axis=1)
            weight=np.matmul(LA.pinv(feature),labels)
            feature=np.matmul(feature,weight)
            weights['%d LLSR weight'%k]=weight[1:weight.shape[0]]
            bias['%d LLSR bias'%k]=weight[0].reshape(1,-1)
            print(k,' layer LSR weight shape:', weight.shape)
            print(k,' layer LSR output shape:', feature.shape)

            pred_labels=np.argmax(feature, axis=1)
            num_clas=np.zeros((num_clusters[k],use_classes))
            for i in range(num_clusters[k]):
                for t in range(use_classes):
                    for j in range(feature.shape[0]):
                        if pred_labels[j]==i and train_labels[j]==t:
                            num_clas[i,t]+=1
            acc_train=np.sum(np.amax(num_clas, axis=1))/feature.shape[0]
            print(k,' layer LSR training acc is {}'.format(acc_train))

            # Relu
            for i in range(feature.shape[0]):
                for j in range(feature.shape[1]):
                    if feature[i,j]<0:
                        feature[i,j]=0

            # # Double relu
            # for i in range(feature.shape[0]):
            #     for j in range(feature.shape[1]):
            #         if feature[i,j]<0:
            #             feature[i,j]=0
            #         elif feature[i,j]>1:
            #             feature[i,j]=1
        else:
            # least square regression
            labels=keras.utils.to_categorical(train_labels,10)
            A=np.ones((feature.shape[0],1))
            feature=np.concatenate((A,feature),axis=1)
            weight=np.matmul(LA.pinv(feature),labels)
            feature=np.matmul(feature,weight)
            weights['%d LLSR weight'%k]=weight[1:weight.shape[0]]
            bias['%d LLSR bias'%k]=weight[0].reshape(1,-1)
            print(k,' layer LSR weight shape:', weight.shape)
            print(k,' layer LSR output shape:', feature.shape)
            
            pred_labels=np.argmax(feature, axis=1)
            acc_train=sklearn.metrics.accuracy_score(train_labels,pred_labels)
            print('training acc is {}'.format(acc_train))
    # save data
    fw=open('llsr_weights.pkl','wb')    
    pickle.dump(weights, fw)    
    fw.close()
    fw=open('llsr_bias.pkl','wb')    
    pickle.dump(bias, fw)    
    fw.close()
    fw=open('llsr_feature10.pkl','wb')    
    pickle.dump(feature, fw)    
    fw.close()
Example #16
# main.py
# The main program; primary interface to the classification model

import data
import model

(train_data, train_labels, test_data, test_labels) = data.import_data()
classifier = model.Classifier()
classifier.train(train_data, train_labels)
classifier.evaluate(test_data, test_labels)
classifier.cv(test_data, test_labels)
Example #17
from analysis_functions import *
import data





########################################
# Importing data and setting variables #
########################################

name_mapping, rank_count, rank_id = data.import_data(save=False,try_from_csv=True)
m='M';f='F'
# Splitting all gender data into individual dataframes and dictionaries
id2nm = name_mapping['id2str'][m]
id2nf = name_mapping['id2str'][f]
n2idm = name_mapping['str2id'][m]
n2idf = name_mapping['str2id'][f]
rcm = rank_count[m]
rcf = rank_count[f]
ridm = rank_id[m]
ridf = rank_id[f]
years = np.arange(start=1880,stop=1880+rcm.columns.size)
year_cm = np.array([[(years[i]-years.min())/(years.max()-years.min()) for i in range(len(years))],
                    [0]*len(years),
                    [(years.max()-years[i])/(years.max()-years.min()) for i in range(len(years))]])

# Other Variables
verbose = False
cur_fig = 1
Example #18
def main():
    df_list = data.import_data()
    df = df_list[0]
    excel_export(df)
Example #19
def main():
    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(FLAGS.use_classes)
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    train_images = train_images[:6000]
    train_labels = train_labels[:6000]
    kernel_sizes=saab.parse_list_string(FLAGS.kernel_sizes)
    if FLAGS.num_kernels:
    	num_kernels=saab.parse_list_string(FLAGS.num_kernels)
    else:
    	num_kernels=None
    energy_percent=FLAGS.energy_percent
    use_num_images=FLAGS.use_num_images
    print('Parameters:')
    print('use_classes:', class_list)
    print('Kernel_sizes:', kernel_sizes)
    print('Number_kernels:', num_kernels)
    print('Energy_percent:', energy_percent)
    print('Number_use_images:', use_num_images)
    
    def creating_law_filters(a,b):
        ten_product = np.tensordot(a,b,axes=0);
        return ten_product;
    
    #Function to apply Boundary Extension
    def boundary_extension(Image,n):
        Ext_image = np.zeros((Row_Size+(2*n),Column_Size+(2*n)))
        #Complete image
        #Ext_image = np.pad(Image,n,'reflect');
        for i in range(n,(Row_Size+n)):
            for j in range(n,(Column_Size+n)):
                Ext_image[i][j] = Image[i-n][j-n];
        #Upper rows    
        for i in range(0,n):
            for j in range(n, (Column_Size+n)):
                Ext_image[i][j] = Image[0][j-n];
        #Left columns
        for j in range(0,n):
            for i in range(n, (Row_Size+n)):
                Ext_image[i][j] = Image[i-n][0];
        #Bottom rows
        for i in range(Row_Size+n, (Row_Size+(2*n))):
            for j in range(n,(Column_Size+n)):
                Ext_image[i][j] = Image[Row_Size-1][j-n];
        #Right columns
        for j in range(Column_Size+n, Column_Size+(2*n)):
            for i in range(n,(Row_Size+n)):
                Ext_image[i][j] = Image[i-n][Column_Size-1];
        #Corners
        for i in range(0,n):
            for j in range(0,n):
                Ext_image[i][j] = Image[0][0];
    
        for i in range(0,n):
            for j in range(Column_Size+n,Column_Size+(2*n)):
                Ext_image[i][j] = Image[0][Column_Size-1];
    
        for j in range(0,n):
            for i in range(Row_Size+n,Row_Size+(2*n)):
                Ext_image[i][j] = Image[Row_Size-1][0];
    
        for j in range(Column_Size+n, Column_Size+(2*n)):
            for i in range(Row_Size+n, Row_Size+(2*n)):
                Ext_image[i][j] = Image[Row_Size-1][Column_Size-1];
        
        return Ext_image;
    
    
    def law_filter_application(Ext_image,Law_filter):
        Law_applied = np.zeros((Row_Size,Column_Size))
        for i in range(2, Row_Size+2):
            for j in range(2, Column_Size+2):
                m = 0;
                k = 0;
                l = 0;
                for k in range(i-2, i+3):
                    n = 0;
                    for l in range(j-2, j+3):
                        Law_applied[i-2][j-2] = Law_applied[i-2][j-2] + (Ext_image[k][l]*Law_filter[m][n]);
                        n += 1 ;
                    m += 1 ;
        return Law_applied;  
    
    Row_Size = 32;
    Column_Size = 32;
    Window_Size = 5;
    n1 = 2
    features = 25;
    samples = 32*32;
    
    
    L5 = np.array([1, 4, 6, 4, 1]);      #Level
    E5 = np.array([-1, -2, 0, 2, 1]);    #Edge
    S5 = np.array([-1, 0, 2, 0, -1]);    #Spot
    W5 = np.array([-1, 2, 0, -2, 1]);    #Wave
    R5 = np.array([1, -4, 6, -4, 1]);
    
    #L5S5 = creating_law_filters(L5,S5);
    L5W5 = creating_law_filters(L5,W5);
    
    #Reading the image
    for i in range(0,6000):
        Input_image = train_images[i,:,:,0]
        #Boundary extension of the image
        Ext_image = boundary_extension(Input_image,n1);
        #Applying the law filters
        
        train_images[i,:,:,0] = law_filter_application(Ext_image,L5W5);
        
    '''train_img1 = color.rgb2gray(io.imread('1.png'))
    Input_image = train_img1
        #Boundary extension of the image
    Ext_image = boundary_extension(Input_image,n1);
        #Applying the law filters
        
    train_img1 = law_filter_application(Ext_image,L5S5)
    plt.matshow(train_img1,cmap='gray') '''  
    
    

    pca_params=saab.multi_Saab_transform(train_images, train_labels,
    	                 kernel_sizes=kernel_sizes,
    	                 num_kernels=num_kernels,
    	                 energy_percent=energy_percent,
    	                 use_num_images=use_num_images,
    	                 use_classes=class_list)
    # save data
    fw=open('pca_params.pkl','wb')    
    pickle.dump(pca_params, fw)    
    fw.close()

    # load data
    fr=open('pca_params.pkl','rb')  
    data1=pickle.load(fr)
    print(data1)
    fr.close()
Example #20
with tf.name_scope("train"):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

with tf.name_scope('eval'):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_epochs = 50
batch_size = 500

data.import_data([1, 2, 3, 4, 5], ['test_batch'])

test_x_batch = data.testing_data[0]
test_y_batch = get_label(data.testing_data[1])

max_val = 0

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        x_batch = None
        y_batch = None
        for iteration in range(data.data_size() // batch_size):
            x_batch, y_batch = data.next_batch(batch_size)
            y_batch = get_label(y_batch)
            sess.run(training_op, feed_dict={X: x_batch, y: y_batch})
Example #21
import data
import pickle
import numpy as np
from sklearn import svm
from sklearn.decomposition import PCA


def loadfeature(feat):
    fr = open(feat, 'rb')
    feat = pickle.load(fr, encoding='latin1')
    fr.close()
    return feat


train_images, train_labels, test_images, test_labels, class_list = data.import_data(
    "0-9")
ensem = loadfeature('llsr1.pkl')
ensem = np.append(ensem, loadfeature('llsr_2.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr3.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr4.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr5.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr6.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr7.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr8.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr9.pkl'), axis=1)
ensem = np.append(ensem, loadfeature('llsr10.pkl'), axis=1)

pca = PCA(n_components=10)
X = pca.fit_transform(ensem)

Y = svm.SVC(gamma='scale')
Example #22
import pandas as pd
import numpy as np

import plotly.graph_objs as go
from plotly.subplots import make_subplots
import plotly.express as px

from teams import import_team_colors, import_sec_teams
from data import import_data, remove_null_columns, calc_team_sum_stats, calc_sec_season_stats

import Weather

# needed for dash.Dash and dbc.themes.SLATE below
import dash
import dash_bootstrap_components as dbc

teamColorsDict = import_team_colors()

df = import_data()
df_viewers, df_attend, df_rating = remove_null_columns(df)

sec_teams = import_sec_teams()
team_sum_stats, sec_sum_stats = calc_team_sum_stats(df, df_viewers, df_attend,
                                                    df_rating, sec_teams)

teams = np.unique(df[['visteamid', 'hometeamid']].dropna().values.ravel())

sec_season_stats = calc_sec_season_stats(df_viewers, df_attend, df_rating,
                                         team_sum_stats, sec_teams)

selectyear = df.loc[df.season.isna() == False].season.unique()
selectyeareDict = [{'label': x, 'value': x} for x in selectyear]

app = dash.Dash(name=__name__, external_stylesheets=[dbc.themes.SLATE])
Example #23
def main():
    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        FLAGS.use_classes)
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    #Laws Filter creation
    L5 = np.array([1, 4, 6, 4, 1]).reshape(5, 1)
    E5 = np.array([-1, -2, 0, 2, 1]).reshape(5, 1)
    S5 = np.array([-1, 0, 2, 0, -1]).reshape(5, 1)
    R5 = np.array([-1, 2, 0, -2, 1]).reshape(5, 1)
    W5 = np.array([1, -4, 6, -4, 1]).reshape(5, 1)

    laws_filters = {'L5': L5, 'E5': E5, 'S5': S5, 'R5': R5, 'W5': W5}

    _2d_laws_filters = {}
    for k1, v1 in laws_filters.items():
        for k2, v2 in laws_filters.items():
            _2d_laws_filters[k1 + k2] = np.matmul(v1, v2.T)

    #boundary extension by pixel replication
    extended_images = []
    for img in train_images[:10000, :, :, 0]:
        new_img = np.pad(img, 2, 'reflect')
        extended_images.append(new_img)

    #Laws feature extraction
    final_images = []
    for img in extended_images:
        new_img = np.empty((1, 32, 32), np.uint8)
        for i in range(2, 32 + 2):
            for j in range(2, 32 + 2):
                new_img[0][i - 2][j - 2] = convolve(i, j,
                                                    _2d_laws_filters['S5R5'],
                                                    img)
        final_images.append(new_img)
    train_images = np.vstack(final_images)
    train_images = train_images.reshape(-1, 32, 32, 1)
    print(train_images.shape)

    kernel_sizes = saab.parse_list_string(FLAGS.kernel_sizes)
    if FLAGS.num_kernels:
        num_kernels = saab.parse_list_string(FLAGS.num_kernels)
    else:
        num_kernels = None
    energy_percent = FLAGS.energy_percent
    use_num_images = FLAGS.use_num_images
    print('Parameters:')
    print('use_classes:', class_list)
    print('Kernel_sizes:', kernel_sizes)
    print('Number_kernels:', num_kernels)
    print('Energy_percent:', energy_percent)
    print('Number_use_images:', use_num_images)

    pca_params = saab.multi_Saab_transform(train_images,
                                           train_labels,
                                           kernel_sizes=kernel_sizes,
                                           num_kernels=num_kernels,
                                           energy_percent=energy_percent,
                                           use_num_images=use_num_images,
                                           use_classes=class_list)

    #print(pca_params)
    # save data
    fw = open('pca_params_S5R5.pkl', 'wb')
    pickle.dump(pca_params, fw)
    fw.close()
Example #24
def main():
    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        FLAGS.use_classes)
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    kernel_sizes = saab.parse_list_string(FLAGS.kernel_sizes)
    if FLAGS.num_kernels:
        num_kernels = saab.parse_list_string(FLAGS.num_kernels)
    else:
        num_kernels = None
    energy_percent = FLAGS.energy_percent
    use_num_images = FLAGS.use_num_images
    print('Parameters:')
    print('use_classes:', class_list)
    print('Kernel_sizes:', kernel_sizes)
    print('Number_kernels:', num_kernels)
    print('Energy_percent:', energy_percent)
    print('Number_use_images:', use_num_images)

    #Level_filter=np.array([1,4,6,4,1])
    #Edge_filter=np.array([-1,-2, 0 ,2, 1])
    #Spot_filter=np.array([-1, 0, 2, 0, -1])
    Wave_filter = np.array([-1, 2, 0, -2, 1])
    Ripple_filter = np.array([1, -4, 6, -4, 1])

    # 2-D W5R5 Laws kernel (outer product of the 1-D filters)
    Filter = np.outer(Wave_filter, Ripple_filter)
    '''
    Filter1=Level_filter.T*Level_filter
    Filter3=Level_filter.T*Spot_filter
    Filter4=Level_filter.T*Wave_filter
    Filter5=Level_filter.T*Ripple_filter
    
    Filter6=Edge_filter.T*Level_filter
    Filter7=Edge_filter.T*Edge_filter
    Filter8=Edge_filter.T*Spot_filter
    Filter9=Edge_filter.T*Wave_filter
    Filter10=Edge_filter.T*Ripple_filter
    
    Filter11=Spot_filter.T*Level_filter
    Filter12=Spot_filter.T*Edge_filter
    Filter13=Spot_filter.T*Spot_filter
    Filter14=Spot_filter.T*Wave_filter
    Filter15=Spot_filter.T*Ripple_filter
    
    Filter16=Wave_filter.T*Level_filter
    Filter17=Wave_filter.T*Edge_filter
    Filter18=Wave_filter.T*Spot_filter
    Filter19=Wave_filter.T*Wave_filter
    Filter20=Wave_filter.T*Ripple_filter
    
    Filter21=Ripple_filter.T*Level_filter
    Filter22=Ripple_filter.T*Edge_filter
    Filter23=Ripple_filter.T*Spot_filter
    Filter24=Ripple_filter.T*Wave_filter
    Filter25=Ripple_filter.T*Ripple_filter
    '''

    altered_train = train_images.copy()
    for i in range(train_images.shape[0]):
        altered_train[i, :, :, 0] = cv2.filter2D(train_images[i, :, :, 0], -1,
                                                 Filter)
    train_images = altered_train.copy()

    pca_params = saab.multi_Saab_transform(train_images,
                                           train_labels,
                                           kernel_sizes=kernel_sizes,
                                           num_kernels=num_kernels,
                                           energy_percent=energy_percent,
                                           use_num_images=use_num_images,
                                           use_classes=class_list)
    # save data
    fw = open('pca_params_10.pkl', 'wb')
    pickle.dump(pca_params, fw)
    fw.close()

    # load data
    fr = open('pca_params_10.pkl', 'rb')
    data1 = pickle.load(fr)
    print(data1)
    fr.close()
Example #25
'''
By adidinchuk. [email protected].
https://github.com/adidinchuk/tf-neural-net
'''

import hyperparams as hp
import data as d
import numpy as np
import network as nwk

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

data = d.import_data('promoted.csv', headers=True)

features_numeric = d.zero_to_mean([[
    float(row[2].strip()) if row[2].strip() else 0.,
    float(row[3].strip()) if row[3].strip() else 0.,
    float(row[5].strip()) if row[5].strip() else 0.
] for row in data])
# normalize numeric features
features_numeric = np.transpose(d.normalize(features_numeric))

features_string = [[
    row[6].strip() if row[6].strip() else 'N/A',
    row[7].strip() if row[7].strip() else 'N/A'
] for row in data]

# expand the categorical columns into binary categories
row4_expansion, row4_translation = d.expand_categorical_feature(
    np.transpose(features_string)[0])
Example #26
# -*- coding: utf-8 -*-
"""
Created on Fri May  4 21:38:28 2018

@author: wael
"""

from data import import_data
from splitdataset import splitdataset
from model_performance import perfor
from prediction import predict
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib

data = import_data()
X, Y, X_train, X_test, y_train, y_test = splitdataset(data)

# Decision tree with entropy
clf_object = DecisionTreeClassifier(criterion="entropy",
                                    random_state=100,
                                    max_depth=6,
                                    min_samples_leaf=5)
# Perform training
clf_object.fit(X_train, y_train)
# Prediction on test
y_pred, rfecv = predict(X_test, clf_object, X_train, y_train)
perfor(y_test, y_pred, rfecv, X_train)
joblib.dump(clf_object, 'pfa.pkl')
Example #27
    # manually loaded truck beds
    truck_1.truck_bed = [1, 2, 6, 7, 11, 25, 26, 29, 30, 31, 32, 33]
    truck_2.truck_bed = [
        3, 5, 13, 14, 15, 16, 18, 19, 20, 21, 34, 36, 37, 38, 39, 40
    ]
    truck_3.truck_bed = [4, 8, 9, 10, 12, 17, 22, 23, 24, 27, 28, 35]

    # for each package, add the address's hub number from a hashmap using the address as the key
    # fill each truck bed with respective packages
    # Big O is O(n)
    for p in Package.package_object_list:
        p.address_hub_number = data.vertices_hashmap.get(p.address)
        Truck.add_package_start_time(truck_1, p)
        Truck.add_package_start_time(truck_2, p)
        Truck.add_package_start_time(truck_3, p)

    # for each package id in truck bed find the address hub number and add it to a list in truck class
    # run deliver function, see truck module
    # big O is O(n^4) because deliver is O(n^3)
    for truck in Truck.truck_object_list:
        truck.get_hub_vertices()
        truck.deliver(0, data.distance_list, truck.start_time)

    # driver of truck 1 must return to hub to drive truck three
    truck_1.mileage += data.distance_list[11][0]


data.import_data()
main()
user_interface.user()
Example #28
def main():
    # read data
    train_images, train_labels, test_images, test_labels, class_list = data.import_data(
        "0-9")
    print('Training image size:', train_images.shape)
    print('Testing_image size:', test_images.shape)

    # load feature
    fr = open('feat.pkl', 'rb')
    feat = pickle.load(fr, encoding='Latin')
    fr.close()
    feature = feat['training_feature']
    feature = feature.reshape(feature.shape[0], -1)
    print("S4 shape:", feature.shape)
    print('--------Finish Feature Extraction subnet--------')

    # feature normalization
    std_var = (np.std(feature, axis=0)).reshape(1, -1)
    feature = feature / std_var

    num_clusters = [200, 100, 10]
    use_classes = class_list
    weights = {}
    bias = {}
    for k in range(len(num_clusters)):
        if k != len(num_clusters) - 1:
            num_clus = int(num_clusters[k] / len(use_classes))
            labels = np.zeros((feature.shape[0], num_clusters[k]))
            for n in range(len(use_classes)):
                idx = (train_labels == use_classes[n])
                index = np.where(idx == True)[0]
                feature_special = np.zeros((index.shape[0], feature.shape[1]))
                for i in range(index.shape[0]):
                    feature_special[i] = feature[index[i]]
                kmeans = KMeans(n_clusters=num_clus).fit(feature_special)
                pred_labels = kmeans.labels_
                for i in range(feature_special.shape[0]):
                    labels[index[i], pred_labels[i] + n * num_clus] = 1

            # least square regression
            A = np.ones((feature.shape[0], 1))
            feature = np.concatenate((A, feature), axis=1)
            weight = np.matmul(LA.pinv(feature), labels)
            feature = np.matmul(feature, weight)
            weights['%d LLSR weight' % k] = weight[1:weight.shape[0]]
            bias['%d LLSR bias' % k] = weight[0].reshape(1, -1)
            print(k, ' layer LSR weight shape:', weight.shape)
            print(k, ' layer LSR output shape:', feature.shape)

            pred_labels = np.argmax(feature, axis=1)
            num_clas = np.zeros((num_clusters[k], len(use_classes)))
            for i in range(num_clusters[k]):
                for t in range(len(use_classes)):
                    for j in range(feature.shape[0]):
                        if pred_labels[j] == i and train_labels[j] == t:
                            num_clas[i, t] += 1
            acc_train = np.sum(np.amax(num_clas, axis=1)) / feature.shape[0]
            print(k, ' layer LSR training acc is {}'.format(acc_train))

            # Relu
            for i in range(feature.shape[0]):
                for j in range(feature.shape[1]):
                    if feature[i, j] < 0:
                        feature[i, j] = 0
        else:
            # least square regression
            labels = keras.utils.to_categorical(train_labels, 10)
            A = np.ones((feature.shape[0], 1))
            feature = np.concatenate((A, feature), axis=1)
            weight = np.matmul(LA.pinv(feature), labels)
            feature = np.matmul(feature, weight)
            weights['%d LLSR weight' % k] = weight[1:weight.shape[0]]
            bias['%d LLSR bias' % k] = weight[0].reshape(1, -1)
            print(k, ' layer LSR weight shape:', weight.shape)
            print(k, ' layer LSR output shape:', feature.shape)

            pred_labels = np.argmax(feature, axis=1)
            acc_train = sklearn.metrics.accuracy_score(train_labels,
                                                       pred_labels)
            print('training acc is {}'.format(acc_train))
    # save data
    fw = open('llsr_weights.pkl', 'wb')
    pickle.dump(weights, fw)
    fw.close()
    fw = open('llsr_bias.pkl', 'wb')
    pickle.dump(bias, fw)
    fw.close()
Example #29
from sklearn import metrics
from tensorflow.python.platform import flags
import pickle
import data
import saab
import cv2

flags.DEFINE_string("output_path", None, "The output dir to save params")
flags.DEFINE_string("use_classes", "0-9", "Supported format: 0,1,5-9")
flags.DEFINE_string("kernel_sizes", "3,5", "Kernels size for each stage. Format: '3,3'")
flags.DEFINE_string("num_kernels", "5,15", "Num of kernels for each stage. Format: '4,10'")
flags.DEFINE_float("energy_percent", None, "Energy to be preserved in each stage")
flags.DEFINE_integer("use_num_images",3000, "Num of images used for training")
FLAGS = flags.FLAGS

train_images, train_labels, test_images, test_labels, class_list = data.import_data(FLAGS.use_classes)

fr=open('ffcnn_feature_1.pkl','rb')  
feature_1=pickle.load(fr)
fr.close()

fr=open('ffcnn_feature_2.pkl','rb')  
feature_2=pickle.load(fr)
fr.close()

fr=open('ffcnn_feature_3.pkl','rb')  
feature_3=pickle.load(fr)
fr.close()

fr=open('ffcnn_feature_4.pkl','rb')  
feature_4=pickle.load(fr)