import numpy as np
import lasagne
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
from sklearn.model_selection import train_test_split


def main(input_file, model_path):
    batch_size = 128
    nb_classes = 62   # A-Z, a-z and 0-9
    nb_epoch = 2

    # Input image dimensions
    img_rows, img_cols = 32, 32

    # Path of data files
    path = input_file

    ### PREDICTION ###

    # Load the model with the highest validation accuracy
    # model.load_weights("best.kerasModelWeights")

    # Load Kaggle test set
    X_test = np.load(path + "/testPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy")
    print X_test.shape

    # Load the preprocessed data and labels
    X_train_all = np.load(path + "/trainPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy")
    Y_train_all = np.load(path + "/labelsPreproc.npy")

    X_train, X_val, Y_train, Y_val = \
        train_test_split(X_train_all, Y_train_all, test_size=0.25,
                         stratify=np.argmax(Y_train_all, axis=1))
    print X_train.shape

    # Convert one-hot labels to integer class indices for evaluation
    Y_val = convert_(Y_val)

    # Input shape for the neural network: (batch, channels, rows, cols)
    X_train = X_train.reshape((-1, 1, 32, 32))
    # labels = labels.astype(np.uint8)
    X_val = X_val.reshape((-1, 1, 32, 32))
    Y_val = Y_val.astype(np.uint8)

    input_image_vector_shape = (None, 1, 32, 32)

    '''
    @description: Two-layer convolutional neural network
    '''
    # input layer
    input_layer = ('input', layers.InputLayer)

    # first layer design
    first_layer_conv_filter = layers.Conv2DLayer
    first_layer_pool_filter = layers.MaxPool2DLayer
    conv_filter = ('conv2d1', first_layer_conv_filter)
    pool_filter = ('maxpool1', first_layer_pool_filter)

    # second layer design
    second_layer_conv_filter = layers.Conv2DLayer
    second_layer_pool_filter = layers.MaxPool2DLayer
    conv_filter2 = ('conv2d2', second_layer_conv_filter)
    pool_filter2 = ('maxpool2', second_layer_pool_filter)

    # dropout rates (used for regularization)
    dropout_layer = layers.DropoutLayer
    drop1 = 0.5
    drop2 = 0.5
    first_drop_layer = ('dropout1', dropout_layer)
    second_drop_layer = ('dropout2', dropout_layer)

    # network layer order
    design_layers = [
        input_layer,
        conv_filter, pool_filter,
        conv_filter2, pool_filter2,
        first_drop_layer,
        ('dense', layers.DenseLayer),
        second_drop_layer,
        ('output', layers.DenseLayer),
    ]

    # Neural net object instance
    net1 = NeuralNet(
        # convolutional neural network layers; the convolution and pooling
        # window sizes are set below
        layers=design_layers,

        # input layer: images are fed in as 1 x 32 x 32 tensors
        input_shape=input_image_vector_shape,

        # first convolutional filter, 5 x 5
        conv2d1_num_filters=32,
        conv2d1_filter_size=(5, 5),
        conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d1_W=lasagne.init.HeNormal(gain='relu'),

        # first pooling layer, 2 x 2
        maxpool1_pool_size=(2, 2),

        # second convolutional filter, 5 x 5
        conv2d2_num_filters=32,
        conv2d2_filter_size=(5, 5),
        conv2d2_nonlinearity=lasagne.nonlinearities.rectify,

        # second pooling layer, 2 x 2
        maxpool2_pool_size=(2, 2),

        dropout1_p=drop1,

        # hidden unit density
        dense_num_units=512,
        dense_nonlinearity=lasagne.nonlinearities.rectify,

        # dropout2
        dropout2_p=drop2,

        # output
        output_nonlinearity=lasagne.nonlinearities.softmax,
        # corresponds to the number of target labels
        output_num_units=62,

        # optimization method params
        # NOTE: different momentum-based gradient methods yield varied results.
        update=nesterov_momentum,  # 69
        update_learning_rate=0.01,
        update_momentum=0.078,
        # update_learning_rate=1e-4,
        # update_momentum=0.9,
        # max_epochs=1000,
        # update_learning_rate=0.1,
        # update_momentum=0.003,
        max_epochs=1000,
        verbose=1,
    )

    print "Loading Neural Net Parameters..."
    net1.initialize_layers()
    net1.load_weights_from('{}_weightfile.w'.format(model_path))
    '''
    new_twoLayer_paramfile.w
    new_twoLayer_weightfile.w
    '''
    net1.load_params_from('{}_paramfile.w'.format(model_path))

    from sklearn.metrics import classification_report, accuracy_score, confusion_matrix

    print 'Testing...'
    y_true, y_pred = Y_val, net1.predict(X_val)      # Get our predictions
    print(classification_report(y_true, y_pred))     # Per-class classification report
    print net1.predict(X_val)
    print Y_val

    a = confusion_matrix(Y_val, net1.predict(X_val))
    b = np.trace(a)
    print 'Validation Accuracy: ' + str(float(b) / float(np.sum(a)))
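# convert_() is called above but not defined in this snippet; the original
# project defines it elsewhere. A minimal sketch, assuming the labels in
# labelsPreproc.npy are one-hot encoded (the stratify=np.argmax(...) call
# above suggests they are):
def convert_(one_hot_labels):
    # Collapse one-hot rows into integer class indices (0..61)
    return np.argmax(one_hot_labels, axis=1)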
from nolearn.lasagne import NeuralNet


def recunstruct_cae(folder_path):
    """Rebuild the convolutional autoencoder from its saved params and weights."""
    cnn = NeuralNet()
    cnn.load_params_from(folder_path + CONV_AE_PARAMS_PKL)
    cnn.load_weights_from(folder_path + CONV_AE_NP)
    return cnn
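# A brief usage sketch with hypothetical names: 'models/conv_ae/' and X_images
# are placeholders, assuming CONV_AE_PARAMS_PKL and CONV_AE_NP name the saved
# parameter pickle and weight dump inside that folder.
cae = recunstruct_cae('models/conv_ae/')
reconstructions = cae.predict(X_images)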
    update_learning_rate=theano.shared(float32(0.05)),
    update_momentum=theano.shared(float32(0.9)),
    regression=True,
    on_epoch_finished=[
        AdjustVariable('update_learning_rate', start=0.05, stop=0.0001),
        AdjustVariable('update_momentum', start=0.9, stop=0.999),
    ],
    batch_iterator_train=BatchIterator(batch_size=128),
    max_epochs=1200,
    verbose=1,
)

# Initialise the fine-tuning net with weights from the pre-trained model
pretrain_net = joblib.load('pretrain_net.pkl')
final_net.load_weights_from(pretrain_net)

# Memory-map the ground-truth training data
no_of_examples_ground_truth = np.load('no_of_examples_ground_truth.npy')
X2 = np.memmap('XG_mat.npy', dtype='float32', mode='r',
               shape=(no_of_examples_ground_truth, 3, image_size, image_size))
y2 = np.memmap('yG_mat.npy', dtype='float32', mode='r',
               shape=(no_of_examples_ground_truth, map_size * map_size))

final_net.fit(X2, y2)
joblib.dump(final_net, 'final_net.pkl')

###############################################################################

y = final_net.predict(X2)
sio.savemat('prediction_vector.mat', {'y': y})
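# float32() and AdjustVariable() are referenced above but not defined in this
# fragment. A minimal sketch, assuming they follow the common nolearn recipe
# of linearly annealing a theano shared variable from `start` to `stop` over
# max_epochs.
import numpy as np


def float32(k):
    # Cast Python floats to float32 so theano shared variables keep their dtype
    return np.cast['float32'](k)


class AdjustVariable(object):
    def __init__(self, name, start=0.03, stop=0.001):
        self.name = name
        self.start, self.stop = start, stop
        self.ls = None

    def __call__(self, nn, train_history):
        if self.ls is None:
            # Precompute the linear schedule once, over the full training run
            self.ls = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        new_value = float32(self.ls[epoch - 1])
        getattr(nn, self.name).set_value(new_value)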
import numpy as np
import lasagne
from lasagne import layers
from lasagne.updates import momentum
from nolearn.lasagne import NeuralNet
from sklearn.model_selection import train_test_split


def main(input_file, model_path):
    batch_size = 128
    nb_classes = 62   # A-Z, a-z and 0-9
    nb_epoch = 2

    # Input image dimensions
    img_rows, img_cols = 32, 32

    # Path of data files
    path = input_file

    ### PREDICTION ###

    # Load the model with the highest validation accuracy
    # model.load_weights("best.kerasModelWeights")

    # Load Kaggle test set
    X_test = np.load(path + "/testPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy")
    print X_test.shape

    # Load the preprocessed data and labels
    X_train_all = np.load(path + "/trainPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy")
    Y_train_all = np.load(path + "/labelsPreproc.npy")

    X_train, X_val, Y_train, Y_val = \
        train_test_split(X_train_all, Y_train_all, test_size=0.25,
                         stratify=np.argmax(Y_train_all, axis=1))
    print X_train.shape

    # Convert one-hot labels to integer class indices for evaluation
    Y_val = convert_(Y_val)

    # Input shape for the neural network: (batch, channels, rows, cols)
    X_train = X_train.reshape((-1, 1, 32, 32))
    # labels = labels.astype(np.uint8)
    X_val = X_val.reshape((-1, 1, 32, 32))
    Y_val = Y_val.astype(np.uint8)

    input_image_vector_shape = (None, 1, 32, 32)

    net1 = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv2d1', layers.Conv2DLayer),
            ('maxpool1', layers.MaxPool2DLayer),
            ('conv2d2', layers.Conv2DLayer),
            ('maxpool2', layers.MaxPool2DLayer),
            ('conv2d3', layers.Conv2DLayer),
            ('maxpool3', layers.MaxPool2DLayer),
            # ('conv2d4', layers.Conv2DLayer),
            # ('maxpool4', layers.MaxPool2DLayer),
            ('dropout1', layers.DropoutLayer),
            ('dropout2', layers.DropoutLayer),
            ('dense', layers.DenseLayer),
            # ('dense2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=input_image_vector_shape,

        conv2d1_num_filters=128,
        conv2d1_filter_size=(3, 3),
        conv2d1_nonlinearity=lasagne.nonlinearities.tanh,
        conv2d1_W=lasagne.init.GlorotUniform(),
        conv2d1_pad=(2, 2),
        maxpool1_pool_size=(2, 2),

        conv2d2_num_filters=256,
        conv2d2_filter_size=(3, 3),
        conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d2_pad=(2, 2),
        maxpool2_pool_size=(2, 2),

        conv2d3_num_filters=512,
        conv2d3_filter_size=(3, 3),
        conv2d3_nonlinearity=lasagne.nonlinearities.rectify,
        conv2d3_pad=(2, 2),
        maxpool3_pool_size=(2, 2),

        dropout1_p=0.5,
        dropout2_p=0.5,

        dense_num_units=8192,
        dense_nonlinearity=lasagne.nonlinearities.rectify,
        # dense2_num_units=16,
        # dense2_nonlinearity=lasagne.nonlinearities.rectify,

        output_nonlinearity=lasagne.nonlinearities.softmax,
        output_num_units=62,

        update=momentum,  # 75.5% with tanh init and dense_num_units=256
        update_learning_rate=0.03,
        update_momentum=0.8,
        max_epochs=1000,
        verbose=1,
    )

    print "Loading Neural Net Parameters..."
    net1.initialize_layers()
    net1.load_weights_from('{}_weightfile.w'.format(model_path))
    net1.load_params_from('{}_paramfile.w'.format(model_path))

    from sklearn.metrics import classification_report, accuracy_score, confusion_matrix

    print 'Testing...'
    y_true, y_pred = Y_val, net1.predict(X_val)      # Get our predictions
    print(classification_report(y_true, y_pred))     # Per-class classification report
    print net1.predict(X_val)
    print Y_val

    a = confusion_matrix(Y_val, net1.predict(X_val))
    b = np.trace(a)
    print 'Validation Accuracy: ' + str(float(b) / float(np.sum(a)))
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('dense3', DenseLayer),
        ('output', DenseLayer)],
    input_shape=(None, num_features),
    dense1_num_units=512,
    dropout1_p=0.5,
    dense2_num_units=512,
    dropout2_p=0.5,
    dense3_num_units=512,
    output_num_units=num_classes,
    output_nonlinearity=softmax,
    update=nesterov_momentum,
    eval_size=0.2,
    verbose=1,
    update_learning_rate=theano.shared(float32(0.01)),
    update_momentum=theano.shared(float32(0.9)),
    on_epoch_finished=[
        AdjustVariable('update_learning_rate', start=0.01, stop=0.00001),
        AdjustVariable('update_momentum', start=0.9, stop=0.999),
        EarlyStopping(),
    ],
    max_epochs=10000,
)

net0.initialize()
# do_fit(net0, 'data/train_impu_norm_shuf.csv', n_iter=1)
net0.load_weights_from('nn_weights')
RainCompetition.do_predict(net0, RainCompetition.__data__['test_normalized'],
                           'data/rain_nn_pred.csv')
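# EarlyStopping() is referenced above but not defined in this fragment. A
# minimal sketch, assuming the usual nolearn on_epoch_finished pattern: keep
# the best weights seen so far and stop once validation loss has not improved
# for `patience` epochs.
import numpy as np


class EarlyStopping(object):
    def __init__(self, patience=100):
        self.patience = patience
        self.best_valid = np.inf
        self.best_valid_epoch = 0
        self.best_weights = None

    def __call__(self, nn, train_history):
        current_valid = train_history[-1]['valid_loss']
        current_epoch = train_history[-1]['epoch']
        if current_valid < self.best_valid:
            # New best validation loss: remember it and snapshot the weights
            self.best_valid = current_valid
            self.best_valid_epoch = current_epoch
            self.best_weights = nn.get_all_params_values()
        elif self.best_valid_epoch + self.patience < current_epoch:
            # No improvement for `patience` epochs: restore best weights and stop
            print("Early stopping; best valid loss {:.6f} at epoch {}.".format(
                self.best_valid, self.best_valid_epoch))
            nn.load_params_from(self.best_weights)
            raise StopIteration()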
    hidden1_W=GlorotUniform('relu'),
    # hidden2_num_units=256, hidden2_nonlinearity=lasagne.nonlinearities.rectify,
    dropout_p=0.5,
    output_num_units=num_classes,
    output_nonlinearity=lasagne.nonlinearities.softmax,
    output_W=GlorotUniform(gain=1.0),

    # ----------------------- ConvNet Params -----------------------
    update=nesterov_momentum,
    update_learning_rate=learning_rate,
    update_momentum=momentum,
    max_epochs=num_epochs,
    verbose=1,
)

tic = time.time()

# Train for 12 rounds, checkpointing the weights after each round
for i in range(12):
    convNet.fit(dataset['X_train'], dataset['Y_train'])
    fl = './model1/saved_model_data' + str(i + 1) + '.npz'
    convNet.save_weights_to(fl)
    print 'Model saved to file :- ', fl

toc = time.time()

# Reload the checkpoint from round 6 and evaluate on the test set
fl = './model1/saved_model_data' + str(6) + '.npz'
convNet.load_weights_from(fl)

y_pred = convNet.predict(dataset['X_test'])
print classification_report(Y_test, y_pred)
print accuracy_score(Y_test, y_pred)
print 'Time taken to train the data :- ', toc - tic, 'seconds'