def test_diamond(self, NeuralNet):
    """Diamond topology: two hidden layers branch off the input and are
    merged by a concat layer whose ``incomings`` are resolved by name."""
    in_mock = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    h1_mock, h2_mock, cat_mock, out_mock = (
        Mock(__name__='MockLayer', __bases__=(Layer,)) for _ in range(4))
    net = NeuralNet(
        layers=[
            ('input', in_mock),
            ('hidden1', h1_mock),
            ('hidden2', h2_mock),
            ('concat', cat_mock),
            ('output', out_mock),
        ],
        input_shape=(10, 10),
        hidden2_incoming='input',
        concat_incomings=['hidden1', 'hidden2'],
    )
    net.initialize_layers(net.layers)

    # Each factory must have been invoked with its resolved incoming layer(s).
    in_mock.assert_called_with(name='input', shape=(10, 10))
    h1_mock.assert_called_with(incoming=in_mock.return_value, name='hidden1')
    h2_mock.assert_called_with(incoming=in_mock.return_value, name='hidden2')
    cat_mock.assert_called_with(
        incomings=[h1_mock.return_value, h2_mock.return_value], name='concat')
    out_mock.assert_called_with(incoming=cat_mock.return_value, name='output')
def test_diamond(self, NeuralNet):
    """Diamond wiring with plain mocks; ``concat_incoming`` lists both branches."""
    mocks = [Mock() for _ in range(5)]
    in_mock, h1_mock, h2_mock, cat_mock, out_mock = mocks
    net = NeuralNet(
        layers=[
            ('input', in_mock),
            ('hidden1', h1_mock),
            ('hidden2', h2_mock),
            ('concat', cat_mock),
            ('output', out_mock),
        ],
        input_shape=(10, 10),
        hidden2_incoming='input',
        concat_incoming=['hidden1', 'hidden2'],
    )
    net.initialize_layers(net.layers)

    in_mock.assert_called_with(name='input', shape=(10, 10))
    h1_mock.assert_called_with(incoming=in_mock.return_value, name='hidden1')
    h2_mock.assert_called_with(incoming=in_mock.return_value, name='hidden2')
    cat_mock.assert_called_with(
        incoming=[h1_mock.return_value, h2_mock.return_value], name='concat')
    out_mock.assert_called_with(incoming=cat_mock.return_value, name='output')
def test_diamond(self, NeuralNet):
    """Layer factories mocked as InputLayer/Layer subclasses form a diamond."""
    input_factory = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    branch_a = Mock(__name__='MockLayer', __bases__=(Layer,))
    branch_b = Mock(__name__='MockLayer', __bases__=(Layer,))
    joiner = Mock(__name__='MockLayer', __bases__=(Layer,))
    final = Mock(__name__='MockLayer', __bases__=(Layer,))
    net = NeuralNet(
        layers=[
            ('input', input_factory),
            ('hidden1', branch_a),
            ('hidden2', branch_b),
            ('concat', joiner),
            ('output', final),
        ],
        input_shape=(10, 10),
        hidden2_incoming='input',
        concat_incomings=['hidden1', 'hidden2'],
    )
    net.initialize_layers(net.layers)

    input_factory.assert_called_with(name='input', shape=(10, 10))
    branch_a.assert_called_with(
        incoming=input_factory.return_value, name='hidden1')
    branch_b.assert_called_with(
        incoming=input_factory.return_value, name='hidden2')
    joiner.assert_called_with(
        incomings=[branch_a.return_value, branch_b.return_value],
        name='concat')
    final.assert_called_with(incoming=joiner.return_value, name='output')
def test_initialization_legacy_with_unicode_names(self, NeuralNet):
    # Regression check: unicode layer names must still trigger the legacy
    # initialization path; if they don't, initialize_layers raises.
    in_mock = Mock(__name__="InputLayer", __bases__=(InputLayer,))
    hidden_a, hidden_b, out_mock = [
        Mock(__name__="MockLayer", __bases__=(Layer,)) for _ in range(3)]
    net = NeuralNet(
        layers=[(u"input", in_mock),
                (u"hidden1", hidden_a),
                (u"hidden2", hidden_b),
                (u"output", out_mock)],
        input_shape=(10, 10),
        hidden1_some="param",
    )
    net.initialize_layers()
def test_initialization_with_mask_input(self, NeuralNet):
    """A recurrent layer can take its mask from a second input layer by name."""
    net = NeuralNet(layers=[
        (InputLayer, {'shape': (None, 20, 32), 'name': 'l_in'}),
        (InputLayer, {'shape': (None, 20), 'name': 'l_mask'}),
        (RecurrentLayer, {'incoming': 'l_in',
                          'mask_input': 'l_mask',
                          'num_units': 2,
                          'name': 'l_rec'}),
    ])
    net.initialize_layers()
    # The mask was the second incoming passed to the recurrent layer.
    assert net.layers_['l_rec'].mask_incoming_index == 1
def test_initialization(self, NeuralNet):
    """(factory, kwargs) layer specs: per-layer kwargs are merged with the
    ``<name>_<param>`` overrides and layers are registered in ``layers_``.

    Fixed: the three ``layers_[...] is ...return_value`` identity checks
    below were bare expressions (no-ops) — they are now asserted, matching
    the sibling ``test_initialization_with_tuples`` tests.
    """
    input, hidden1, hidden2, output = Mock(), Mock(), Mock(), Mock()
    nn = NeuralNet(
        layers=[
            (input, {'shape': (10, 10), 'name': 'input'}),
            (hidden1, {'some': 'param', 'another': 'param'}),
            (hidden2, {}),
            (output, {'name': 'output'}),
        ],
        input_shape=(10, 10),
        mock1_some='iwin',  # overrides hidden1's 'some' kwarg
    )
    out = nn.initialize_layers(nn.layers)

    input.assert_called_with(name='input', shape=(10, 10))
    assert nn.layers_['input'] is input.return_value
    hidden1.assert_called_with(
        incoming=input.return_value, name='mock1',
        some='iwin', another='param')
    assert nn.layers_['mock1'] is hidden1.return_value
    hidden2.assert_called_with(incoming=hidden1.return_value, name='mock2')
    assert nn.layers_['mock2'] is hidden2.return_value
    output.assert_called_with(incoming=hidden2.return_value, name='output')
    assert out is nn.layers_['output']
def test_initialization(self, NeuralNet):
    """(name, factory) layer specs: layers are chained in order and kwargs
    routed by the ``<name>_<param>`` prefix (incoming passed positionally).

    Fixed: the three ``layers_[...] is ...return_value`` identity checks
    were bare expressions (no-ops) — they are now asserted.
    """
    input, hidden1, hidden2, output = Mock(), Mock(), Mock(), Mock()
    nn = NeuralNet(
        layers=[
            ('input', input),
            ('hidden1', hidden1),
            ('hidden2', hidden2),
            ('output', output),
        ],
        input_shape=(10, 10),
        hidden1_some='param',
    )
    out = nn.initialize_layers(nn.layers)

    input.assert_called_with(name='input', shape=(10, 10))
    assert nn.layers_['input'] is input.return_value
    hidden1.assert_called_with(input.return_value, name='hidden1',
                               some='param')
    assert nn.layers_['hidden1'] is hidden1.return_value
    hidden2.assert_called_with(hidden1.return_value, name='hidden2')
    assert nn.layers_['hidden2'] is hidden2.return_value
    output.assert_called_with(hidden2.return_value, name='output')
    assert out is nn.layers_['output']
def test_initialization_legacy(self, NeuralNet):
    """Legacy (name, factory) specs with keyword ``incoming`` wiring.

    Fixed: the three ``layers_[...] is ...return_value`` identity checks
    were bare expressions (no-ops) — they are now asserted.
    """
    input, hidden1, hidden2, output = Mock(), Mock(), Mock(), Mock()
    nn = NeuralNet(
        layers=[
            ('input', input),
            ('hidden1', hidden1),
            ('hidden2', hidden2),
            ('output', output),
        ],
        input_shape=(10, 10),
        hidden1_some='param',
    )
    out = nn.initialize_layers(nn.layers)

    input.assert_called_with(name='input', shape=(10, 10))
    assert nn.layers_['input'] is input.return_value
    hidden1.assert_called_with(
        incoming=input.return_value, name='hidden1', some='param')
    assert nn.layers_['hidden1'] is hidden1.return_value
    hidden2.assert_called_with(
        incoming=hidden1.return_value, name='hidden2')
    assert nn.layers_['hidden2'] is hidden2.return_value
    output.assert_called_with(
        incoming=hidden2.return_value, name='output')
    assert out is nn.layers_['output']
def test_initialization_legacy(self, NeuralNet):
    """Legacy (name, factory) specs: kwargs routed by layer-name prefix;
    ``initialize_layers`` returns a sequence of output layers."""
    in_factory = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    h1_factory = Mock(__name__='MockLayer', __bases__=(Layer,))
    h2_factory = Mock(__name__='MockLayer', __bases__=(Layer,))
    out_factory = Mock(__name__='MockLayer', __bases__=(Layer,))
    net = NeuralNet(
        layers=[
            ('input', in_factory),
            ('hidden1', h1_factory),
            ('hidden2', h2_factory),
            ('output', out_factory),
        ],
        input_shape=(10, 10),
        hidden1_some='param',
    )
    result = net.initialize_layers(net.layers)

    in_factory.assert_called_with(name='input', shape=(10, 10))
    assert net.layers_['input'] is in_factory.return_value
    h1_factory.assert_called_with(
        incoming=in_factory.return_value, name='hidden1', some='param')
    assert net.layers_['hidden1'] is h1_factory.return_value
    h2_factory.assert_called_with(
        incoming=h1_factory.return_value, name='hidden2')
    assert net.layers_['hidden2'] is h2_factory.return_value
    out_factory.assert_called_with(
        incoming=h2_factory.return_value, name='output')
    assert result[0] is net.layers_['output']
def test_initialization_with_tuples(self, NeuralNet):
    """(factory, kwargs) specs: auto names ('mock1', 'mock2') are assigned
    and ``mock1_some`` overrides the per-layer 'some' kwarg."""
    in_factory = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    h1_factory = Mock(__name__='MockLayer', __bases__=(Layer,))
    h2_factory = Mock(__name__='MockLayer', __bases__=(Layer,))
    out_factory = Mock(__name__='MockLayer', __bases__=(Layer,))
    specs = [
        (in_factory, {'shape': (10, 10), 'name': 'input'}),
        (h1_factory, {'some': 'param', 'another': 'param'}),
        (h2_factory, {}),
        (out_factory, {'name': 'output'}),
    ]
    net = NeuralNet(layers=specs, input_shape=(10, 10), mock1_some='iwin')
    result = net.initialize_layers(net.layers)

    in_factory.assert_called_with(name='input', shape=(10, 10))
    assert net.layers_['input'] is in_factory.return_value
    h1_factory.assert_called_with(
        incoming=in_factory.return_value, name='mock1',
        some='iwin', another='param')
    assert net.layers_['mock1'] is h1_factory.return_value
    h2_factory.assert_called_with(
        incoming=h1_factory.return_value, name='mock2')
    assert net.layers_['mock2'] is h2_factory.return_value
    out_factory.assert_called_with(
        incoming=h2_factory.return_value, name='output')
    assert result[0] is net.layers_['output']
def test_initialization_with_layer_instance(self, NeuralNet):
    """Pre-built layer instances are adopted as-is; layers without a name
    get an auto-generated one ('input0')."""
    in_layer = InputLayer(shape=(128, 13))          # unnamed -> 'input0'
    dense = DenseLayer(in_layer, name='output', num_units=2)
    net = NeuralNet(layers=dense)
    result = net.initialize_layers()
    assert result == dense == net.layers_['output']
    assert net.layers_['input0'] == in_layer
def test_initialization_with_layer_instance(self, NeuralNet):
    """Pre-built layer instances: initialize_layers returns a sequence whose
    first element is the (named) output layer; unnamed layers are auto-named."""
    in_layer = InputLayer(shape=(128, 13))          # unnamed -> 'input0'
    dense = DenseLayer(in_layer, name='output', num_units=2)
    net = NeuralNet(layers=dense)
    result = net.initialize_layers()
    assert result[0] == dense == net.layers_['output']
    assert net.layers_['input0'] == in_layer
def test_initialization_legacy(self, NeuralNet):
    """Legacy (name, factory) specs with keyword ``incoming`` wiring.

    Fixed: the three ``layers_[...] is ...return_value`` identity checks
    were bare expressions (no-ops) — they are now asserted.
    """
    input = Mock(__name__='InputLayer', __bases__=(InputLayer, ))
    hidden1, hidden2, output = [
        Mock(__name__='MockLayer', __bases__=(Layer, )) for i in range(3)
    ]
    nn = NeuralNet(
        layers=[
            ('input', input),
            ('hidden1', hidden1),
            ('hidden2', hidden2),
            ('output', output),
        ],
        input_shape=(10, 10),
        hidden1_some='param',
    )
    out = nn.initialize_layers(nn.layers)

    input.assert_called_with(name='input', shape=(10, 10))
    assert nn.layers_['input'] is input.return_value
    hidden1.assert_called_with(incoming=input.return_value,
                               name='hidden1', some='param')
    assert nn.layers_['hidden1'] is hidden1.return_value
    hidden2.assert_called_with(incoming=hidden1.return_value, name='hidden2')
    assert nn.layers_['hidden2'] is hidden2.return_value
    output.assert_called_with(incoming=hidden2.return_value, name='output')
    assert out is nn.layers_['output']
def test_initialization_with_tuples(self, NeuralNet):
    """(factory, kwargs) specs; ``mock1_some`` overrides the layer kwarg."""
    in_factory = Mock(__name__="InputLayer", __bases__=(InputLayer,))
    mid_factories = [
        Mock(__name__="MockLayer", __bases__=(Layer,)) for _ in range(3)]
    h1_factory, h2_factory, out_factory = mid_factories
    net = NeuralNet(
        layers=[
            (in_factory, {"shape": (10, 10), "name": "input"}),
            (h1_factory, {"some": "param", "another": "param"}),
            (h2_factory, {}),
            (out_factory, {"name": "output"}),
        ],
        input_shape=(10, 10),
        mock1_some="iwin",
    )
    result = net.initialize_layers(net.layers)

    in_factory.assert_called_with(name="input", shape=(10, 10))
    assert net.layers_["input"] is in_factory.return_value
    h1_factory.assert_called_with(
        incoming=in_factory.return_value, name="mock1",
        some="iwin", another="param")
    assert net.layers_["mock1"] is h1_factory.return_value
    h2_factory.assert_called_with(
        incoming=h1_factory.return_value, name="mock2")
    assert net.layers_["mock2"] is h2_factory.return_value
    out_factory.assert_called_with(
        incoming=h2_factory.return_value, name="output")
    assert result is net.layers_["output"]
def test_initialization_with_tuples(self, NeuralNet):
    """(factory, kwargs) specs; initialize_layers returns the output layer."""
    factories = {}
    factories['input'] = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    for key in ('h1', 'h2', 'out'):
        factories[key] = Mock(__name__='MockLayer', __bases__=(Layer,))
    net = NeuralNet(
        layers=[
            (factories['input'], {'shape': (10, 10), 'name': 'input'}),
            (factories['h1'], {'some': 'param', 'another': 'param'}),
            (factories['h2'], {}),
            (factories['out'], {'name': 'output'}),
        ],
        input_shape=(10, 10),
        mock1_some='iwin',
    )
    result = net.initialize_layers(net.layers)

    factories['input'].assert_called_with(name='input', shape=(10, 10))
    assert net.layers_['input'] is factories['input'].return_value
    factories['h1'].assert_called_with(
        incoming=factories['input'].return_value, name='mock1',
        some='iwin', another='param')
    assert net.layers_['mock1'] is factories['h1'].return_value
    factories['h2'].assert_called_with(
        incoming=factories['h1'].return_value, name='mock2')
    assert net.layers_['mock2'] is factories['h2'].return_value
    factories['out'].assert_called_with(
        incoming=factories['h2'].return_value, name='output')
    assert result is net.layers_['output']
def test_initialization_legacy_with_unicode_names(self, NeuralNet):
    # Unicode layer names must trigger the legacy initialization path;
    # if they don't, initialize_layers raises an error.
    in_mock = Mock(__name__='InputLayer', __bases__=(InputLayer,))
    h1_mock = Mock(__name__='MockLayer', __bases__=(Layer,))
    h2_mock = Mock(__name__='MockLayer', __bases__=(Layer,))
    out_mock = Mock(__name__='MockLayer', __bases__=(Layer,))
    net = NeuralNet(
        layers=[
            (u'input', in_mock),
            (u'hidden1', h1_mock),
            (u'hidden2', h2_mock),
            (u'output', out_mock),
        ],
        input_shape=(10, 10),
        hidden1_some='param',
    )
    net.initialize_layers()
def test_initializtion_with_tuples_resolve_layers(self, NeuralNet):
    """Dotted-path strings in (factory, kwargs) specs resolve to real
    lasagne classes."""
    # NOTE(review): the "initializtion" typo is kept — renaming would
    # change the public test id.
    net = NeuralNet(layers=[
        ('lasagne.layers.InputLayer', {'shape': (None, 10)}),
        ('lasagne.layers.DenseLayer', {'num_units': 33}),
    ])
    out, = net.initialize_layers(net.layers)
    assert out.num_units == 33
def test_legacy_initialization_with_mask_input(self, NeuralNet):
    """Legacy ``<name>_<param>`` kwargs form of the mask-input wiring."""
    layer_params = dict(
        l_in_shape=(None, 20, 32), l_in_name='l_in',
        l_mask_shape=(None, 20), l_mask_name='l_mask',
        l_rec_incoming='l_in', l_rec_mask_input='l_mask',
        l_rec_num_units=2, l_rec_name='l_rec',
    )
    net = NeuralNet(
        layers=[('l_in', InputLayer),
                ('l_mask', InputLayer),
                ('l_rec', RecurrentLayer)],
        **layer_params)
    net.initialize_layers()
    # The mask was the second incoming passed to the recurrent layer.
    assert net.layers_['l_rec'].mask_incoming_index == 1
def test_initializtion_legacy_resolve_layers(self, NeuralNet):
    """Dotted-path strings in legacy (name, factory) specs resolve to real
    lasagne classes."""
    # NOTE(review): the "initializtion" typo is kept — renaming would
    # change the public test id.
    net = NeuralNet(
        layers=[('input', 'lasagne.layers.InputLayer'),
                ('output', 'lasagne.layers.DenseLayer')],
        input_shape=(None, 10),
        output_num_units=33,
    )
    out, = net.initialize_layers(net.layers)
    assert out.num_units == 33
def test_diamond(self, NeuralNet):
    """Diamond graph with subclass-mocked factories; ``concat_incomings``
    names both branches."""
    in_mock = Mock(__name__="InputLayer", __bases__=(InputLayer,))
    branch_mocks = [
        Mock(__name__="MockLayer", __bases__=(Layer,)) for _ in range(4)]
    h1_mock, h2_mock, cat_mock, out_mock = branch_mocks
    net = NeuralNet(
        layers=[
            ("input", in_mock),
            ("hidden1", h1_mock),
            ("hidden2", h2_mock),
            ("concat", cat_mock),
            ("output", out_mock),
        ],
        input_shape=(10, 10),
        hidden2_incoming="input",
        concat_incomings=["hidden1", "hidden2"],
    )
    net.initialize_layers(net.layers)

    in_mock.assert_called_with(name="input", shape=(10, 10))
    h1_mock.assert_called_with(incoming=in_mock.return_value, name="hidden1")
    h2_mock.assert_called_with(incoming=in_mock.return_value, name="hidden2")
    cat_mock.assert_called_with(
        incomings=[h1_mock.return_value, h2_mock.return_value],
        name="concat")
    out_mock.assert_called_with(incoming=cat_mock.return_value, name="output")
def test_diamond(self, NeuralNet):
    """Diamond wiring with plain mocks; incoming layers passed positionally."""
    in_mock = Mock()
    h1_mock = Mock()
    h2_mock = Mock()
    cat_mock = Mock()
    out_mock = Mock()
    net = NeuralNet(
        layers=[
            ('input', in_mock),
            ('hidden1', h1_mock),
            ('hidden2', h2_mock),
            ('concat', cat_mock),
            ('output', out_mock),
        ],
        input_shape=(10, 10),
        hidden2_incoming='input',
        concat_incoming=['hidden1', 'hidden2'],
    )
    net.initialize_layers(net.layers)

    in_mock.assert_called_with(name='input', shape=(10, 10))
    h1_mock.assert_called_with(in_mock.return_value, name='hidden1')
    h2_mock.assert_called_with(in_mock.return_value, name='hidden2')
    cat_mock.assert_called_with(
        [h1_mock.return_value, h2_mock.return_value], name='concat')
    out_mock.assert_called_with(cat_mock.return_value, name='output')
def test_initialization_legacy(self, NeuralNet):
    """Legacy (name, factory) specs: chained wiring, prefix-routed kwargs."""
    in_factory = Mock(__name__="InputLayer", __bases__=(InputLayer,))
    rest = [Mock(__name__="MockLayer", __bases__=(Layer,)) for _ in range(3)]
    h1_factory, h2_factory, out_factory = rest
    net = NeuralNet(
        layers=[("input", in_factory),
                ("hidden1", h1_factory),
                ("hidden2", h2_factory),
                ("output", out_factory)],
        input_shape=(10, 10),
        hidden1_some="param",
    )
    result = net.initialize_layers(net.layers)

    in_factory.assert_called_with(name="input", shape=(10, 10))
    assert net.layers_["input"] is in_factory.return_value
    h1_factory.assert_called_with(
        incoming=in_factory.return_value, name="hidden1", some="param")
    assert net.layers_["hidden1"] is h1_factory.return_value
    h2_factory.assert_called_with(
        incoming=h1_factory.return_value, name="hidden2")
    assert net.layers_["hidden2"] is h2_factory.return_value
    out_factory.assert_called_with(
        incoming=h2_factory.return_value, name="output")
    assert result is net.layers_["output"]
def test_initialization(self, NeuralNet):
    """(factory, kwargs) specs with auto names ('mock1', 'mock2') and a
    ``mock1_some`` override.

    Fixed: the three ``layers_[...] is ...return_value`` identity checks
    were bare expressions (no-ops) — they are now asserted.
    """
    input, hidden1, hidden2, output = Mock(), Mock(), Mock(), Mock()
    nn = NeuralNet(
        layers=[
            (input, {'shape': (10, 10), 'name': 'input'}),
            (hidden1, {'some': 'param', 'another': 'param'}),
            (hidden2, {}),
            (output, {'name': 'output'}),
        ],
        input_shape=(10, 10),
        mock1_some='iwin',
    )
    out = nn.initialize_layers(nn.layers)

    input.assert_called_with(name='input', shape=(10, 10))
    assert nn.layers_['input'] is input.return_value
    hidden1.assert_called_with(incoming=input.return_value, name='mock1',
                               some='iwin', another='param')
    assert nn.layers_['mock1'] is hidden1.return_value
    hidden2.assert_called_with(incoming=hidden1.return_value, name='mock2')
    assert nn.layers_['mock2'] is hidden2.return_value
    output.assert_called_with(incoming=hidden2.return_value, name='output')
    assert out is nn.layers_['output']
def test_initialization_with_layer_instance_bad_params(self, NeuralNet):
    """Passing ``<name>_<param>`` kwargs for an already-constructed layer
    instance is invalid and must raise ValueError."""
    dense = DenseLayer(InputLayer(shape=(128, 13)), num_units=2)
    net = NeuralNet(layers=dense, dense1_num_units=3)
    with pytest.raises(ValueError):
        net.initialize_layers()
def main(input_file, model_path): batch_size = 128 nb_classes = 62 # A-Z, a-z and 0-9 nb_epoch = 2 # Input image dimensions img_rows, img_cols = 32, 32 # Path of data files path = input_file ### PREDICTION ### # Load the model with the highest validation accuracy # model.load_weights("best.kerasModelWeights") # Load Kaggle test set X_test = np.load(path + "/testPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy") print X_test.shape # Load the preprocessed data and labels X_train_all = np.load(path + "/trainPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy") Y_train_all = np.load(path + "/labelsPreproc.npy") X_train, X_val, Y_train, Y_val = \ train_test_split(X_train_all, Y_train_all, test_size=0.25, stratify=np.argmax(Y_train_all, axis=1)) print X_train.shape Y_val = convert_(Y_val) X_train = X_train.reshape((-1, 1, 32, 32)) # # # input shape for neural network # labels = labels.astype(np.uint8) X_val = X_val.reshape((-1, 1, 32, 32)) # # # input shape for neural network Y_val = Y_val.astype(np.uint8) # input_image_vector_shape = (None, 1, 32, 32) net1 = NeuralNet( layers=[ ('input', layers.InputLayer), ('conv2d1', layers.Conv2DLayer), ('maxpool1', layers.MaxPool2DLayer), ('conv2d2', layers.Conv2DLayer), ('maxpool2', layers.MaxPool2DLayer), ('conv2d3', layers.Conv2DLayer), ('maxpool3', layers.MaxPool2DLayer), # ('conv2d4', layers.Conv2DLayer), # ('maxpool4', layers.MaxPool2DLayer), ('dropout1', layers.DropoutLayer), ('dropout2', layers.DropoutLayer), ('dense', layers.DenseLayer), # ('dense2', layers.DenseLayer), ('output', layers.DenseLayer), ], input_shape=input_image_vector_shape, conv2d1_num_filters=128, conv2d1_filter_size=(3, 3), conv2d1_nonlinearity=lasagne.nonlinearities.tanh, conv2d1_W=lasagne.init.GlorotUniform(), conv2d1_pad=(2, 2), maxpool1_pool_size=(2, 2), conv2d2_num_filters=256, conv2d2_filter_size=(3, 3), conv2d2_nonlinearity=lasagne.nonlinearities.rectify, conv2d2_pad=(2, 2), maxpool2_pool_size=(2, 2), conv2d3_num_filters=512, 
conv2d3_filter_size=(3, 3), conv2d3_nonlinearity=lasagne.nonlinearities.rectify, conv2d3_pad=(2, 2), maxpool3_pool_size=(2, 2), dropout1_p=0.5, dropout2_p=0.5, dense_num_units=8192, dense_nonlinearity=lasagne.nonlinearities.rectify, # dense2_num_units = 16, # dense2_nonlinearity = lasagne.nonlinearities.rectify, output_nonlinearity=lasagne.nonlinearities.softmax, output_num_units=62, update=momentum, # 75.5 with tanh init dense num = 256% update_learning_rate=0.03, update_momentum=0.8, max_epochs=1000, verbose=1, ) print "Loading Neural Net Parameters..." net1.initialize_layers() net1.load_weights_from('{}_weightfile.w'.format(model_path)) net1.load_params_from('{}_paramfile.w'.format(model_path)) from sklearn.metrics import classification_report, accuracy_score, confusion_matrix print 'Testing...' y_true, y_pred = Y_val, net1.predict(X_val) # Get our predictions print(classification_report(y_true, y_pred)) # Classification on each digit print net1.predict(X_val) print Y_val a = confusion_matrix(Y_val, net1.predict(X_val)) b = np.trace(a) print 'Training Accuracy: ' + str(float(b) / float(np.sum(a)))
def main(input_file, model_path): batch_size = 128 nb_classes = 62 # A-Z, a-z and 0-9 nb_epoch = 2 # Input image dimensions img_rows, img_cols = 32, 32 # Path of data files path = input_file ### PREDICTION ### # # Load the model with the highest validation accuracy # model.load_weights("best.kerasModelWeights") # Load Kaggle test set X_test = np.load(path + "/testPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy") print X_test.shape # Load the preprocessed data and labels X_train_all = np.load(path + "/trainPreproc_" + str(img_rows) + "_" + str(img_cols) + ".npy") Y_train_all = np.load(path + "/labelsPreproc.npy") X_train, X_val, Y_train, Y_val = \ train_test_split(X_train_all, Y_train_all, test_size=0.25, stratify=np.argmax(Y_train_all, axis=1)) print X_train.shape Y_val = convert_(Y_val) X_train = X_train.reshape((-1, 1, 32, 32)) # # # input shape for neural network # labels = labels.astype(np.uint8) X_val = X_val.reshape((-1, 1, 32, 32)) # # # input shape for neural network Y_val = Y_val.astype(np.uint8) # input_image_vector_shape = (None, 1, 32, 32) # ''' @description: Two layer convolutional neural network ''' #input layer input_layer = ('input', layers.InputLayer) # fist layer design first_layer_conv_filter = layers.Conv2DLayer first_layer_pool_filter = layers.MaxPool2DLayer conv_filter = ('conv2d1', first_layer_conv_filter) pool_filter = ('maxpool1', first_layer_pool_filter) # second layer design second_layer_conv_filter = layers.Conv2DLayer second_layer_pool_filter = layers.MaxPool2DLayer conv_filter2 = ('conv2d2', second_layer_conv_filter) pool_filter2 = ('maxpool2', second_layer_pool_filter) # dropout rates ( used for regularization ) dropout_layer = layers.DropoutLayer drop1 = 0.5 drop2 = 0.5 first_drop_layer = ('dropout1', dropout_layer) second_drop_layer = ('dropout2', dropout_layer) # # network parameters design_layers = [ input_layer, conv_filter, pool_filter, conv_filter2, pool_filter2, first_drop_layer, ('dense', layers.DenseLayer), 
second_drop_layer, ('output', layers.DenseLayer) ] # Neural net object instance net1 = NeuralNet( # declare convolutional neural network layers # convolutional mapping and pooling window sized will be declared # and set to various sizes layers=design_layers, # input layer # vector size of image will be taken as 28 x 28 input_shape=input_image_vector_shape, # first layer convolutional filter # mapping layer set at 5 x 5 conv2d1_num_filters=32, conv2d1_filter_size=(5, 5), conv2d1_nonlinearity=lasagne.nonlinearities.rectify, conv2d1_W=lasagne.init.HeNormal(gain='relu'), # first layer convolutional pool filter # mapping layer set at 2 x 2 maxpool1_pool_size=(2, 2), # second layer convolutional filter # mapping layer set at 5 x 5 conv2d2_num_filters=32, conv2d2_filter_size=(5, 5), conv2d2_nonlinearity=lasagne.nonlinearities.rectify, # second layer convolutional pool filter # mapping layer set at 2 x 2 maxpool2_pool_size=(2, 2), dropout1_p=drop1, # hidden unit density dense_num_units=512, dense_nonlinearity=lasagne.nonlinearities.rectify, # dropout2 dropout2_p=drop2, # output output_nonlinearity=lasagne.nonlinearities.softmax, #corresponds to the amount of target labels to compare to output_num_units=62, # optimization method params # NOTE: Different momentum steepest gradient methods yield varied # results. update=nesterov_momentum, # 69 update_learning_rate=0.01, update_momentum=0.078, # update_learning_rate=1e-4, # update_momentum=0.9, # max_epochs=1000, # update_learning_rate=0.1, # update_momentum=0.003, max_epochs=1000, verbose=1, ) print "Loading Neural Net Parameters..." net1.initialize_layers() net1.load_weights_from('{}_weightfile.w'.format(model_path)) ''' new_twoLayer_paramfile.w new_twoLayer_weightfile.w ''' net1.load_params_from('{}_paramfile.w'.format(model_path)) from sklearn.metrics import classification_report, accuracy_score, confusion_matrix print 'Testing...' 
y_true, y_pred = Y_val, net1.predict(X_val) # Get our predictions print(classification_report(y_true, y_pred)) # Classification on each digit print net1.predict(X_val) print Y_val a = confusion_matrix(Y_val, net1.predict(X_val)) b = np.trace(a) print 'Training Accuracy: ' + str(float(b) / float(np.sum(a)))
# NOTE(review): fragment — the opening ``NeuralNet(`` of this call lies
# outside this chunk; only comments added here.
layers=layer,
update=updates.nesterov_momentum,
update_momentum=0.9,
# Learning rate kept in a theano shared variable so it can be changed
# during training without rebuilding the update graph.
update_learning_rate=theano.shared(floatX(0.001)),
batch_iterator_train=batch_iterator_train,
batch_iterator_test=batch_iterator_test,
verbose=1,
train_split=train_split,
max_epochs=4,
)

# load pretrained model
# NOTE(review): pickle.load on a downloaded file — only safe for trusted
# archives (pickle can execute arbitrary code).
with open('../input/pretrained/vgg16.pkl', 'rb') as f:
    params = pickle.load(f)

# replace last 2 param layers ((4096,1000)) and (1000,) with ((4096,10)) and (10,)
params['param values'][30] = params['param values'][30][:, :10]
params['param values'][31] = params['param values'][31][:10]

net.initialize_layers()
# Push the (truncated) pretrained VGG16 weights into the network's layers.
layers.set_all_param_values(net.layers_.values(), params['param values'])

print("Training neural network...")
net.fit(X, y)
# Free the training images before loading the test set to limit peak memory.
del X

X_test, ids = load_test_data(path, grayscale=False, img_shape=IMG_SHAPE)

print("Predicting on test data...")
y_proba = net.predict_proba(X_test)
make_submission('../output/submission_01.csv', y_proba, ids)