def C_comparison(length, features_train, labels_train, features_test, labels_test):
    # values of the regularization parameter C to evaluate
    C = [0.001, 0.05, 0.1, 0.3, 0.5, 0.8, 1, 10, 100, 350, 500, 1000,
         3500, 5000, 10000, 50000, 100000]
    scores = []

    for c in C:
        # train a logistic regression model with regularization parameter c
        model = LogisticRegression.train(features_train, labels_train, c)
        prediction = LogisticRegression.predict(features_test, model)
        # average F1 over the two classes (0 and 1)
        scores.append(measures.avgF1(labels_test, prediction, 0, 1))

    plt.plot(C, scores, color="blue", linewidth=2.0)
    plt.xticks(C)
    plt.ylabel("F1")
    plt.xlabel("C")
    plt.show()
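
# C_comparison relies on measures.avgF1, which is not shown in this listing. It
# is assumed to return the F1 score averaged over the two class labels it is
# given (here 0 and 1). A minimal, self-contained sketch under that assumption
# (the helper name is illustrative only):
def _avg_f1_sketch(y_true, y_pred, label_a, label_b):
    def f1_for(label):
        tp = sum(1 for t, p in zip(y_true, y_pred) if t == label and p == label)
        fp = sum(1 for t, p in zip(y_true, y_pred) if t != label and p == label)
        fn = sum(1 for t, p in zip(y_true, y_pred) if t == label and p != label)
        precision = tp / float(tp + fp) if (tp + fp) > 0 else 0.0
        recall = tp / float(tp + fn) if (tp + fn) > 0 else 0.0
        if precision + recall == 0:
            return 0.0
        return 2 * precision * recall / (precision + recall)
    return (f1_for(label_a) + f1_for(label_b)) / 2.0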
def plot_learning_curve(features_train, labels_train, features_test, labels_test, C=1):
    # train on every 10% increment of the training set and compute the
    # training error and the testing error
    step = len(features_train) / 10
    train = []
    test = []
    maj_clas = []

    for i in range(0, 10):
        print i

        # use the first (i+1)*10 percent of the training set
        f = features_train[0:((i + 1) * step)]
        l = labels_train[0:((i + 1) * step)]

        # train a classifier on this subset of the training set
        model = LogisticRegression.train(f, l)
        #model = SVM.train(f, l, c=C, k="linear")

        # training error
        prediction = LogisticRegression.predict(f, model)
        #prediction = SVM.predict(f, model)
        train.append(measures.error(l, prediction))

        # testing error
        prediction = LogisticRegression.predict(features_test, model)
        #prediction = SVM.predict(features_test, model)
        test.append(measures.error(labels_test, prediction))

        # error of the majority-classifier baseline
        prediction = MajorityClassifier.predictSubj(features_test)
        maj_clas.append(measures.error(labels_test, prediction))

    #karabatsis = [0.6431] * len(train)

    # x axis: percentage of the training set used (10% ... 100%)
    x = (np.arange(len(train)) + 1) * 10
    plt.plot(x, train, color="blue", linewidth=2.0, label="Training Error")
    plt.plot(x, test, color="blue", linestyle="dashed", linewidth=2.0, label="Testing Error")
    plt.plot(x, maj_clas, color="red", linewidth=2.0, label="Majority Classifier Error")
    #plt.plot(x, karabatsis, color="green", linewidth=2.0, label="Karabatsis 14")
    plt.ylim(0, 1)
    plt.ylabel('Error')
    plt.xlabel("% of messages")
    plt.legend(loc="lower left")
    plt.show()
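
# plot_learning_curve also depends on measures.error and
# MajorityClassifier.predictSubj, neither of which appears in this listing. The
# sketch below shows what they are assumed to do: a plain misclassification
# rate, and a baseline that predicts the single most frequent training label
# for every test instance. Names and behaviour are assumptions, not the
# project's actual implementation.
def _error_sketch(y_true, y_pred):
    wrong = sum(1 for t, p in zip(y_true, y_pred) if t != p)
    return wrong / float(len(y_true))

def _majority_baseline_sketch(labels_train, n_test):
    from collections import Counter
    majority_label = Counter(labels_train).most_common(1)[0][0]
    return [majority_label] * n_test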
def plotFeaturesF1(features_train, labels_train, features_test, labels_test):
    x = list(np.arange(len(features_train[0])))
    #x = list(np.arange(5))
    y = []

    for i in range(0, len(features_train[0])):
        # train and evaluate on the i-th feature alone
        f_train = features_train[:, i]
        f_test = features_test[:, i]
        f_train = f_train.reshape(f_train.shape[0], 1)
        f_test = f_test.reshape(f_test.shape[0], 1)

        model = LogisticRegression.train(f_train, labels_train)
        prediction = LogisticRegression.predict(f_test, model)
        y.append(measures.avgF1(labels_test, prediction, 0, 1))

    plt.plot(x, y, color="blue", linewidth=2.0)
    plt.ylabel("F1")
    plt.xlabel("# of Feature")
    plt.xticks(x)
    plt.show()
def plot_recall_precision(length, features_train, labels_train, features_test, labels_test):
    #threshold = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    threshold = [x / 1000.0 for x in range(0, 1001, 1)]
    step = length / 3
    colors = ['b', 'r', 'g']

    for i in range(0, 3):
        # use the first (i+1)*step training examples (roughly (i+1)*33% of the data)
        f = features_train[0:((i + 1) * step)]
        l = labels_train[0:((i + 1) * step)]

        # train a classifier on this subset of the training set
        model = LogisticRegression.train(f, l)

        # recall-precision for every threshold value
        recall = []
        precision = []
        for t in threshold:
            prediction = LogisticRegression.predict(features_test, model, t)
            recall.append(measures.recall(labels_test, prediction, 0))
            precision.append(measures.precision(labels_test, prediction, 0))

        plt.plot(recall, precision, linewidth=2.0,
                 label=str((i + 1) * 33) + "% of train data", color=colors[i])

    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Negative tweets')
    plt.legend()
    plt.show()
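
# Illustrative driver for the plotting helpers above. It assumes the feature
# matrices and label vectors are available as numpy arrays; the .npy file names
# below are placeholders, not files that exist in the project.
def run_all_plots_example():
    import numpy as np
    features_train = np.load("features_train.npy")  # placeholder path
    labels_train = np.load("labels_train.npy")      # placeholder path
    features_test = np.load("features_test.npy")    # placeholder path
    labels_test = np.load("labels_test.npy")        # placeholder path

    C_comparison(len(features_train), features_train, labels_train,
                 features_test, labels_test)
    plot_learning_curve(features_train, labels_train, features_test, labels_test)
    plotFeaturesF1(features_train, labels_train, features_test, labels_test)
    plot_recall_precision(len(features_train), features_train, labels_train,
                          features_test, labels_test)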
def build_model(datasets, batch_size, rng, learning_rate):
    x = T.matrix('x')
    y = T.ivector('y')
    index = T.lscalar()

    # reshape the input for the first conv-pool layer; LeNet-5 expects
    # 32x32 single-channel images (e.g. 28x28 MNIST digits padded to 32x32)
    layer0_input = x.reshape((batch_size, 1, 32, 32))

    layer0_conv = ConvolutionLayer(
        rng=rng,
        input=layer0_input,
        input_shape=(batch_size, 1, 32, 32),
        filter_shape=(6, 1, 5, 5)
    )

    layer0_subsample = SubsampleLayer(
        rng=rng,
        input=layer0_conv.output,
        input_shape=(batch_size, 6, 28, 28),
        pool_size=(2, 2)
    )

    # the partially-connected convolution layer (C3 in LeCun's LeNet-5)
    layer1_conv = CustomConvLayer(
        rng=rng,
        input=layer0_subsample.output,
        input_shape=(batch_size, 6, 14, 14),
        filter_shape=(16, 6, 5, 5)
    )

    layer1_subsample = SubsampleLayer(
        rng=rng,
        input=layer1_conv.output,
        input_shape=(batch_size, 16, 10, 10),
        pool_size=(2, 2)
    )

    layer2_conv = ConvolutionLayer(
        rng=rng,
        input=layer1_subsample.output,
        input_shape=(batch_size, 16, 5, 5),
        filter_shape=(120, 16, 5, 5)
    )

    # flatten the output of the conv-pool layers for input to the MLP layers
    layer3_input = layer2_conv.output.flatten(2)

    layer3 = HiddenLayer(
        rng,
        input=layer3_input,
        n_in=120,
        n_out=84,
        activation=T.tanh
    )

    #TODO: Change to RBF
    layer4 = LogisticRegression(input=layer3.output, n_in=84, n_out=10)

    cost = layer4.negative_log_likelihood(y)

    params = layer4.params + layer3.params + \
        layer2_conv.params + \
        layer1_conv.params + layer1_subsample.params + \
        layer0_conv.params + layer0_subsample.params

    gradients = T.grad(cost, params)

    updates = [(param_i, param_i - learning_rate * grad_i)
               for param_i, grad_i in zip(params, gradients)]

    train_set_x, train_set_y = datasets[0]
    test_set_x, test_set_y = datasets[1]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    test_model = theano.function(
        [index],
        layer4.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    train_pred = theano.function(
        [index],
        layer4.y_pred,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
        }
    )

    test_pred = theano.function(
        [index],
        layer4.y_pred,
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
        }
    )

    return (train_model, train_pred, test_model, test_pred)
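
# Minimal sketch of how build_model might be driven. It assumes `datasets` is a
# list of Theano shared-variable pairs [(train_set_x, train_set_y),
# (test_set_x, test_set_y)], as build_model itself expects; the function name
# and hyperparameter values below are illustrative, not part of the project.
def train_example(datasets, batch_size=500, n_epochs=10, learning_rate=0.1):
    rng = numpy.random.RandomState(1234)
    train_model, train_pred, test_model, test_pred = build_model(
        datasets, batch_size, rng, learning_rate)

    n_train_batches = datasets[0][0].get_value(borrow=True).shape[0] / batch_size
    n_test_batches = datasets[1][0].get_value(borrow=True).shape[0] / batch_size

    for epoch in xrange(n_epochs):
        # one SGD update per minibatch, then evaluate the 0-1 error on the test set
        costs = [train_model(i) for i in xrange(n_train_batches)]
        errors = [test_model(i) for i in xrange(n_test_batches)]
        print 'epoch %i: mean training cost %f, test error %f' % (
            epoch, numpy.mean(costs), numpy.mean(errors))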
def evaluate_net(learning_rate=0.03, n_epochs=200, dataset='mnist.pkl.gz',
                 nkerns=[10, 15, 20], batch_size=200):
    """ Train and evaluate a LeNet-style convolutional network
    (adapted from the Theano LeNet/MNIST tutorial).

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training/testing

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = prepare_training_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    #TODO: make the edge length dynamic
    edge_length = 128

    # compute number of minibatches for training, validation and testing
    n_train_samples = train_set_x.get_value(borrow=True).shape[0]
    n_valid_samples = valid_set_x.get_value(borrow=True).shape[0]
    n_test_samples = test_set_x.get_value(borrow=True).shape[0]

    logger.info("Set sizes:\n\tTrain: {}\n\tValid: {}\n\tTest: {}"
                .format(n_train_samples, n_valid_samples, n_test_samples))

    n_train_batches = n_train_samples / batch_size
    n_valid_batches = n_valid_samples / batch_size
    n_test_batches = n_test_samples / batch_size

    # make sure the batch size isn't too big
    assert n_train_batches > 0 and n_valid_batches > 0 and n_test_batches > 0

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as a 1D vector of [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size, edge_length * edge_length)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    layer0_input = x.reshape((batch_size, 1, edge_length, edge_length))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (128-5+1, 128-5+1) = (124, 124)
    # maxpooling reduces this further to (124/2, 124/2) = (62, 62)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 62, 62)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, edge_length, edge_length),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2)
    )

    # Construct the second convolutional pooling layer:
    # filtering reduces the image size to (62-7+1, 62-7+1) = (56, 56)
    # maxpooling reduces this further to (56/2, 56/2) = (28, 28)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 28, 28)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 62, 62),
        filter_shape=(nkerns[1], nkerns[0], 7, 7),
        poolsize=(2, 2)
    )

    # Construct the third convolutional pooling layer:
    # filtering reduces the image size to (28-5+1, 28-5+1) = (24, 24)
    # maxpooling reduces this further to (24/4, 24/4) = (6, 6)
    # 4D output tensor is thus of shape (batch_size, nkerns[2], 6, 6)
    layer1b = LeNetConvPoolLayer(
        rng,
        input=layer1.output,
        image_shape=(batch_size, nkerns[1], 28, 28),
        filter_shape=(nkerns[2], nkerns[1], 5, 5),
        poolsize=(4, 4)
    )

    # The HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e. a matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[2] * 6 * 6),
    # or (200, 20 * 6 * 6) = (200, 720) with the default values.
    layer2_input = layer1b.output.flatten(2)

    # construct a fully-connected hidden layer (tanh activation)
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        n_in=nkerns[2] * 6 * 6,
        n_out=300,
        activation=T.tanh
    )

    # classify the values of the fully-connected hidden layer
    layer3 = LogisticRegression(input=layer2.output, n_in=300, n_out=2)

    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1b.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by SGD.
    # Since this model has many parameters, it would be tedious to manually
    # create an update rule for each model parameter. We thus create the
    # updates list by automatically looping over all (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                   # go through this many minibatches before
                                   # checking the network on the validation
                                   # set; in this case we check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    # live plot of the validation and test error over epochs
    plt.axis([0, n_epochs, 0, 0.6])
    plt.xlabel('Epoch')
    plt.ylabel('Error')
    plt.title('Validation & Test Error')
    plt.ion()
    plt.show()

    # one pass over all images = one epoch
    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            # each minibatch is one iteration
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                plt.plot(epoch - 1, this_validation_loss, 'bs')
                plt.draw()

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                            improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

                    plt.plot(epoch - 1, test_score, 'g^')
                    plt.draw()

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
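
# Hedged entry-point sketch: evaluate_net only needs a dataset path understood
# by prepare_training_data, so a typical script would simply call it; the
# argument values shown here just restate the function's own defaults.
if __name__ == '__main__':
    evaluate_net(learning_rate=0.03, n_epochs=200, batch_size=200)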