Example #1
    def __init__(self):
        # Get activation signal a
        emg_data = load_data('./data/ta_vs_gait.csv')
        emg_data = np.array(emg_data)
        self.emg_function = get_norm_emg(emg_data)

        emg_sol_data = load_data('./data/soleus_vs_gait.csv')
        emg_sol_data = np.array(emg_sol_data)
        self.emg_sol_function = get_norm_sol(emg_sol_data)

        # Get ankle angle
        ankle_data = load_data('./data/ankle_vs_gait.csv')
        ankle_data = np.array(ankle_data)
        self.ankle_function = get_regress_ankle(ankle_data)

        # Get knee angle
        knee_data = load_data('./data/knee_vs_gait.csv')
        knee_data = np.array(knee_data)
        self.knee_function = get_regress_general(knee_data)

        # Get hip angle
        hip_data = load_data('./data/hip_vs_gait.csv')
        hip_data = np.array(hip_data)
        self.hip_function = get_regress_hip(hip_data)

        # Get shank velocity
        x = np.arange(0.0, 100.0, 1.0)
        shank_vel_data = 100 * derivative(self.knee_function, x, h=0.001)
        shank_vel_data = np.transpose(np.array([x, shank_vel_data]))
        self.shank_velocity_function = get_regress_general(shank_vel_data)

        # Get thigh velocity
        thigh_vel_data = 100 * derivative(self.hip_function, x, h=0.001)
        thigh_vel_data = np.transpose(np.array([x, thigh_vel_data]))
        self.thigh_velocity_function = get_regress_hip(thigh_vel_data)
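A quick way to sanity-check the curves fitted in this constructor is to sample them over one gait cycle. The sketch below is not from the original source: it assumes the `__init__` above belongs to a `MotionModel`-like class and that the fitted curves expose the same `.eval()` interface used in the later examples.

# Hedged sketch: sample the fitted joint-angle curves over one gait cycle.
# Assumes a MotionModel-like class and .eval() on the fitted curves (both
# are assumptions based on the other examples in this listing).
import numpy as np
import matplotlib.pyplot as plt

model = MotionModel()
gait = np.arange(0.0, 100.0, 1.0)  # percent of gait cycle

plt.plot(gait, model.ankle_function.eval(gait), label='ankle')
plt.plot(gait, model.knee_function.eval(gait), label='knee')
plt.plot(gait, model.hip_function.eval(gait), label='hip')
plt.xlabel('% Gait Cycle')
plt.ylabel('Joint angle (deg)')
plt.legend()
plt.show()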
Example #2
def verify_lin_accel(t_start=0, t_end=1):
  motion_model = MotionModel()
  
  # Get real ankle angle data
  ankle_data = load_data('./data/ankle_vs_gait.csv')
  ankle_data = np.array(ankle_data)
  ankle_data = get_regress_general(ankle_data)

  x = np.arange(0,1,0.001)
  
  position = [[],[]]
  for ite in x:
      coord = motion_model.get_global(ankle_data.eval(ite*100)[0]*np.pi/180,0,0,ite)
      position[0].append(coord[0])
      position[1].append(coord[1])
      
  
  y = position[0]
  y = np.array([x, y])
  y_function = get_regress_ankle_height(np.transpose(y))
  plt.plot(x, position[0])
  plt.plot(x, y_function.eval(x))
  plt.show()
  
  ankle_vel_y_data = derivative(y_function, x, h=0.0001)
  plt.plot(x, ankle_vel_y_data)
  ankle_vel_y_data = np.transpose(np.array([x, ankle_vel_y_data]))
  ankle_vel_y_fun = get_regress_ankle_height(ankle_vel_y_data)
  plt.plot(x, ankle_vel_y_fun.eval(x))
  plt.show()
  
  plt.plot(x, derivative(ankle_vel_y_fun, x, h=0.0001))
  plt.show()
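The velocity and acceleration checks above hinge on numerically differentiating a fitted curve with `derivative(f, x, h)`. For reference, the central-difference rule below is what `scipy.misc.derivative` computes with its default 3-point stencil; whether that is the `derivative` imported by these snippets is an assumption.

# Reference only: the central-difference rule assumed to underlie
# derivative(f, x, h) in the snippets above.
def central_difference(f, x, h=1e-4):
    return (f(x + h) - f(x - h)) / (2.0 * h)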
def get_pe_force_length_regression():
    data = load_data('./data/fl_curve_passive.csv')
    data = np.array(data)

    length_norm = data[:, 0]
    force_norm = data[:, 1]

    centres = np.arange(min(length_norm) - 0.1, max(length_norm) + 0.1, .1)
    width = .25
    result = Regression(length_norm,
                        force_norm,
                        centres,
                        width,
                        .01,
                        sigmoids=True)

    return result
def get_tendon_force_length_regression():
    data = load_data('./data/flcurve_tendon_norm.csv')
    data = np.array(data)

    length_tendon_norm = data[:, 0]
    force_tendon_norm = data[:, 1]

    centres = np.arange(
        min(length_tendon_norm) - 0.05,
        max(length_tendon_norm) + 0.05, 1e-2)
    width = .01
    result = Regression(length_tendon_norm,
                        force_tendon_norm,
                        centres,
                        width,
                        .01,
                        sigmoids=True)

    return result
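A minimal usage sketch for the two regressions above. It assumes the `Regression` object exposes the same `.eval()` method as the other fitted curves in these examples, and the length ranges are illustrative guesses, not values taken from the data files.

# Hedged sketch: plot the fitted passive and tendon force-length curves.
import numpy as np
import matplotlib.pyplot as plt

pe_curve = get_pe_force_length_regression()
tendon_curve = get_tendon_force_length_regression()

lm = np.linspace(0.8, 1.6, 200)    # normalized muscle length (assumed range)
lt = np.linspace(0.99, 1.05, 200)  # normalized tendon length (assumed range)

plt.plot(lm, pe_curve.eval(lm), label='parallel elastic element')
plt.plot(lt, tendon_curve.eval(lt), label='tendon')
plt.xlabel('Normalized length')
plt.ylabel('Normalized force')
plt.legend()
plt.show()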
def evaluate_lenet5(learning_rate=learning_rate, n_epochs=n_epochs_, batch_size=batch_size_):
    datasets = load_data(dataset_path, n_train, n_valid, n_test, img_size)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    index = T.lscalar()

    x = T.matrix('x')
    y = T.ivector('y')

    rng = numpy.random.RandomState(23455)

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    layer0_input = x.reshape((batch_size, feature_maps, img_h, img_w))

    layer0 = LeNetConvPoolLayer(rng, input=layer0_input, image_shape=(batch_size, feature_maps, img_h, img_w), filter_shape=(n_kerns[0], feature_maps, kern_size[0][0], kern_size[0][1]), poolsize=pool_size[0])

    #Modify shape from layer 0
    shapeA = (img_h-kern_size[0][0]+1)/pool_size[0][0]
    shapeB = (img_w-kern_size[0][1]+1)/pool_size[0][1]
    layer1 = LeNetConvPoolLayer(rng, input=layer0.output, image_shape=(batch_size, n_kerns[0], shapeA, shapeB), filter_shape=(n_kerns[1], n_kerns[0], kern_size[1][0], kern_size[1][1]), poolsize=pool_size[1])

    layer2_input = layer1.output.flatten(2)             

    #Modify shape from layer 1
    shapeA = (shapeA-kern_size[1][0]+1)/pool_size[1][0]
    shapeB = (shapeB-kern_size[1][1]+1)/pool_size[1][1]
    layer2 = HiddenLayer(rng, input=layer2_input, n_in=n_kerns[1]*shapeA*shapeB, n_out=batch_size, activation=T.tanh)

    layer3 = LogisticRegression(input=layer2.output, n_in=batch_size, n_out=n_outputs)

    cost = layer3.negative_log_likelihood(y)

    test_model = theano.function([index], layer3.errors(y), givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function([index], layer3.errors(y), givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    params = layer3.params + layer2.params + layer0.params

    grads = T.grad(cost, params)

    updates = [(param_i, param_i - learning_rate * grad_i) for param_i, grad_i in zip(params, grads)]

    train_model = theano.function([index], cost, updates=updates, givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    patience = 10000
    patience_increase = 2

    improvement_threshold = 0.995

    validation_frequency = min(n_train_batches, patience / 2)

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                if this_validation_loss < best_validation_loss:

                    if this_validation_loss < best_validation_loss *  \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
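The most intricate part of the training loop above is the patience-based early stopping. The plain-Python sketch below isolates that logic (illustrative only, no Theano); for simplicity it treats each entry of validation_losses as one validation check rather than one minibatch.

# Illustrative sketch of the patience-based early stopping used above.
def early_stopping(validation_losses, patience=10000,
                   patience_increase=2, improvement_threshold=0.995):
    best = float('inf')
    for it, loss in enumerate(validation_losses):
        if loss < best:
            # a sufficiently large improvement buys more patience
            if loss < best * improvement_threshold:
                patience = max(patience, it * patience_increase)
            best = loss
        if patience <= it:
            break  # patience exhausted: stop training
    return best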
Example #6
def test_mlp(learning_rate=0.1, L1_reg=0.00, L2_reg=0.001, n_epochs=200, batch_size=30, n_hidden=100):
    datasets = load_data()

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'


    index = T.lscalar()
    x = T.matrix('x')
    y = T.ivector('y')

    rng = numpy.random.RandomState(1234)

    classifier = MLP(rng=rng, input=x, n_in=1024, n_hidden=n_hidden, n_out=10)

    cost = classifier.negative_log_likelihood(y) \
         + L1_reg * classifier.L1 \
         + L2_reg * classifier.L2_sqr

    test_model = theano.function(inputs=[index],
            outputs=classifier.errors(y),
            givens={
                x: test_set_x[index * batch_size:(index + 1) * batch_size],
                y: test_set_y[index * batch_size:(index + 1) * batch_size]})

    validate_model = theano.function(inputs=[index],
            outputs=classifier.errors(y),
            givens={
                x: valid_set_x[index * batch_size:(index + 1) * batch_size],
                y: valid_set_y[index * batch_size:(index + 1) * batch_size]})

    gparams = []
    for param in classifier.params:
        gparam = T.grad(cost, param)
        gparams.append(gparam)

    updates = []

    for param, gparam in zip(classifier.params, gparams):
        updates.append((param, param - learning_rate * gparam))


    train_model = theano.function(inputs=[index], outputs=cost,
            updates=updates,
            givens={
                x: train_set_x[index * batch_size:(index + 1) * batch_size],
                y: train_set_y[index * batch_size:(index + 1) * batch_size]})

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    patience = 10000

    patience_increase = 2

    improvement_threshold = 0.995 

    validation_frequency = min(n_train_batches, patience / 2)

    best_params = None
    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            minibatch_avg_cost = train_model(minibatch_index)

            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:

                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                     (epoch, minibatch_index + 1, n_train_batches,
                      this_validation_loss * 100.))

                if this_validation_loss < best_validation_loss:

                    if this_validation_loss < best_validation_loss *  \
                           improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    test_losses = [test_model(i) for i
                                   in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                    done_looping = True
                    break

    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
    viable = []
    for i in range(len(above_0_plot)):
        for j in range(len(above_0_plot[0])):
            if above_0_plot[i][j] == 1:
                viable.append([
                    rmse_toe_height_plot[i][j], independent_1[i][j],
                    independent_2[i][j]
                ])

    # Sorts by first element (ie RMSE)
    top_viable = sorted(viable)
    if len(top_viable) >= 5:
        top_viable = top_viable[:5]

    # Find fatigues
    emg_data = load_data('./data/ta_vs_gait.csv')
    emg_data = np.array(emg_data)
    emg_function = get_norm_emg(emg_data)

    fatigues = []
    all_fatigues = []
    for i in range(len(top_viable)):
        a = Activation(top_viable[i][1], top_viable[i][2], scaling,
                       non_linearity)
        a.get_activation_signal(emg_function)
        fatigues.append([a.get_fatigue(), i])

    for i in range(len(viable)):
        a = Activation(viable[i][1], viable[i][2], scaling, non_linearity)
        a.get_activation_signal(emg_function)
        all_fatigues.append([a.get_fatigue(), i])
Example #8
def verify_rotation_matrices(t_start=0, t_end=1):
    motion_model = MotionModel()

    # Get real ankle angle data
    ankle_data = load_data('./data/ankle_vs_gait.csv')
    ankle_data = np.array(ankle_data)
    ankle_data = get_regress_general(ankle_data)

    # Get real ankle height data
    ankle_height = load_data('./data/Foot-Centroid-Height_OG-vs-Gait.csv')
    ankle_height = np.array(ankle_height)
    ankle_height = np.transpose(ankle_height)
    ankle_height[0] = ankle_height[0] / 5
    ankle_height[1] = ankle_height[1] / 1000

    # Get real ankle horizontal data
    ankle_hor = load_data('./data/Foot-Centroid-Horizontal_OG-vs-Gait.csv')
    ankle_hor = np.array(ankle_hor)
    ankle_hor = np.transpose(ankle_hor)
    ankle_hor[0] = ankle_hor[0] / 5
    ankle_hor[1] = ankle_hor[1] / 1000

    x = np.arange(t_start, t_end, .01)

    position = [[], []]
    for ite in x:
        coord = motion_model.get_global(
            ankle_data.eval(ite * 100)[0] * np.pi / 180, 0.06674, -0.03581,
            ite)  #gets global coordinate of ankle
        position[0].append(coord[0])
        position[1].append(coord[1])

    # Plot ankle from literature
    plt.figure()
    plt.plot(x * 100, ankle_data.eval(x * 100) * np.pi / 180)
    plt.xlabel("% Gait Cycle")
    plt.ylabel("Ankle Angle Literature (rad)")
    plt.show()

    # Plot global horizontal of centroid
    plt.figure()
    plt.plot(ankle_hor[0][:-5] * 100, ankle_hor[1][:-5])
    plt.plot(x * 100, position[0], '--')
    plt.legend(('Raw Data', 'Computed Trajectory'))
    plt.xlabel("% Gait Cycle")
    plt.ylabel("Horizontal Position (m)")
    plt.title("Horizontal Position of the COM over the Gait Cycle")
    plt.show()

    # Plot global vertical of centroid
    plt.figure()
    plt.plot(ankle_height[0][:-5] * 100, ankle_height[1][:-5])
    plt.plot(x * 100, position[1], '--')
    plt.legend(('Raw Data', 'Computed Trajectory'))
    plt.xlabel("% Gait Cycle")
    plt.ylabel("Vertical Position (m)")
    plt.title("Vertical Position of the COM over the Gait Cycle")
    plt.show()

    # Plot phase portraits of centroid
    plt.figure()
    plt.plot(ankle_hor[1][:-5], ankle_height[1][:-5])

    plt.plot(position[0], position[1], '--')
    # plt.scatter(position[0][0], position[1][0], marker='x', color='r')
    # plt.text(position[0][0], position[1][0], 'start')
    # plt.scatter(position[0][-1], position[1][-1], marker='x', color='g')
    # plt.text(position[0][-1], position[1][-1], 'end')

    #  plt.scatter(ankle_hor[1][0], ankle_height[1][0], marker='x', color='r')
    #  plt.text(ankle_hor[1][0], ankle_height[1][0], 'start')
    #  plt.scatter(ankle_hor[1][-1], ankle_height[1][-1], marker='x', color='g')
    #  plt.text(ankle_hor[1][-1], ankle_height[1][-1], 'end')
    plt.legend(('Raw Data', 'Computed Trajectory'))
    plt.xlabel("Horizontal Position (m)")
    plt.ylabel("Vertical Position (m)")
    plt.title("Phase Portrait of Centroid Trajectory over the Gait Cycle")
    plt.show()
Example #9
    srt_idx = x_train.argsort(0)
    plt.plot(x_train[srt_idx].reshape(-1, 1),
             y_train[srt_idx].reshape(-1, 1),
             'go',
             label="trian data")
    plt.plot(x_train[srt_idx].reshape(-1, 1),
             train_result[srt_idx].reshape(-1, 1),
             'r-',
             label="regression value")
    plt.title("score:%f" % score)
    plt.legend()
    plt.show()


if __name__ == "__main__":
    x_data, y_data = load_data("ex0.txt")

    # Linear regression
    from sklearn import linear_model
    model_linear_regression = linear_model.LinearRegression()

    # Decision tree regression
    from sklearn import tree
    model_decisiontree_regression = tree.DecisionTreeRegressor(
        min_weight_fraction_leaf=0.01)

    # SVM regression
    from sklearn import svm
    model_svr = svm.SVR()

    # KNN regression
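The listing above cuts off before the regressors are fit. As a hedged illustration of how such a model would typically be trained and scored on the loaded data (the reshape assumes a single feature column, which is a guess about what load_data("ex0.txt") returns):

# Hedged sketch (not from the truncated listing): fit and score one of the
# regressors defined above.
import numpy as np

X = np.asarray(x_data).reshape(-1, 1)  # assumed 1-D features
y = np.asarray(y_data)

model_linear_regression.fit(X, y)
print(model_linear_regression.score(X, y))  # R^2 on the training data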
def test_mlp(dataset, learning_rate=0.12, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000, batch_size=150, hidden_layers_sizes=[350, 270, 190, 130, 70, 30]):
    datasets, length, testSet = load_data(dataset)
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    index = T.lscalar()  # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    y = T.matrix('y')  # the targets are presented as a matrix

    rng = numpy.random.RandomState(1234)

    classifier = MLP(
        rng=rng,
        input=x,
        n_in=length,
        hidden_layers_sizes=hidden_layers_sizes,
        n_out=1
    )

    # start-snippet-4
    # the cost we minimize during training is the error of the model plus
    # the regularization terms (L1 and L2); cost is expressed here
    # symbolically
    cost = (
        classifier.errors(y)
        + L1_reg * classifier.L1
        + L2_reg * classifier.L2_sqr
    )
    # end-snippet-4

    # compiling a Theano function that computes the mistakes that are made
    # by the model on a minibatch
    test_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size],
            y: test_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )

    ''' test momentum
    # start-snippet-5
    # compute the gradient of cost with respect to theta (stored in params)
    # the resulting gradients will be stored in a list gparams
    gparams = [T.grad(cost, param) for param in classifier.params]

    # specify how to update the parameters of the model as a list of
    # (variable, update expression) pairs

    # given two lists A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of the
    # same length, zip generates a list C of the same size, where each
    # element is a pair formed from the two lists:
    #    C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
    updates = [
        (param, param - learning_rate * gparam)
        for param, gparam in zip(classifier.params, gparams)
    ]
    '''

    # Gradient descent with classical momentum: each parameter gets a shared
    # "velocity" variable that accumulates a moving average of its gradient.
    updates = []
    momentum = 0.7
    for param in classifier.params:
        param_update = theano.shared(param.get_value() * 0., broadcastable=param.broadcastable)
        updates.append((param, param - learning_rate * param_update))
        # Note that we don't need to derive backpropagation to compute
        # updates - just use T.grad!
        updates.append((param_update, momentum * param_update + (1. - momentum) * T.grad(cost, param)))

    # compiling a Theano function `train_model` that returns the cost, but
    # at the same time updates the parameters of the model based on the
    # rules defined in `updates`
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-5

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'

    # early-stopping parameters
    patience = 5000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                   # go through this many minibatches before
                                   # checking the network on the validation
                                   # set; in this case we check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        if epoch > 400 and epoch % 100 == 0:
            # note: this only rebinds the Python float; the compiled
            # `updates` above keep the original learning rate
            learning_rate = learning_rate * 0.9
        for minibatch_index in xrange(n_train_batches):

            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute the loss on the validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print(
                    'epoch %i, minibatch %i/%i, validation error %f %%' %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        this_validation_loss * 100.
                    )
                )

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if (
                        this_validation_loss < best_validation_loss *
                        improvement_threshold
                    ):
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [test_model(i) for i
                                   in xrange(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(('Optimization complete. Best validation score of %f %% '
           'obtained at iteration %i, with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))

    prediction_model = theano.function(
        inputs=[],
        outputs=classifier.logRegressionLayer.y_pred,
        givens={
            x: test_set_x
        }
    )
    produceSet = process_test_data(prediction_model(), testSet)
    print weightRecall(testSet, produceSet)
    print produceSet
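The distinguishing feature of this variant is the momentum rule built into `updates`. A plain-numpy sketch of the same rule follows (illustrative only); as in the Theano updates above, the parameter moves with the previous velocity before the velocity is refreshed.

# Plain-numpy sketch of the momentum rule encoded in `updates` above.
import numpy as np

def momentum_step(param, velocity, grad, learning_rate=0.12, momentum=0.7):
    # the parameter is moved with the *previous* velocity, then the
    # velocity is refreshed with the new gradient
    new_param = param - learning_rate * velocity
    new_velocity = momentum * velocity + (1.0 - momentum) * grad
    return new_param, new_velocity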