# NOTE(review): this physical line is a whitespace-mangled collapse of a
# multi-line chunk, and it begins with `return effective_rank` -- the tail of a
# function (presumably compute_effective_rank) whose `def` is outside this
# view. It cannot be safely re-indented without guessing at that function, so
# it is left byte-identical. It also appears to duplicate the chunk at the end
# of this file; check version control for the intended single copy.
# NOTE(review): within the duplicated content, `cPickle.load(fobj)` discards
# its result instead of rebinding `predict`, and `tr_err_record` /
# `te_err_record` do not match the `train_error_record` / `test_error_record`
# names used by the training chunk -- confirm whether they are defined
# elsewhere in the file.
return effective_rank print "Rank: {}, Hidden Layer Size: {}".format(compute_effective_rank(svals), svals.shape[0]) if isinstance(batch_size, int): if batch_size == 1: fig_outfile = 'perf_mlp_seque.png' else: fig_outfile = 'perf_mlp_minibatchsize_%d.png' % batch_size else: fig_outfile = 'perf_mlp_batch.png' if raw_input("Shall we save this model? (y/n)\n") == 'y': model_outfile = fig_outfile.split('.')[0] + ".pkl" fobj = open(model_outfile, 'wb') cPickle.dump(predict, fobj, protocol=cPickle.HIGHEST_PROTOCOL) fobj.close() if raw_input('Save training figure? (y/n): \n') == 'y': performanceplot(cost_record, tr_err_record, te_err_record, "contrast_" + fig_outfile) else: model_outfile = raw_input("Provide path to model_outfile: \n") fobj = open(model_outfile, 'rb') cPickle.load(fobj) fobj.close() if raw_input("Perform failure analysis? (y/n):\n") == 'y': failure_analysis.investigate_mlp(teX, teY, predict(teX) > 0.5)
batch_size=1; learning_rate=0.001; # sequential mode: single example cost = T.mean(Y - y_pred) update = [[w_o, w_o + learning_rate*cost*T.transpose(X)]] train = theano.function(inputs=[X, Y], outputs=cost, updates=update, allow_input_downcast=True) predict = theano.function(inputs=[X], outputs=y_pred, allow_input_downcast=True) print "batch_size: ", batch_size print "learning_rate: ", learning_rate cost_record = [] train_error_record = [] test_error_record = [] for epoch in range(100): if isinstance(batch_size, int): for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)): cost = train(trX[start:end], trY[start:end]) else: cost = train(trX, trY) cost_record.append(cost) if epoch % 1 == 0: train_error = 1-np.mean(np.sign(trY)== predict(trX)) test_error = 1-np.mean(np.sign(teY)== predict(teX)) train_error_record.append(train_error) test_error_record.append(test_error) print "%d,%0.4f,%0.4f" % (epoch, train_error, test_error) trX, trY = permute(trX, trY) performanceplot(cost_record, train_error_record, test_error_record, 'contrast_perf_perceptron.png')
# NOTE(review): this physical line is a whitespace-mangled collapse of a
# multi-line chunk and begins MID-STATEMENT (`updates=update, ...` has no
# visible opening `theano.function(` call), so it cannot be re-indented
# without guessing at the missing call head. Left byte-identical. It appears
# to be a duplicate of the tail of the training chunk above; check version
# control for the intended single copy.
# NOTE(review): the minibatch loop here shares the same defect as the chunk
# above -- `zip(range(0, len(trX), batch_size), range(batch_size, len(trX),
# batch_size))` drops the final partial minibatch (the last example when
# batch_size == 1); if this copy is kept, apply the same end-range fix.
updates=update, allow_input_downcast=True) predict = theano.function(inputs=[X], outputs=y_pred, allow_input_downcast=True) print "batch_size: ", batch_size print "learning_rate: ", learning_rate cost_record = [] train_error_record = [] test_error_record = [] for epoch in range(100): if isinstance(batch_size, int): for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)): cost = train(trX[start:end], trY[start:end]) else: cost = train(trX, trY) cost_record.append(cost) if epoch % 1 == 0: train_error = 1 - np.mean(np.sign(trY) == predict(trX)) test_error = 1 - np.mean(np.sign(teY) == predict(teX)) train_error_record.append(train_error) test_error_record.append(test_error) print "%d,%0.4f,%0.4f" % (epoch, train_error, test_error) trX, trY = permute(trX, trY) performanceplot(cost_record, train_error_record, test_error_record, 'contrast_perf_perceptron.png')
print "Rank: {}, Hidden Layer Size: {}".format( compute_effective_rank(svals), svals.shape[0]) if isinstance(batch_size, int): if batch_size == 1: fig_outfile = 'perf_mlp_seque.png' else: fig_outfile = 'perf_mlp_minibatchsize_%d.png' % batch_size else: fig_outfile = 'perf_mlp_batch.png' if raw_input("Shall we save this model? (y/n)\n") == 'y': model_outfile = fig_outfile.split('.')[0] + ".pkl" fobj = open(model_outfile, 'wb') cPickle.dump(predict, fobj, protocol=cPickle.HIGHEST_PROTOCOL) fobj.close() if raw_input('Save training figure? (y/n): \n') == 'y': performanceplot(cost_record, tr_err_record, te_err_record, "contrast_" + fig_outfile) else: model_outfile = raw_input("Provide path to model_outfile: \n") fobj = open(model_outfile, 'rb') cPickle.load(fobj) fobj.close() if raw_input("Perform failure analysis? (y/n):\n") == 'y': failure_analysis.investigate_mlp(teX, teY, predict(teX) > 0.5)