Code Example #1
import cPickle

import theano

# load_data comes from the accompanying tutorial code (e.g. logistic_sgd.py)
from logistic_sgd import load_data


def predict():
    """
    An example of how to load a trained model and use it
    to predict labels.
    """

    # load the saved model
    classifier = cPickle.load(open('best_model.pkl'))

    # compile a predictor function
    predict_model = theano.function(
        inputs=[classifier.input],
        outputs=classifier.y_pred)

    # We can test it on some examples from the test set
    dataset = 'mnist.pkl.gz'
    datasets = load_data(dataset)
    test_set_x, test_set_y = datasets[2]
    test_set_x = test_set_x.get_value()

    predicted_values = predict_model(test_set_x[:10])
    print ("Predicted values for the first 10 examples in test set:")
    print predicted_values
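The pattern used in Code Example #1, wrapping the unpickled model's stored symbolic `input` and `y_pred` in `theano.function`, can be illustrated on a self-contained toy graph. The names `W`, `b` and `y_pred` below are stand-ins, not the tutorial's saved classifier:

import numpy
import theano
import theano.tensor as T

# toy stand-ins for classifier.input and classifier.y_pred
x = T.matrix('x')
W = theano.shared(numpy.zeros((784, 10)), name='W')
b = theano.shared(numpy.zeros(10), name='b')
y_pred = T.argmax(T.nnet.softmax(T.dot(x, W) + b), axis=1)

# same compilation step as in predict() above
predict_model = theano.function(inputs=[x], outputs=y_pred)
print predict_model(numpy.zeros((3, 784)))  # with zero weights every row is predicted as class 0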
Code Example #2
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=100):

# def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
#                     dataset='mnist.pkl.gz',
#                     nkerns=[20, 50], batch_size=500):
    """ Demonstrates lenet on MNIST dataset

    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)

    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer

    :type dataset: string
    :param dataset: path to the dataset used for training /testing (MNIST here)

    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """

    rng = numpy.random.RandomState(23455)

    datasets = load_data(dataset)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size

    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch

    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'

    # Reshape matrix of rasterized images of shape (batch_size, 256 * 256)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer.
    # The original tutorial used (28, 28) MNIST images; this version expects
    # 256x256 single-channel inputs.
    # layer0_input = x.reshape((batch_size, 1, 28, 28))
    layer0_input = x.reshape((batch_size, 1, 256, 256))

    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (256-5+1 , 256-5+1) = (252, 252)
    # maxpooling reduces this further to (252/2, 252/2) = (126, 126)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 126, 126)

    # :param filter_shape: (number of filters, num input feature maps,
    #                          filter height, filter width)
    
    #:type image_shape: tuple or list of length 4
    #    :param image_shape: (batch size, num input feature maps,
    #                         image height, image width)

    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, 256, 256),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2)
    )

    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (126-5+1, 126-5+1) = (122, 122)
    # maxpooling reduces this further to (122/2, 122/2) = (61, 61)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 61, 61)

    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 126, 126),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2)
    )

    # Construct the third convolutional pooling layer
    # filtering reduces the image size to (61-5+1, 61-5+1) = (57, 57)
    # maxpooling reduces this further to (57/2, 57/2) = (28, 28), borders ignored
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 28, 28)

    layer2 = LeNetConvPoolLayer(
        rng,
        input=layer1.output,
        image_shape=(batch_size, nkerns[1], 61, 61),
        filter_shape=(nkerns[0], nkerns[1], 5, 5),
        poolsize=(2, 2)
    )

    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e. matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[0] * 28 * 28),
    # i.e. (100, 20 * 28 * 28) = (100, 15680) with the values used above.
    #layer2_input = layer1.output.flatten(2)
    layer3_input = layer2.output.flatten(2)

    # construct a fully-connected sigmoidal layer
    layer3 = HiddenLayer(
        rng,
        input=layer3_input,
        n_in=nkerns[0] * 28 * 28,  # matches the flattened layer2 output (batch_size, nkerns[0], 28, 28)
        n_out=100,
        activation=T.tanh
    )

    # classify the values of the fully-connected sigmoidal layer
    #layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
    #layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=2)
    layer4 = LogisticRegression(input=layer3.output, n_in=100, n_out=2)

    # the cost we minimize during training is the NLL of the model
    #cost = layer3.negative_log_likelihood(y)
    cost = layer4.negative_log_likelihood(y)

    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer4.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    validate_model = theano.function(
        [index],
        layer4.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    # create a list of all model parameters to be fit by gradient descent
    params = layer4.params + layer3.params + layer2.params + layer1.params + layer0.params
    #params = layer4.params + layer3.params + layer1.params + layer0.params

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    # train_model is a function that updates the model parameters by SGD.
    # Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]

    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 10000  # look at this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()

    epoch = 0
    done_looping = False

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):

            iter = (epoch - 1) * n_train_batches + minibatch_index
            print iter
            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)

            if (iter + 1) % validation_frequency == 0:

                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss *  \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
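The feature-map sizes tracked in the comments of Code Example #2 (256 → 126 → 61 → 28) follow from a 'valid' 5x5 convolution followed by non-overlapping 2x2 max-pooling with borders ignored; a quick standalone sanity check of that arithmetic:

def conv_pool_out(size, filter_size=5, pool=2):
    # side length after a 'valid' convolution and 2x2 max-pooling (ignore_border)
    return (size - filter_size + 1) // pool

sizes = [256]
for _ in range(3):
    sizes.append(conv_pool_out(sizes[-1]))
print sizes  # [256, 126, 61, 28]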
Code Example #3
def test_SdA(finetune_lr=0.1, pretraining_epochs=2,
			pretrain_lr=0.001, training_epochs=2,
			dataset='mnist.pkl.gz', batch_size=1):
	"""
	Demonstrates how to train and test a stochastic denoising autoencoder.

	This is demonstrated on MNIST.

	:type finetune_lr: float
	:param finetune_lr: learning rate used in the finetune stage
	(factor for the stochastic gradient)

	:type pretraining_epochs: int
	:param pretraining_epochs: number of epochs to do pretraining

	:type pretrain_lr: float
	:param pretrain_lr: learning rate to be used during pre-training

	:type training_epochs: int
	:param training_epochs: maximal number of fine-tuning epochs to run the optimizer

	:type dataset: string
	:param dataset: path to the pickled dataset

	:type batch_size: int
	:param batch_size: size of a minibatch

	"""
	
	data_path = '/Applications/MAMP/htdocs/DeepLearningTutorials/data/'
	# Use the following command if you want to run the dA in production
	# THEANO_FLAGS='floatX=float32,device=gpu0,nvcc.fastmath=True,cuda.root=/usr/local/cuda,mode=FAST_RUN' python SdA_v2.py
	#data_path = '/home/ubuntu/DeepLearningTutorials/data/'

	datasets = load_data(dataset)

	train_set_x, train_set_y = datasets[0]
	valid_set_x, valid_set_y = datasets[1]
	test_set_x, test_set_y = datasets[2]

	# compute number of minibatches for training, validation and testing
	n_train_batches = train_set_x.get_value(borrow=True).shape[0]
	n_train_batches /= batch_size

	# numpy random generator
	# start-snippet-3
	numpy_rng = numpy.random.RandomState(89677)
	print '... building the model'
	# construct the stacked denoising autoencoder class
	# sda = SdA(
	# 	numpy_rng=numpy_rng,
	# 	n_ins=128 * 128,
	# 	hidden_layers_sizes=[1000, 1000],
	# 	n_outs=21,
	# 	data_path=data_path
	# )

	sda = SdA(
		numpy_rng=numpy_rng,
		n_ins=128 * 128 * 3,
		hidden_layers_sizes=[1000, 1000],
		n_outs=3,
		data_path=data_path
	)

	# end-snippet-3 start-snippet-4
	#########################
	# PRETRAINING THE MODEL #
	#########################
	print '... getting the pretraining functions'
	pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
												batch_size=batch_size)

	print '... pre-training the model'
	start_time = timeit.default_timer()
	## Pre-train layer-wise
	corruption_levels = [.1, .2, .3]
	for i in xrange(sda.n_layers):
		# go through pretraining epochs
		for epoch in xrange(pretraining_epochs):
			# go through the training set
			c = []
			for batch_index in xrange(n_train_batches):
				c.append(pretraining_fns[i](index=batch_index,
						 corruption=corruption_levels[i],
						 lr=pretrain_lr))
			print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
			print numpy.mean(c)

	end_time = timeit.default_timer()

	print >> sys.stderr, ('The pretraining code for file ' +
						  os.path.split(__file__)[1] +
						  ' ran for %.2fm' % ((end_time - start_time) / 60.))

	from utils import tile_raster_images

	try:
		import PIL.Image as Image
	except ImportError:
		import Image

	# image = Image.fromarray(tile_raster_images(
	#     X=sda.dA_layers[0].W.get_value(borrow=True).T,
	#     img_shape=(128, 128), tile_shape=(10, 10),
	#     tile_spacing=(1, 1)))
	
	# print sda.dA_layers[1].W.get_value(borrow=True).T.shape
	# print sda.dA_layers[0].W.get_value(borrow=True).T.shape

	# image = Image.fromarray(tile_raster_images(
	#     X=sda.dA_layers[1].W.get_value(borrow=True).T,
	#     img_shape=(36, 36), tile_shape=(10, 10),
	#     tile_spacing=(1, 1)))
	# image.save('filters_corruption_30.png')

	# end-snippet-4
	########################
	# FINETUNING THE MODEL #
	########################

	# get the training, validation and testing function for the model
	print '... getting the finetuning functions'
	train_fn, validate_model, test_model = sda.build_finetune_functions(
		datasets=datasets,
		batch_size=batch_size,
		learning_rate=finetune_lr
	)

	print '... fine-tuning the model'
	# early-stopping parameters
	patience = 10 * n_train_batches  # look at this many examples regardless
	patience_increase = 2.  # wait this much longer when a new best is found
	improvement_threshold = 0.995  # a relative improvement of this much is
	                               # considered significant
	validation_frequency = min(n_train_batches, patience / 2)
	                              # go through this many minibatches before
	                              # checking the network on the validation set;
	                              # in this case we check every epoch

	best_validation_loss = numpy.inf
	best_iter = 0
	test_score = 0.
	start_time = timeit.default_timer()

	done_looping = False
	epoch = 0

	while (epoch < training_epochs) and (not done_looping):
		epoch = epoch + 1
		for minibatch_index in xrange(n_train_batches):
			minibatch_avg_cost = train_fn(minibatch_index)
			iter = (epoch - 1) * n_train_batches + minibatch_index

			if (iter + 1) % validation_frequency == 0:
				validation_losses = validate_model()
				this_validation_loss = numpy.mean(validation_losses)
				print('epoch %i, minibatch %i/%i, validation error %f %%' %
					  (epoch, minibatch_index + 1, n_train_batches,
					   this_validation_loss * 100.))

				# if we got the best validation score until now
				if this_validation_loss < best_validation_loss:

					#improve patience if loss improvement is good enough
					if (
						this_validation_loss < best_validation_loss *
						improvement_threshold
					):
						patience = max(patience, iter * patience_increase)

					# save best validation score and iteration number
					best_validation_loss = this_validation_loss
					best_iter = iter

					# test it on the test set
					test_losses = test_model()
					test_score = numpy.mean(test_losses)
					print(('     epoch %i, minibatch %i/%i, test error of '
						   'best model %f %%') %
						  (epoch, minibatch_index + 1, n_train_batches,
						   test_score * 100.))

			if patience <= iter:
				done_looping = True
				break

	end_time = timeit.default_timer()
	print(
		(
			'Optimization complete with best validation score of %f %%, '
			'on iteration %i, '
			'with test performance %f %%'
		)
		% (best_validation_loss * 100., best_iter + 1, test_score * 100.)
	)
	print >> sys.stderr, ('The training code for file ' +
						  os.path.split(__file__)[1] +
						  ' ran for %.2fm' % ((end_time - start_time) / 60.))


	# x = T.matrix('x')
	# index_1 = T.lscalar()    # index to a [mini]batch
	# index_2 = T.lscalar()    # index to a [mini]batch
	# getHV = sda.dA_layers[0].get_hidden_values(x)
	# getHiddenValues = theano.function(
	#     [index_1,index_2],
	#     getHV,
	#     givens={
	#         x: train_set_x[index_1:index_2]
	#     }
	# )
	# print getHiddenValues(0,len(train_set_x.get_value(borrow=True))).shape
	
	# da1output = T.matrix('da1output')
	# getHV2 = sda.dA_layers[1].get_hidden_values(da1output)
	# getHiddenValues2 = theano.function(
	#     [da1output],
	#     getHV2
	# )
	# #print getHiddenValues2(getHiddenValues(0,1)).shape
	# X = getHiddenValues2(getHiddenValues(0,len(train_set_x.get_value(borrow=True))))

	sda.save_weights()

	# sda2 = SdA(
	# 	numpy_rng=numpy_rng,
	# 	n_ins=128 * 128,
	# 	hidden_layers_sizes=[1000, 1000],
	# 	n_outs=21,
	# 	data_path=data_path
	# )

	sda2 = SdA(
		numpy_rng=numpy_rng,
		n_ins=128 * 128 * 3,
		hidden_layers_sizes=[1000, 1000],
		n_outs=3,
		data_path=data_path
	)

	sda2.load_weights()
	#print sda2.dA_layers[1].W.get_value(borrow=True).shape
	x = T.matrix('x')
	index_1 = T.lscalar()    # index to a [mini]batch
	index_2 = T.lscalar()    # index to a [mini]batch
	getHV = sda2.dA_layers[0].get_hidden_values(x)
	getHiddenValues = theano.function(
		[index_1,index_2],
		getHV,
		givens={
			x: train_set_x[index_1:index_2]
		}
	)

	#print getHiddenValues(0,len(train_set_x.get_value(borrow=True))).shape
	print getHiddenValues(0,1)
	
	da1output = T.matrix('da1output')
	getHV2 = sda2.dA_layers[1].get_hidden_values(da1output)
	getHiddenValues2 = theano.function(
		[da1output],
		getHV2
	)
	#print getHiddenValues2(getHiddenValues(0,1)).shape
	X = getHiddenValues2(getHiddenValues(0,len(train_set_x.get_value(borrow=True))))
	print X.shape

	# print X.shape
	# da2output = T.matrix('da2output')
	# getHV3 = sda.dA_layers[2].get_hidden_values(da2output)
	# getHiddenValues3 = theano.function(
	#     [da2output],
	#     getHV3
	# )
	# print getHiddenValues3([getHiddenValues2(0,1)])


	from fetex_image import FetexImage
	pkl_file = open(data_path + 'im_index.pkl', 'rb')
	im_index = cPickle.load(pkl_file)

	fe = FetexImage(verbose=True,support_per_class=10000,data_path=data_path, dataset='categories', mode='RGB')
	fe.im_index = im_index
	
	# print im_index[0]
	# print im_index[1]
	#X_compressed = getHiddenValues(0,100)
	X_compressed = X
	#print X_compressed.shape
	#fe.dimReductionSdA(X)
	fe.similarImages(X_compressed)
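Both Code Example #2 and Code Example #3 use the same patience-based early stopping during training. A standalone toy trace of that bookkeeping, with made-up validation losses and illustrative thresholds (not the values used above):

patience = 10
patience_increase = 2
improvement_threshold = 0.995
best = float('inf')
losses = [0.30, 0.28, 0.279, 0.279, 0.279, 0.279, 0.279, 0.279, 0.279, 0.279, 0.279, 0.279]
for it, loss in enumerate(losses):
    if loss < best:
        # only a "significant" improvement buys more patience
        if loss < best * improvement_threshold:
            patience = max(patience, it * patience_increase)
        best = loss
    if patience <= it:
        print 'stopping at iteration %d, best loss %.3f' % (it, best)
        break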
Code Example #4
File: dA_v2.py  Project: webeng/DeepLearningTutorials
def test_dA(learning_rate=0.1, training_epochs=50,
            dataset='mnist.pkl.gz',
            batch_size=20, output_folder='dA_plots'):

    """
    This demo is tested on MNIST

    :type learning_rate: float
    :param learning_rate: learning rate used for training the DeNosing
                          AutoEncoder

    :type training_epochs: int
    :param training_epochs: number of epochs used for training

    :type dataset: string
    :param dataset: path to the picked dataset

    """
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

    # start-snippet-2
    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('x')  # the data is presented as rasterized images
    # end-snippet-2

    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)

    ####################################
    # BUILDING THE MODEL NO CORRUPTION #
    ####################################

    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    da = dA(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=x,
        n_visible=128 * 128,
        n_hidden=500
    )

    cost, updates = da.get_cost_updates(
        corruption_level=0.,
        learning_rate=learning_rate
    )

    train_da = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size]
        }
    )

    start_time = timeit.default_timer()

    ############
    # TRAINING #
    ############

    # go through training epochs
    for epoch in xrange(training_epochs):
        # go through the training set
        c = []
        for batch_index in xrange(n_train_batches):
            c.append(train_da(batch_index))

        print 'Training epoch %d, cost ' % epoch, numpy.mean(c)

    end_time = timeit.default_timer()

    training_time = (end_time - start_time)

    print >> sys.stderr, ('The no corruption code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((training_time) / 60.))
    # image = Image.fromarray(
    #     tile_raster_images(X=da.W.get_value(borrow=True).T,
    #                        img_shape=(128, 128), tile_shape=(10, 10),
    #                        tile_spacing=(1, 1)))
    # image.save('filters_corruption_0.png')
    # print train_set_x.get_value(borrow=True).shape
    # sample = train_set_x.get_value(borrow=True)[0]
    # print sample.shape
    # print da.get_hidden_values(sample)
    # W = da.W.get_value(borrow=True).T
    # print da.W.get_value(borrow=True).T.shape
    # print da.W.get_value(borrow=True).T[0].shape
    #sample = T.ivector('sample')
    #sample = T.matrix('sample')
    index_1 = T.lscalar()    # index to a [mini]batch
    index_2 = T.lscalar()    # index to a [mini]batch
    getHV = da.get_hidden_values(x)
    getHiddenValues = theano.function(
        [index_1,index_2],
        getHV,
        givens={
            x: train_set_x[index_1:index_2]
        }
    )
    #print getHiddenValues(0,1).shape
    import cPickle
    from fetex_image import FetexImage
    pkl_file = open('/Applications/MAMP/htdocs/DeepLearningTutorials/data/im_index.pkl', 'rb')
    im_index = cPickle.load(pkl_file)

    data_path = '/Applications/MAMP/htdocs/DeepLearningTutorials/data/'
    # store = pd.HDFStore('/Applications/MAMP/htdocs/DeepLearningTutorials/data/df_images.h5', 'r')
    fe = FetexImage(verbose=True,support_per_class=100,data_path=data_path, dataset='categories', mode='RGB')
    fe.im_index = im_index
    
    # print im_index[0]
    # print im_index[1]
    X_compressed = getHiddenValues(0,100)
    print X_compressed.shape
    fe.similarImages(X_compressed,pca=False)
    # print getHiddenValues(0,1).shape
    # print sum(X_compressed[0])
    # print sum(getHiddenValues(1,2)[0])
    #print sum(getHiddenValues(100,101)[0])

    # start-snippet-3
    #####################################
    # BUILDING THE MODEL CORRUPTION 30% #
    #####################################
    
    # rng = numpy.random.RandomState(123)
    # theano_rng = RandomStreams(rng.randint(2 ** 30))

    # da = dA(
    #     numpy_rng=rng,
    #     theano_rng=theano_rng,
    #     input=x,
    #     n_visible=128 * 128,
    #     n_hidden=500
    # )

    # cost, updates = da.get_cost_updates(
    #     corruption_level=0.3,
    #     learning_rate=learning_rate
    # )

    # train_da = theano.function(
    #     [index],
    #     cost,
    #     updates=updates,
    #     givens={
    #         x: train_set_x[index * batch_size: (index + 1) * batch_size]
    #     }
    # )

    # start_time = timeit.default_timer()

    # ############
    # # TRAINING #
    # ############

    # # go through training epochs
    # for epoch in xrange(training_epochs):
    #     # go through trainng set
    #     c = []
    #     for batch_index in xrange(n_train_batches):
    #         c.append(train_da(batch_index))

    #     print 'Training epoch %d, cost ' % epoch, numpy.mean(c)

    # end_time = timeit.default_timer()

    # training_time = (end_time - start_time)

    # print >> sys.stderr, ('The 30% corruption code for file ' +
    #                       os.path.split(__file__)[1] +
    #                       ' ran for %.2fm' % (training_time / 60.))
    # # end-snippet-3

    # # start-snippet-4
    # image = Image.fromarray(tile_raster_images(
    #     X=da.W.get_value(borrow=True).T,
    #     img_shape=(128, 128), tile_shape=(10, 10),
    #     tile_spacing=(1, 1)))
    # image.save('filters_corruption_30.png')
    # # end-snippet-4

    # print da.W.get_value(borrow=True).T

    os.chdir('../')
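In Code Example #4, `getHiddenValues` applies the dA's deterministic encoder to a slice of the training set. Assuming the standard tutorial dA, where the encoder computes sigmoid(x·W + b) with W of shape (n_visible, n_hidden), roughly the same computation in plain NumPy looks like this (random stand-in weights and data, sizes matching the dA built above):

import numpy as np

rng = np.random.RandomState(0)
n_visible, n_hidden = 128 * 128, 500
W = rng.uniform(-0.01, 0.01, size=(n_visible, n_hidden))    # stand-in for da.W
b = np.zeros(n_hidden)                                      # stand-in for the hidden bias
batch = rng.rand(100, n_visible)                            # stand-in for train_set_x[0:100]
hidden = 1.0 / (1.0 + np.exp(-(np.dot(batch, W) + b)))      # sigmoid encoder
print hidden.shape  # (100, 500), the same shape as X_compressed above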
Code Example #5
def cosine_distance(a, b):
    # function header reconstructed for this snippet; the value returned is the
    # cosine similarity between the row vectors a and b
    import numpy as np
    from numpy import linalg as LA
    dot_product = np.dot(a, b.T)
    cosine_distance = dot_product / (LA.norm(a) * LA.norm(b))
    return cosine_distance

if __name__ == '__main__':
	
	base_path = '/Applications/MAMP/htdocs/DeepLearningTutorials' 
	#base_path = '/home/ubuntu/DeepLearningTutorials' 

	from fetex_image import FetexImage
	from PIL import Image
	import random

	datasets = load_data('mnist.pkl.gz')

	train_set_x, train_set_y = datasets[0]
	valid_set_x, valid_set_y = datasets[1]
	test_set_x, test_set_y = datasets[2]

	cnn = MetaCNN(learning_rate=0.05,nkerns=[48,128,256], filters=[13,5,4], batch_size=64,poolsize=[(2,2),(2,2),(2,2)], n_hidden=[200,50,2] , n_out=2, im_width=128,im_height=128)
	# cnn.fit(train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y, n_epochs=5)
	# cnn.save(fpath=base_path + '/data/')


	#folder = base_path + '/data/cnn-furniture/'

	# Predictions after training
	cnn.load(base_path + '/data/best_model.pkl')
	#cnn.load('/home/ubuntu/DeepLearningTutorials/data/MetaCNN.2015-10-19-13:59:18.pkl')
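A minimal usage sketch for the cosine helper at the top of Code Example #5 (toy vectors, names illustrative):

import numpy as np
from numpy import linalg as LA

a = np.array([[1.0, 0.0, 1.0]])
b = np.array([[0.5, 0.5, 1.0]])
similarity = np.dot(a, b.T) / (LA.norm(a) * LA.norm(b))
print similarity  # [[ 0.866...]]; identical vectors would give 1.0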
Code Example #6
def predict():
    """An example of how to load a trained model and use it
    to predict labels.
    """
    import cPickle
    import numpy as np
    import theano
    import theano.tensor as T
    from fetex_image import FetexImage
    # load_data comes from the accompanying tutorial code (e.g. logistic_sgd.py)
    from logistic_sgd import load_data
    from sktheano_cnn import MetaCNN as CNN

    cnn = CNN()

    pkl_file = open( '../data/train_set.pkl', 'rb')
    train_set = cPickle.load(pkl_file)

    pkl_file = open( '../data/valid_set.pkl', 'rb')
    valid_set = cPickle.load(pkl_file)

    pkl_file = open( '../data/test_set.pkl', 'rb')
    test_set = cPickle.load(pkl_file)

    """An example of how to load a trained model and use it
    to predict labels.
    """

    fe = FetexImage(verbose=True)
    # load the saved model
    classifier = cPickle.load(open('best_model.pkl'))

    layer0 = cPickle.load(open('../data/layer0.pkl'))
    layer1 = cPickle.load(open('../data/layer1.pkl'))
    # layer2 = cPickle.load(open('../data/layer2.pkl')) 
    layer3 = cPickle.load(open('../data/layer3.pkl'))
    layer4 = cPickle.load(open('../data/layer4.pkl'))

    #layer0_input = x.reshape((batch_size, 3, 64, 64))

    # predict = theano.function(
    #     outputs=layer4.y_pred,
    #     givens = {x : train_set_x[0] }
    # )

    # compile a predictor function
    predict_model = theano.function(
        inputs=[classifier.input],
        outputs=classifier.y_pred)

    # We can test it on some examples from the test set
    dataset='mnist.pkl.gz'
    datasets = load_data(dataset)
    
    test_set_x, test_set_y = datasets[2]
    test_set_x = test_set_x.get_value()
    
    train_set_x, train_set_y = datasets[0]
    train_set_x = train_set_x.get_value()

    pkl_file = open( '../data/X_original.pkl', 'rb')
    X_original = cPickle.load(pkl_file)

    a = X_original[0]
    #fe.reconstructImage(a).show()

    #predicted_values = predict_model([a])

    get_input = theano.function(
        inputs=[classifier.input],
        outputs=classifier.input
    )
    
    a = get_input(train_set_x[0:1])
    #print a.shape
    
    x = T.matrix('x')   # the data is presented as rasterized images
    # predict = theano.function(
    #     inputs = [x],
    #     outputs=layer3.output
    # )

    #layer0_input = x.reshape((batch_size, 3, 64, 64))
    predict = theano.function(
        inputs = [layer0.input],
        outputs=layer4.y_pred
    )
    # givens = { x : train_set_x[0] }
    # train_set_x = train_set_x[0:400]
    # reshape expects a shape tuple; this assumes 400 rows of 3 * 64 * 64 values
    x = train_set_x.reshape((400, 3, 64, 64))
    print predict(x)
    #predicted_values = predict_model([train_set_x[0]])
    #print predicted_values
    return "fffff"

    """