Example #1
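A training and evaluation routine for a sentiment-classification CNN, written as a test method (the self.* attributes such as senti_train_set and train_size are presumably prepared by the test fixture). It builds a ConvNet from a config file, trains with a step-decayed learning rate, saves the model after every epoch, and reports accuracy on a held-out test set. Note the code targets Python 2 (xrange, integer division with /).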
def testCNN(self):
    conf_filename = './sentiment_cnn.conf'
    # Build the architecture of the CNN
    start_time = time.time()
    configer = CNNConfiger(conf_filename)
    convnet = ConvNet(configer, verbose=True)
    end_time = time.time()
    pprint('Time used to build the architecture of CNN: %f seconds' % (end_time - start_time))
    # Training
    learn_rate = 0.5
    batch_size = configer.batch_size
    num_batches = self.train_size / batch_size
    start_time = time.time()
    for i in xrange(configer.nepoch):
        right_count = 0
        tot_cost = 0
        # Step-decay schedule: integer division shrinks the rate every 100 epochs
        rate = learn_rate / (i / 100 + 1)
        for j in xrange(num_batches):
            minibatch = self.senti_train_set[j * batch_size:(j + 1) * batch_size, :]
            minibatch = minibatch.reshape((batch_size, 1, configer.image_row, configer.image_col))
            label = self.senti_train_label[j * batch_size:(j + 1) * batch_size]
            cost, accuracy = convnet.train(minibatch, label, rate)
            prediction = convnet.predict(minibatch)
            right_count += np.sum(label == prediction)
            tot_cost += cost
            # pprint('Epoch %d, batch %d, cost = %f, local accuracy: %f' % (i, j, cost, accuracy))
        accuracy = right_count / float(self.train_size)
        pprint('Epoch %d, total cost: %f, overall accuracy: %f' % (i, tot_cost, accuracy))
        ConvNet.save('./sentiment.cnn', convnet)
    end_time = time.time()
    pprint('Time used to train CNN on Sentiment analysis task: %f minutes.' % ((end_time - start_time) / 60))
    # Test
    num_batches = self.test_size / batch_size
    right_count = 0
    for i in xrange(num_batches):
        minibatch = self.senti_test_set[i * batch_size:(i + 1) * batch_size, :]
        minibatch = minibatch.reshape((batch_size, 1, configer.image_row, configer.image_col))
        label = self.senti_test_label[i * batch_size:(i + 1) * batch_size]
        prediction = convnet.predict(minibatch)
        right_count += np.sum(prediction == label)
    test_accuracy = right_count / float(self.test_size)
    pprint('Test set accuracy: %f' % test_accuracy)
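The schedule rate = learn_rate / (i / 100 + 1) depends on Python 2 integer division: the rate holds at 0.5 for epochs 0-99, drops to 0.25 for epochs 100-199, then to 0.5 / 3, and so on. A minimal standalone illustration (using // so it behaves identically on Python 3):

learn_rate = 0.5
for i in (0, 99, 100, 199, 200):
    rate = learn_rate / (i // 100 + 1)  # // mirrors Python 2's integer /
    print('epoch %d -> rate %f' % (i, rate))
# epoch 0 -> 0.500000, epoch 99 -> 0.500000, epoch 100 -> 0.250000,
# epoch 199 -> 0.250000, epoch 200 -> 0.166667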
Example #2
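A script-level variant of the same loop, here training on MNIST; convnet, configer, training_set, training_label, test_set, test_label, and the size variables are assumed to be defined earlier in the script.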
image_row = configer.image_row
image_col = configer.image_col
nepoch = configer.nepoch
num_batches = training_size / batch_size
start_time = time.time()
for i in xrange(nepoch):
	for j in xrange(num_batches):
		minibatch = training_set[j*batch_size : (j+1)*batch_size, :]
		minibatch = minibatch.reshape((batch_size, 1, image_row, image_col))
		label = training_label[j*batch_size : (j+1)*batch_size]
		cost, accuracy = convnet.train(minibatch, label)
		pprint('Epoch %d, batch %d, cost = %f, accuracy = %f' % (i, j, cost, accuracy))
	ConvNet.save('./mnist.cnn', convnet)
end_time = time.time()
pprint('Time used to train CNN on MNIST: %f minutes' % ((end_time-start_time) / 60))
# Test accuracy
num_batches = test_size / batch_size
right_count = 0
for i in xrange(num_batches):
	minibatch = test_set[i*batch_size : (i+1)*batch_size]
	label = test_label[i*batch_size : (i+1)*batch_size]
	minibatch = minibatch.reshape((batch_size, 1, image_row, image_col))
	prediction = convnet.predict(minibatch)
	right_count += np.sum(prediction == label)
test_accuracy = right_count / float(test_size)
pprint('Test set accuracy: %f' % test_accuracy)
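One caveat in the evaluation above: test_size / batch_size truncates, so any examples past num_batches * batch_size are never predicted while the denominator remains test_size; whenever the test size is not a multiple of the batch size, the reported accuracy is biased low. A minimal adjustment is to divide by the number of examples actually scored:

scored = num_batches * batch_size            # examples the loop actually predicted
test_accuracy = right_count / float(scored)
pprint('Test set accuracy: %f' % test_accuracy)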
Example #3
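The same sentiment experiment as Example #1, extended to fine-tune the word-embedding matrix: each CNN input row is built on the fly as the mean of the sentence's word vectors, a compiled Theano function returns the gradient of the cost with respect to that input, and the gradient is propagated back onto the individual word vectors.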
    def testCNNwithFineTuning(self):
        '''
        Test the performance of the CNN while fine-tuning the word embedding.
        '''
        pprint('CNN with fine-tuning experiment')
        conf_filename = './sentiment_cnn.conf'
        # Build the architecture of CNN
        start_time = time.time()
        configer = CNNConfiger(conf_filename)
        convnet = ConvNet(configer, verbose=True)
        end_time = time.time()
        pprint('Time used to build the architecture of CNN: %f seconds' %
               (end_time - start_time))
        # Training
        learn_rate = 1.0  # keep this a float: with an int, Python 2's / truncates the decayed rate to 0
        batch_size = configer.batch_size
        num_batches = self.train_size / batch_size
        start_time = time.time()
        # Define the function used for fine-tuning: the gradient of the cost w.r.t. the input
        grad_to_input = T.grad(convnet.cost, convnet.input)
        compute_grad_to_input = theano.function(
            inputs=[convnet.input, convnet.truth], outputs=grad_to_input)
        # Begin training and fine-tuning the word-embedding matrix
        for i in xrange(configer.nepoch):
            right_count = 0
            tot_cost = 0
            # Step-decay schedule: integer division shrinks the rate every 100 epochs
            rate = learn_rate / (i / 100 + 1)
            for j in xrange(num_batches):
                # Record the information of each minibatch
                minibatch_len = list()
                minibatch_indices = list()
                # Dynamically build the training matrix from the current word-embedding matrix
                minibatch_txt = self.senti_train_txt[j * batch_size:(j + 1) * batch_size]
                minibatch = np.zeros((batch_size, self.word_embedding.embedding_dim()), dtype=floatX)
                for k, txt in enumerate(minibatch_txt):
                    words = txt.split()
                    words = [word.lower() for word in words]
                    vectors = np.asarray([self.word_embedding.wordvec(word) for word in words])
                    minibatch[k, :] = np.mean(vectors, axis=0)
                    # Record the length of each sentence
                    minibatch_len.append(len(words))
                    # Record the index of each word in each sentence
                    minibatch_indices.append([self.word_embedding.word2index(word) for word in words])
                # Reshape into the input form expected by the CNN
                minibatch = minibatch.reshape((batch_size, 1, configer.image_row, configer.image_col))
                label = self.senti_train_label[j * batch_size:(j + 1) * batch_size]
                # Training
                cost, accuracy = convnet.train(minibatch, label, rate)
                prediction = convnet.predict(minibatch)
                right_count += np.sum(label == prediction)
                tot_cost += cost
                # Fine-tuning for word-vector matrix
                grad_minibatch = compute_grad_to_input(minibatch, label)
                grad_minibatch = grad_minibatch.reshape((batch_size, self.word_embedding.embedding_dim()))
                # Update the word-embedding matrix: each input row is the mean of its
                # sentence's word vectors, so every word vector receives grad / sentence_length
                minibatch_len = np.asarray(minibatch_len)
                grad_minibatch /= minibatch_len[:, np.newaxis]
                for k, indices in enumerate(minibatch_indices):
                    for idx in indices:
                        self.word_embedding._embedding[idx, :] -= 0.01 * rate * grad_minibatch[k, :]
            accuracy = right_count / float(self.train_size)
            pprint('Epoch %d, total cost: %f, overall accuracy: %f' %
                   (i, tot_cost, accuracy))
            if (i + 1) % 100 == 0:
                ConvNet.save('./sentiment.cnn', convnet)
        end_time = time.time()
        pprint(
            'Time used to train CNN on Sentiment analysis task: %f minutes.' %
            ((end_time - start_time) / 60))
        # Test
        num_batches = self.test_size / batch_size
        right_count = 0
        for i in xrange(num_batches):
            minibatch_txt = self.senti_test_txt[i * batch_size:(i + 1) * batch_size]
            minibatch = np.zeros((batch_size, self.word_embedding.embedding_dim()), dtype=floatX)
            for j, txt in enumerate(minibatch_txt):
                words = txt.split()
                words = [word.lower() for word in words]
                vectors = np.asarray([self.word_embedding.wordvec(word) for word in words])
                minibatch[j, :] = np.mean(vectors, axis=0)
            # Reshape into the input form expected by the CNN
            minibatch = minibatch.reshape((batch_size, 1, configer.image_row, configer.image_col))
            label = self.senti_test_label[i * batch_size:(i + 1) * batch_size]
            prediction = convnet.predict(minibatch)
            right_count += np.sum(prediction == label)
        test_accuracy = right_count / float(self.test_size)
        pprint('Test set accuracy: %f' % test_accuracy)
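The embedding update follows from the chain rule: each input row is the mean of that sentence's word vectors, so the gradient with respect to every word vector in sentence k is grad_minibatch[k] / minibatch_len[k], which is exactly the division and in-place update performed above. A self-contained NumPy sketch of just that step (toy shapes and made-up indices, for illustration only):

import numpy as np

batch_size, dim = 2, 4
grad_input = np.random.randn(batch_size, dim)  # d cost / d mean-vector, one row per sentence
sentences = [[3, 7, 7], [1, 4]]                # word indices of each sentence
embedding = np.random.randn(10, dim)           # toy embedding matrix
rate = 0.5
for k, indices in enumerate(sentences):
    g = grad_input[k] / len(indices)           # the mean splits the gradient evenly
    for w in indices:                          # a repeated word is updated once per occurrence
        embedding[w, :] -= 0.01 * rate * g     # same update rule as the loop above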