Code example #1
File: model.py  Project: mozzielol/Report
	def __init__(self,type='nn'):
		self.num_classes = 10
		self.history = None
		self.epoch = 10
		self.verbose = True
		self.info = Info()

		if type == 'nn':
			self.model = Sequential()
			self.model.add(Dense(128,input_shape=(784,),activation='relu'))
			self.model.add(Dense(128,activation='relu'))
			self.model.add(Dense(self.num_classes,activation='softmax'))

		elif type == 'cnn':
			self.model = Sequential()
			self.model.add(Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3),activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(Conv2D(32,(3, 3),activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(MaxPooling2D(pool_size=(2, 2)))
			#model.add(Dropout(0.25))

			self.model.add(Conv2D(64, (3, 3), padding='same',activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(Conv2D(64, (3,3),activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(MaxPooling2D(pool_size=(2, 2)))
			#model.add(Dropout(0.25))

			self.model.add(Flatten())
			self.model.add(Dense(512,activation='relu'))
			#model.add(Activation('relu'))
			#model.add(Dropout(0.5))
			self.model.add(Dense(self.num_classes,activation='softmax'))
Code example #2
File: model.py  Project: mozzielol/kalman
    def __init__(self):
        self.dim = 64
        self.num_classes = 10
        self.history = None
        self.epoch = 1
        input_shape = (32, 32, 3)

        self.info = Info()

        self.model = Sequential()
        self.model.add(
            Conv2D(32, (3, 3),
                   padding='same',
                   input_shape=input_shape,
                   activation='relu'))
        self.model.add(Conv2D(32, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.num_classes, activation='softmax'))
Code example #3
File: model.py  Project: mozzielol/Report
    def __init__(self):
        self.dim = 64
        self.num_classes = 10
        self.history = None
        self.epoch = 10

        self.info = Info()

        self.model = Sequential()
        self.model.add(Dense(50, input_shape=(784, ), activation='relu'))
        self.model.add(Dense(self.num_classes, activation='softmax'))

        self.tbCallBack = TensorBoard(log_dir='./logs/mnist_drift/kal/',
                                      histogram_freq=0,
                                      write_graph=True,
                                      write_grads=True,
                                      write_images=True,
                                      embeddings_freq=0,
                                      embeddings_layer_names=None,
                                      embeddings_metadata=None)
Code example #4
	def __init__(self):
		self.dim = 64
		self.num_classes = 10
		self.history = None
		self.epoch = 10
		
		self.info = Info()
		self.count = 0

		
		self.model = Sequential()
		self.model.add(Dense(50,input_shape=(784,),activation='relu'))
		self.model.add(Dense(self.num_classes,activation='softmax'))

		self.y = self.model.output
		self.var_list = self.model.trainable_weights
Code example #5
def run():
    pygame.init()
    pygame.font.init()
    set = Setting()
    screen = pygame.display.set_mode((set.width, set.height))
    info = Info(screen, set)
    pygame.display.set_caption("TicTac")

    blocks = Group()
    winpage = WinPage(screen, set, info)
    pausepage = PausePage(screen, set, info)
    pages = {}
    pages['Win'] = winpage
    pages['Pause'] = pausepage
    pages['Play'] = PlayPage(screen, set, info, blocks)
    create_board(screen, set, blocks)
    while True:
        check_event(screen, set, blocks, info, pages)
        update_screen(screen, set, blocks, info, pages)
Code example #6
File: model.py  Project: mozzielol/Report
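# Note: this excerpt omits its import block; it relies on the Keras API
# (Sequential, Dense, Conv2D, MaxPooling2D, Flatten, TensorBoard),
# matplotlib.pyplot as plt, IPython's display, and the project-local
# Info and Kalman_filter_modifier classes.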
class Model(object):
	'''
	 - Initialize the model.
	 - Important parameters:
	 	- history: records the accuracy on the training and validation datasets.
	'''
	def __init__(self,type='nn'):
		self.num_classes = 10
		self.history = None
		self.epoch = 10
		self.verbose = True
		self.info = Info()

		if type == 'nn':
			self.model = Sequential()
			self.model.add(Dense(128,input_shape=(784,),activation='relu'))
			self.model.add(Dense(128,activation='relu'))
			self.model.add(Dense(self.num_classes,activation='softmax'))

		elif type == 'cnn':
			self.model = Sequential()
			self.model.add(Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3),activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(Conv2D(32,(3, 3),activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(MaxPooling2D(pool_size=(2, 2)))
			#model.add(Dropout(0.25))

			self.model.add(Conv2D(64, (3, 3), padding='same',activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(Conv2D(64, (3,3),activation='relu'))
			#model.add(Activation('relu'))
			self.model.add(MaxPooling2D(pool_size=(2, 2)))
			#model.add(Dropout(0.25))

			self.model.add(Flatten())
			self.model.add(Dense(512,activation='relu'))
			#model.add(Activation('relu'))
			#model.add(Dropout(0.5))
			self.model.add(Dense(self.num_classes,activation='softmax'))
			#model.add(Activation('softmax'))

	


	# Set the data used for validation
	def val_data(self,X_test,y_test):
		self.X_test = X_test
		self.y_test = y_test

	# Train the model with the standard SGD optimizer
	def fit(self,X_train,y_train):
		self.model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
		if self.history is None:
			self.history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=self.verbose,validation_data=(self.X_test,self.y_test))
		else:
			history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=self.verbose,validation_data=(self.X_test,self.y_test))
			self.history.history['acc'].extend(history.history['acc'])
			self.history.history['val_acc'].extend(history.history['val_acc'])

	def set_fisher_data(self,X,y):
		self.info.set_fisher_data(X,y)

	'''
	 - This function is used for the 'kal' algorithm.
	 - The model calculates the gradient on D1[batch0] and never accesses D1 again.
	'''
	def transfer(self,X_train,y_train,num=None):
		self.info.set_train_data(X_train,y_train)
		kalman_filter = Kalman_filter_modifier(info=self.info,num=num)
		history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=self.verbose,callbacks=[kalman_filter],validation_data=(self.X_test,self.y_test))
		self.history.history['acc'].extend(history.history['acc'])
		self.history.history['val_acc'].extend(history.history['val_acc'])


	def save(self,name):
		import json
		with open('./logs/{}.txt'.format(name),'w') as f:
			json.dump(self.history.history,f)
		self.model.save('./models/{}.h5'.format(name))

	def evaluate(self,X_test,y_test):
		
		score=self.model.evaluate(X_test,y_test,batch_size=128)
		print("Convolutional neural network test loss:",score[0])
		print('Convolutional neural network test accuracy:',score[1])

		return score[1]

	def get_history(self):
		return self.history


	'''
	Plot the history of accuracy
	'''
	def plot(self,name,model,shift=2):
		plt.subplot(211)
		plt.title('accuracy on current training data')
		for i in range(shift):
			plt.vlines(self.epoch*(i+1),0,1,color='r',linestyles='dashed')
		
		plt.plot(self.history.history['acc'],label='{}'.format(model))
		plt.ylabel('acc')
		plt.xlabel('training time')
		plt.legend(loc='upper right')
		plt.subplot(212)
		plt.title('validation accuracy on original data')
		plt.plot(self.history.history['val_acc'],label='{}'.format(model))
		plt.ylabel('acc')
		plt.xlabel('training time')
		for i in range(shift):
			plt.vlines(self.epoch*(i+1),0,1,color='r',linestyles='dashed')
		plt.legend(loc='upper right')
		plt.subplots_adjust(wspace=1,hspace=1)
		plt.savefig('./images/{}.png'.format(name))
		display.display(plt.gcf())
		display.clear_output(wait=True)

	def enable_tensorboard(self):
		self.tbCallBack = TensorBoard(log_dir='./logs/mnist_drift/kal/',
				histogram_freq=0,
				write_graph=True,
				write_grads=True,
				write_images=True,
				embeddings_freq=0,
				embeddings_layer_names=None,
				embeddings_metadata=None)
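
For context, here is a minimal, hypothetical usage sketch of the Model class above. It assumes Keras' built-in MNIST loader and to_categorical for one-hot labels; the data preparation steps are illustrative assumptions and are not part of the project.

# Hypothetical usage sketch (not part of the project source). Assumes the
# Model class above is importable and Keras' MNIST loader is available.
from keras.datasets import mnist
from keras.utils import to_categorical

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(-1, 784).astype('float32') / 255.0
X_test = X_test.reshape(-1, 784).astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

model = Model(type='nn')          # dense network for 784-dimensional inputs
model.val_data(X_test, y_test)    # register the validation split before training
model.fit(X_train, y_train)       # plain SGD; populates model.history
acc = model.evaluate(X_test, y_test)  # prints loss/accuracy and returns accuracy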
Code example #7
File: model.py  Project: mozzielol/test1
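# Note: this excerpt omits its import block; it relies on the Keras API
# (Sequential, Dense, TensorBoard), matplotlib.pyplot as plt, and the
# project-local Info and op_pre_callback classes.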
class cnn_model(object):
	'''
	 - Initialize the model.
	 - Important parameters:
	 	- history: records the accuracy on the training and validation datasets.
	'''
	def __init__(self):
		self.dim = 64
		self.num_classes = 10
		self.history = None
		self.epoch = 10
		
		self.info = Info()

		
		self.model = Sequential()
		self.model.add(Dense(50,input_shape=(784,),activation='relu'))
		self.model.add(Dense(self.num_classes,activation='softmax'))

		self.tbCallBack = TensorBoard(log_dir='./logs/mnist_drift/kal/',
				histogram_freq=0,
				write_graph=True,
				write_grads=True,
				write_images=True,
				embeddings_freq=0,
				embeddings_layer_names=None,
				embeddings_metadata=None)


	# Set the data used for validation
	def val_data(self,X_test,y_test):
		self.X_test = X_test
		self.y_test = y_test

	# Train the model with the standard SGD optimizer
	def fit(self,X_train,y_train):
		self.model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
		if self.history is None:
			self.history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=True,validation_data=(self.X_test,self.y_test))
		else:
			history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=True,validation_data=(self.X_test,self.y_test))
			self.history.history['acc'].extend(history.history['acc'])
			self.history.history['val_acc'].extend(history.history['val_acc'])

		self.info.add_data(X_train,y_train)

	'''
	 - This function is used for the 'kal' algorithm.
	 - The model calculates the gradient on D1[batch0] and never accesses D1 again.
	'''
	def transfer(self,X_train,y_train,num=2):
		self.info.add_data(X_train,y_train)
		opcallback = op_pre_callback(info=self.info,use_pre=False)
		#self.model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
		history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=True,callbacks=[opcallback],validation_data=(self.X_test,self.y_test))
		self.history.history['acc'].extend(history.history['acc'])
		self.history.history['val_acc'].extend(history.history['val_acc'])

	'''
	 - This function is used for the 'kal_pre' algorithm.
	 - The model accesses D1 to calculate the gradient throughout the whole training process.
	'''
	def use_pre(self,X_train,y_train):
		opcallback = op_pre_callback(use_pre=True)
		self.model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
		history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=True,callbacks=[opcallback],validation_data=(self.X_test,self.y_test))
		self.history.history['acc'].extend(history.history['acc'])
		self.history.history['val_acc'].extend(history.history['val_acc'])

	'''
	 - This function is used for the 'kal_cur' algorithm.
	 - The model does not access D1; all gradients are calculated on the current training data.
	'''
	def use_cur(self,X_train,y_train):
		self.info.set_value('X_train',X_train)
		self.info.set_value('y_train',y_train)
		opcallback = op_pre_callback(use_pre=False)
		self.model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
		history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=True,callbacks=[opcallback],validation_data=(self.X_test,self.y_test))
		self.history.history['acc'].extend(history.history['acc'])
		self.history.history['val_acc'].extend(history.history['val_acc'])

	'''
	 - A simple transfer-learning function.
	 - It pops the last two layers and appends a new classifier to the end of the model.
	'''
	def nor_trans(self,X_train,y_train):
		self.model.pop()
		self.model.pop()
		
		for layer in self.model.layers:
			layer.trainable = False
		self.model.add(Dense(32,activation='relu'))
		self.model.add(Dense(self.num_classes,activation='softmax'))
		self.model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
		history = self.model.fit(X_train,y_train,epochs=self.epoch,batch_size=128,verbose=True,validation_data=(self.X_test,self.y_test))
		self.history.history['acc'].extend(history.history['acc'])
		self.history.history['val_acc'].extend(history.history['val_acc'])


	def save(self,name):
		import json
		with open('./logs/{}.txt'.format(name),'w') as f:
			json.dump(self.history.history,f)
		self.model.save('./models/{}.h5'.format(name))

	def evaluate(self,X_test,y_test):
		
		score=self.model.evaluate(X_test,y_test,batch_size=128)
		print("Convolutional neural network test loss:",score[0])
		print('Convolutional neural network test accuracy:',score[1])

		return score[1]

	def get_history(self):
		return self.history


	'''
	Plot the history of accuracy
	'''
	def plot(self,name,model,shift=2):
		plt.subplot(211)
		plt.title('accuracy on current training data')
		for i in range(shift):
			plt.vlines(self.epoch*(i+1),0,1,color='r',linestyles='dashed')
		
		plt.plot(self.history.history['acc'],label='{}'.format(model))
		plt.ylabel('acc')
		plt.xlabel('training time')
		plt.legend(loc='upper right')
		plt.subplot(212)
		plt.title('validation accuracy on original data')
		plt.plot(self.history.history['val_acc'],label='{}'.format(model))
		plt.ylabel('acc')
		plt.xlabel('training time')
		for i in range(shift):
			plt.vlines(self.epoch*(i+1),0,1,color='r',linestyles='dashed')
		plt.legend(loc='upper right')
		plt.subplots_adjust(wspace=1,hspace=1)
		plt.savefig('./images/{}.png'.format(name))
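
The docstrings above describe four training variants (plain fit, 'kal', 'kal_pre', 'kal_cur') plus a simple transfer-learning baseline. The sketch below is a hedged illustration of how they might be driven on two 784-dimensional splits D1 and D2; the names X1_train/y1_train, X2_train/y2_train, X_test and y_test are placeholders for pre-processed, one-hot-encoded data and are not defined in this file.

# Hypothetical driver for the cnn_model variants above (illustrative only).
m = cnn_model()
m.val_data(X_test, y_test)    # validation data must be registered first

m.fit(X1_train, y1_train)     # plain SGD on D1; also hands D1 to Info

# 'kal': gradients from D1[batch0] only; D1 is not revisited afterwards
m.transfer(X2_train, y2_train)

# 'kal_pre': keeps accessing D1 for gradients throughout training on D2
# m.use_pre(X2_train, y2_train)

# 'kal_cur': gradients come only from the current training data D2
# m.use_cur(X2_train, y2_train)

# plain transfer-learning baseline: freeze layers, add a new classifier head
# m.nor_trans(X2_train, y2_train)

m.plot('mnist_drift', 'kal', shift=1)  # assumes an ./images/ directory exists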
Code example #8
File: model.py  Project: mozzielol/kalman
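# Note: this excerpt omits its import block; it relies on the Keras API
# (Sequential, Conv2D, MaxPooling2D, Dropout, Flatten, Dense),
# matplotlib.pyplot as plt, and the project-local Info and
# op_pre_callback classes.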
class cnn_model(object):
    '''
     - Initialize the model.
     - Important parameters:
        - history: records the accuracy on the training and validation datasets.
    '''
    def __init__(self):
        self.dim = 64
        self.num_classes = 10
        self.history = None
        self.epoch = 1
        input_shape = (32, 32, 3)

        self.info = Info()

        self.model = Sequential()
        self.model.add(
            Conv2D(32, (3, 3),
                   padding='same',
                   input_shape=input_shape,
                   activation='relu'))
        self.model.add(Conv2D(32, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
        self.model.add(Conv2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.num_classes, activation='softmax'))

    # Set the data used for validation
    def val_data(self, X_test, y_test):
        self.X_test = X_test
        self.y_test = y_test

    # Train the model with the standard SGD optimizer
    def fit(self, X_train, y_train):
        self.model.compile(optimizer='sgd',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
        if self.history is None:
            self.history = self.model.fit(X_train,
                                          y_train,
                                          epochs=self.epoch,
                                          batch_size=128,
                                          verbose=True,
                                          validation_data=(self.X_test,
                                                           self.y_test))
        else:
            history = self.model.fit(X_train,
                                     y_train,
                                     epochs=self.epoch,
                                     batch_size=128,
                                     verbose=True,
                                     validation_data=(self.X_test,
                                                      self.y_test))
            self.history.history['acc'].extend(history.history['acc'])
            self.history.history['val_acc'].extend(history.history['val_acc'])

        self.info.add_data(X_train, y_train)

    '''
     - This function is used for the 'kal' algorithm.
     - The model calculates the gradient on D1[batch0] and never accesses D1 again.
    '''

    def transfer(self, X_train, y_train, num=2):
        self.info.add_data(X_train, y_train)
        opcallback = op_pre_callback(info=self.info, use_pre=False)
        #self.model.compile(optimizer='sgd',loss='categorical_crossentropy',metrics=['accuracy'])
        history = self.model.fit(X_train,
                                 y_train,
                                 epochs=self.epoch,
                                 batch_size=128,
                                 verbose=True,
                                 callbacks=[opcallback],
                                 validation_data=(self.X_test, self.y_test))
        self.history.history['acc'].extend(history.history['acc'])
        self.history.history['val_acc'].extend(history.history['val_acc'])

    def save(self, name):
        import json
        with open('./logs/{}.txt'.format(name), 'w') as f:
            json.dump(self.history.history, f)
        self.model.save('./models/{}.h5'.format(name))

    def evaluate(self, X_test, y_test):

        score = self.model.evaluate(X_test, y_test, batch_size=128)
        print("Convolutional neural network test loss:", score[0])
        print('Convolutional neural network test accuracy:', score[1])

        return score[1]

    def get_history(self):
        return self.history

    '''
    Plot the history of accuracy
    '''

    def plot(self, name, model, shift=2):
        plt.subplot(211)
        plt.title('accuracy on current training data')
        for i in range(shift):
            plt.vlines(self.epoch * (i + 1),
                       0,
                       1,
                       color='r',
                       linestyles='dashed')

        plt.plot(self.history.history['acc'], label='{}'.format(model))
        plt.ylabel('acc')
        plt.xlabel('training time')
        plt.legend(loc='upper right')
        plt.subplot(212)
        plt.title('validation accuracy on original data')
        plt.plot(self.history.history['val_acc'], label='{}'.format(model))
        plt.ylabel('acc')
        plt.xlabel('training time')
        for i in range(shift):
            plt.vlines(self.epoch * (i + 1),
                       0,
                       1,
                       color='r',
                       linestyles='dashed')
        plt.legend(loc='upper right')
        plt.subplots_adjust(wspace=1, hspace=1)
        plt.savefig('./images/{}.png'.format(name))
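
Since this variant hard-codes a (32, 32, 3) input shape, CIFAR-10-sized data is a natural fit. The sketch below is a hypothetical fit-then-transfer run using Keras' CIFAR-10 loader; the halving of the training set into D1 and D2 and the file name passed to save() are illustrative assumptions, not part of the project.

# Hypothetical usage sketch (not part of the project source). Assumes the
# cnn_model class above is importable and Keras' CIFAR-10 loader is available.
from keras.datasets import cifar10
from keras.utils import to_categorical

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

half = len(X_train) // 2      # illustrative split into two "tasks" D1 and D2
m = cnn_model()
m.val_data(X_test, y_test)
m.fit(X_train[:half], y_train[:half])        # plain SGD on D1
m.transfer(X_train[half:], y_train[half:])   # 'kal' update on D2 via op_pre_callback
m.save('cifar_kal')   # assumes ./logs/ and ./models/ directories exist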