Code example #1
0
File: drcn.py — Project: ghif/drcn
	def create_model(self, input_shape=(1, 32, 32), dense_dim=1000, dy=10, nb_filters=(64, 128), kernel_size=(3, 3), pool_size=(2, 2),
		dropout=0.5, bn=True, output_activation='softmax', opt='adam'):
		"""
		Create the DRCN model: a convnet classifier followed by a conv.
		autoencoder whose encoder layers are shared with the convnet
		(via self.enc_functions, populated by create_convnet).

		Args:
			input_shape (tuple)        : input shape; unpacked channels-LAST below
			dense_dim (int)            : dimensionality of the final dense layers
			dy (int)                   : output dimensionality
			nb_filters (sequence)      : #filters of each Conv2D layer
			kernel_size (tuple)	   : Conv2D kernel size
			pool_size (tuple)  	   : MaxPool kernel size
			dropout (float)    	   : dropout rate
			bn (boolean)	   	   : batch normalization mode
			output_activation (string) : act. function for output layer
			opt (string)		   : optimizer name ('adam' or 'rmsprop')

		Side effects:
			Sets self.convae_model and compiles it with an mse
			reconstruction loss; create_convnet builds the classifier.
		"""
		# NOTE(review): unpacked channels-LAST, so the default (1, 32, 32)
		# would mean 32 channels — confirm callers pass (rows, cols, channels).
		[d1, d2, c] = input_shape

		# Resolve the optimizer name to a Keras optimizer instance.
		if opt == 'adam':
			opt = Adam(lr=3e-4)
		elif opt == 'rmsprop':
			opt = RMSprop(lr=1e-4)

		_input = Input(shape=input_shape)

		# Create ConvNet (also populates self.enc_functions, reused below)
		self.create_convnet(_input, dense_dim=dense_dim, dy=dy, nb_filters=nb_filters,
			kernel_size=kernel_size, pool_size=pool_size, dropout=dropout,
			bn=bn, output_activation=output_activation, opt=opt)

		# Create ConvAE, encoder functions are shared with ConvNet
		_h = _input

		# Encoder conv stack: pool after the first two conv layers only.
		for i, nf in enumerate(nb_filters):
			_h = self.enc_functions[i](_h)
			_h = Activation('relu')(_h)
			if i < 2:
				_h = MaxPooling2D(pool_size=pool_size, padding='same')(_h)

		# Remember the pooled feature-map shape so the decoder can restore it.
		[_, wflat, hflat, cflat] = _h.get_shape().as_list()
		_h = Flatten()(_h)

		# Shared dense encoder layers
		for i in range(len(nb_filters), len(self.enc_functions)):
			_h = self.enc_functions[i](_h)
			_h = Activation('relu')(_h)

		# Decoder
		_h = Dense(dense_dim)(_h)
		_h = Activation('relu')(_h)

		_xdec = Dense(wflat*hflat*cflat)(_h)
		_xdec = Activation('relu')(_xdec)
		_xdec = Reshape((wflat, hflat, nb_filters[-1]))(_xdec)
		for i, nf in enumerate(reversed(nb_filters)):
			_xdec = Conv2D(nf, kernel_size, padding='same')(_xdec)
			_xdec = Activation('relu')(_xdec)
			# BUGFIX: mirror the encoder exactly. The encoder pools
			# min(len(nb_filters), 2) times, so upsample on the LAST two
			# decoder convs. The previous condition (`i > 0`) produced one
			# upsampling too few for the default two-layer configuration,
			# leaving the reconstruction half the input's spatial size and
			# breaking the mse loss against the input.
			if i >= len(nb_filters) - 2:
				_xdec = UpSampling2D(size=pool_size)(_xdec)

		# Final reconstruction layer back to `c` channels with clipped ReLU.
		_xdec = Conv2D(c, kernel_size, padding='same', activation=clip_relu)(_xdec)

		# Keras 2 keyword names; the legacy `input=`/`output=` aliases are deprecated.
		self.convae_model = Model(inputs=_input, outputs=_xdec)
		self.convae_model.compile(loss='mse', optimizer=opt)
		print(self.convae_model.summary())
Code example #2
0
File: DeepConv.py — Project: arqcenick/DeepNewton
    # NOTE(review): this excerpt starts mid-function — `conv3`, `input_lr`,
    # and `adam` are defined above the visible region; annotated in place.
    act3 = Activation('relu')(conv3)
    #conv4 = Convolution2D(64, 3, 3,border_mode='same')(act3)
    #act4 = Activation('relu')(conv4)
    # Encoder: alternating 2x2 max-pooling and 3x3 same-padded convolutions,
    # progressively downsampling the feature maps.
    pool2 = MaxPooling2D(pool_size=(2, 2))(act3)
    conv5 = Conv2D(8, (3, 3), padding='same')(pool2)
    act5 = Activation('relu')(conv5)
    pool3 = MaxPooling2D(pool_size=(2, 2))(act5)
    conv6 = Conv2D(8, (3, 3), padding='same')(pool3)
    act6 = Activation('relu')(conv6)
    pool4 = MaxPooling2D(pool_size=(2, 2))(act6)
    conv7 = Conv2D(16, (3, 3), padding='same')(pool4)
    # tanh bounds the code activations to [-1, 1], unlike the relu layers above.
    act7 = Activation('tanh')(conv7)
    #pool5 = MaxPooling2D(pool_size=(2, 2))(act7)
    #conv8 = Convolution2D(32, 3, 3,border_mode='same')(pool5)
    #act8 = Activation('relu')(conv8)
    print(act7.get_shape())
    # Encoder model: low-res input -> 16-channel code; compiled with mse.
    # NOTE(review): `input=`/`output=` are legacy Keras aliases (current
    # spelling is `inputs=`/`outputs=`); left unchanged here.
    encoder = Model(input=[input_lr], output=[act7])
    encoder.compile(optimizer=adam, loss='mse', metrics=['accuracy'])

    # Separate decoder input matching the encoder's code shape (8, 8, 16) —
    # presumably so the decoder can be driven independently; verify against
    # the (unseen) remainder of this function.
    code_lr = Input(shape=(8, 8, 16), dtype='float32', name='decode_input')
    print(code_lr.get_shape())
    #dgaussian = GaussianNoise(0.1)(code_lr)

    #deconv0 = Convolution2D(512, 3, 3, border_mode='same')(code_lr)
    #deact0 = Activation('relu')(deconv0)
    #dfc1 = Dense(32*4*4, activation='relu')(code_lr)
    #dfc2 = Dense(16*8*8, activation='relu')(dgaussian)
    #rshp1 = Reshape((16,8,8))(dfc2)
    #up0 = UpSampling2D(size=(2,2))(rshp1)
    #deconv1 = Convolution2D(32, 3, 3, border_mode='same')(up0)
    #deact1 = Activation('relu')(deconv1)
Code example #3
0
    def create_model(self,
                     input_shape=(1, 32, 32),
                     dense_dim=1000,
                     dy=10,
                     nb_filters=[64, 128],
                     kernel_size=(3, 3),
                     pool_size=(2, 2),
                     dropout=0.5,
                     bn=True,
                     output_activation='softmax',
                     opt='adam'):
        """Create the DRCN model: a convnet classifier followed by a conv. autoencoder.

        The autoencoder's encoder layers are shared with the convnet
        (via self.enc_functions, populated by create_convnet).

        Args:
            input_shape (tuple): input shape; unpacked channels-LAST below
            dense_dim (int): dimensionality of the final dense layers
            dy (int): output dimensionality
            nb_filters (list): list of #Conv2D filters
            kernel_size (tuple): Conv2D kernel size
            pool_size (tuple): MaxPool kernel size
            dropout (float): dropout rate
            bn (boolean): batch normalization mode
            output_activation (string): act. function for output layer
            opt (string): optimizer name ('adam' or 'rmsprop')

        Side effects:
            Sets self.convae_model and compiles it with an mse loss;
            create_convnet builds the classifier model.
        """
        # NOTE(review): unpacked channels-LAST, so the default (1, 32, 32)
        # would mean 32 channels — confirm callers pass (rows, cols, channels).
        [d1, d2, c] = input_shape

        # Resolve the optimizer name to a Keras optimizer instance.
        if opt == 'adam':
            opt = Adam(lr=3e-4)
        elif opt == 'rmsprop':
            opt = RMSprop(lr=1e-4)

        _input = Input(shape=input_shape)

        # Create ConvNet (also populates self.enc_functions, reused below)
        self.create_convnet(_input,
                            dense_dim=dense_dim,
                            dy=dy,
                            nb_filters=nb_filters,
                            kernel_size=kernel_size,
                            pool_size=pool_size,
                            dropout=dropout,
                            bn=bn,
                            output_activation=output_activation,
                            opt=opt)

        # Create ConvAE, encoder functions are shared with ConvNet
        _h = _input

        # Encoder conv stack.
        # NOTE(review): `i < 2` hard-codes pooling to the first two conv
        # layers regardless of len(nb_filters).
        for i, nf in enumerate(nb_filters):
            _h = self.enc_functions[i](_h)
            _h = Activation('relu')(_h)
            if i < 2:
                _h = MaxPooling2D(pool_size=pool_size, padding='same')(_h)

        # Remember the pooled feature-map shape so the decoder can restore it.
        [_, wflat, hflat, cflat] = _h.get_shape().as_list()
        _h = Flatten()(_h)

        # Shared dense encoder layers
        for i in range(len(nb_filters), len(self.enc_functions)):
            _h = self.enc_functions[i](_h)
            _h = Activation('relu')(_h)

        # Decoder
        _h = Dense(dense_dim)(_h)
        _h = Activation('relu')(_h)

        _xdec = Dense(wflat * hflat * cflat)(_h)
        _xdec = Activation('relu')(_xdec)
        _xdec = Reshape((wflat, hflat, nb_filters[-1]))(_xdec)
        i = 0
        # NOTE(review): `i > 0` upsamples len(nb_filters) - 1 times, while the
        # encoder pools min(len(nb_filters), 2) times — for the default
        # two-filter config the reconstruction comes out at half the input's
        # spatial size; verify against how fit targets are shaped.
        for nf in reversed(nb_filters):
            _xdec = Conv2D(nf, kernel_size, padding='same')(_xdec)
            _xdec = Activation('relu')(_xdec)

            if i > 0:
                _xdec = UpSampling2D(size=pool_size)(_xdec)
            i += 1

        # Final reconstruction layer back to `c` channels with clipped ReLU.
        _xdec = Conv2D(c, kernel_size, padding='same',
                       activation=clip_relu)(_xdec)

        # NOTE(review): `input=`/`output=` are legacy Keras aliases (current
        # spelling is `inputs=`/`outputs=`); left unchanged here.
        self.convae_model = Model(input=_input, output=_xdec)
        self.convae_model.compile(loss='mse', optimizer=opt)
        print(self.convae_model.summary())