Code example #1
File: mlp_rot.py  Project: jonomon/deeplearn
def build_model(x_dim):
    model = Sequential()
    # Dense(64) is a fully-connected layer with 64 hidden units.
    # In the first layer you must specify the expected input data shape:
    # here, x_dim-dimensional vectors.
    model.add(Dense(output_dim=64, input_dim=x_dim, init='uniform',
                    W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
    model.add(Activation('linear'))
    model.add(Dropout(0.5))

    model.add(Dense(output_dim=64, input_dim=64, init='uniform',
                    W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
    model.add(Activation('linear'))
    # model.add(Dropout(0.5))

    model.add(Dense(output_dim=1, input_dim=64, init='uniform',
                    W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
    model.add(Activation("linear"))

    # model.add(Dense(output_dim=1, input_dim=x_dim, init='uniform'))
    # model.add(Activation("linear"))

    # model.add(Activation('softmax'))

    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='mean_squared_error',
                  class_mode='binary',
                  optimizer='rmsprop')

    return model
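The snippet above uses the Keras 1.x API (output_dim, init, W_regularizer, activity_l2, class_mode). For comparison, a hedged sketch of the same network under the Keras 2 API, applying the standard renames (output_dim -> units, W_regularizer -> kernel_regularizer, activity_l2(x) -> activity_regularizer=l2(x)); the no-op Activation('linear') layers are dropped:

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.regularizers import l2

def build_model_v2(x_dim):
    # Keras 2 equivalent of build_model above
    model = Sequential()
    model.add(Dense(64, input_dim=x_dim, kernel_initializer='random_uniform',
                    kernel_regularizer=l2(0.01), activity_regularizer=l2(0.01)))
    model.add(Dropout(0.5))
    model.add(Dense(64, kernel_initializer='random_uniform',
                    kernel_regularizer=l2(0.01), activity_regularizer=l2(0.01)))
    model.add(Dense(1, kernel_initializer='random_uniform',
                    kernel_regularizer=l2(0.01), activity_regularizer=l2(0.01)))
    # class_mode was removed in Keras 1.0; plain MSE + rmsprop matches the original
    model.compile(loss='mean_squared_error', optimizer='rmsprop')
    return model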
Code example #2
File: util_model.py  Project: gmaher/tcl_code
def OBG_FCN(FCN,OBP_FCN,input_shape=(64,64,1), Nfilters=32, Wfilter=3, output_channels=1, l2_reg=0):
    x = Input(shape=input_shape)

    fcn_out = FCN(x)

    obp_out = OBP_FCN(x)

    d = Convolution2D(Nfilters, Wfilter, Wfilter, activation='relu', border_mode='same',
                      W_regularizer=l2(l2_reg), b_regularizer=l2(l2_reg))(obp_out)
    d = BatchNormalization()(d)
    d = Convolution2D(Nfilters, Wfilter, Wfilter, activation='relu', border_mode='same',
                      W_regularizer=l2(l2_reg), b_regularizer=l2(l2_reg))(d)
    d = BatchNormalization()(d)
    d = Convolution2D(Nfilters, Wfilter, Wfilter, activation='relu', border_mode='same',
                      W_regularizer=l2(l2_reg), b_regularizer=l2(l2_reg))(d)
    d = BatchNormalization()(d)
    d = Convolution2D(Nfilters, Wfilter, Wfilter, activation='relu', border_mode='same',
                      W_regularizer=l2(l2_reg), b_regularizer=l2(l2_reg))(d)
    d = BatchNormalization()(d)
    d = Convolution2D(output_channels, Wfilter, Wfilter, activation='sigmoid', border_mode='same',
                      W_regularizer=l2(l2_reg), b_regularizer=l2(l2_reg))(d)

    #merge
    d = merge([d,fcn_out], mode='mul')
    OBG_FCN = Model(x,d)
    return OBG_FCN
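A hypothetical harness for OBG_FCN (Keras 1.x API, matching the snippet): two untrained one-layer models stand in for the real pretrained FCN and OBP_FCN, just to exercise the wiring.

from keras.layers import Input, Convolution2D
from keras.models import Model

inp = Input(shape=(64, 64, 1))
# stand-ins for the pretrained networks: single-channel sigmoid maps
FCN_stub = Model(input=inp, output=Convolution2D(1, 1, 1, activation='sigmoid', border_mode='same')(inp))
OBP_stub = Model(input=inp, output=Convolution2D(1, 1, 1, activation='sigmoid', border_mode='same')(inp))

model = OBG_FCN(FCN_stub, OBP_stub, input_shape=(64, 64, 1))
model.compile(optimizer='adam', loss='binary_crossentropy')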
Code example #3
def getVggModel():
    """Pretrained VGG16 model with fine-tunable last two layers"""
    input_image = Input(shape = (160,320,3))
    
    # NOTE: this Sequential preprocessing pipeline (normalization + cropping)
    # is built but never connected to the model returned below
    model = Sequential()
    model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    
    base_model = VGG16(input_tensor=input_image, include_top=False)
        
    for layer in base_model.layers[:-3]:
        layer.trainable = False

    W_regularizer = l2(0.01)  # NOTE: assigned but unused; l2(0.01) is passed directly below

    x = base_model.get_layer("block5_conv3").output
    x = AveragePooling2D((2, 2))(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    x = Dense(4096, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(1, activation="linear")(x)
    return Model(input=input_image, output=x)
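A hedged sketch of one way to actually wire the normalization and cropping into the graph, since getVggModel builds them but never uses them; whether VGG16 accepts the cropped 65x320 input depends on the Keras version:

from keras.layers import Input, Lambda, Cropping2D
from keras.applications.vgg16 import VGG16

inp = Input(shape=(160, 320, 3))
x = Lambda(lambda t: t / 255.0 - 0.5)(inp)        # normalize in-graph
x = Cropping2D(cropping=((70, 25), (0, 0)))(x)    # crop top and bottom rows
base_model = VGG16(input_tensor=x, include_top=False)
# ...then attach the pooling/Dense head from the snippet and build Model(inp, out)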
Code example #4
File: activity_model.py  Project: datamath28/siamese
def create_base_network(input_shape):
    """
    Base network to be shared (eq. to feature extraction).
    This is shared among the 'siamese' embedding as well as the
    more traditional classification problem
    """
    seq = Sequential()
    seq.add(Convolution2D(L1_FILTERS, 8, 1,
                          border_mode='valid',
                          activation='relu',
                          input_shape=input_shape,
                          name="input"
                          ))
    seq.add(MaxPooling2D(pool_size=(2, 1)))
    seq.add(Convolution2D(L2_FILTERS, 4, 1,
                          border_mode='valid',
                          activation='relu'
                          ))
    seq.add(MaxPooling2D(pool_size=(2, 1)))
    if DROPOUT:
        seq.add(Dropout(CONVO_DROPOUT_FRACTION))
    seq.add(Flatten())
    seq.add(Dense(128, activation='relu'))
    if DROPOUT:
        seq.add(Dropout(DROPOUT_FRACTION))
    seq.add(Dense(128, activation='relu',
                  W_regularizer=l2(0.01),
                  b_regularizer=l2(0.01)
                  ))
    if DROPOUT:
        seq.add(Dropout(DROPOUT_FRACTION))

    return seq
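A hedged sketch of how a base network like this is typically shared in a siamese pair; the Euclidean-distance Lambda and the Keras 1-style Model call are assumptions, not part of the original file:

from keras.layers import Input, Lambda
from keras.models import Model
import keras.backend as K

def pair_model(input_shape):
    base = create_base_network(input_shape)   # one set of weights...
    a = Input(shape=input_shape)
    b = Input(shape=input_shape)
    # ...applied to both inputs, so the two branches share parameters
    d = Lambda(lambda t: K.sqrt(K.sum(K.square(t[0] - t[1]),
                                      axis=1, keepdims=True)))([base(a), base(b)])
    return Model(input=[a, b], output=d)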
Code example #5
File: resnet.py  Project: zishanAhmad/attendance-1
    def conv2D_bn_relu(self, x, nb_filter, nb_row, nb_col,
                       border_mode='valid', subsample=(1, 1),
                       activation='relu', batch_norm=USE_BN,
                       padding=(0, 0), weight_decay=WEIGHT_DECAY,
                       dim_ordering=DIM_ORDERING, name=None):
        '''Utility function to apply a conv + BN + ReLU block to a tensor,
        with optional weight decay (L2 weight regularization).
        '''
        if weight_decay:
            W_regularizer = regularizers.l2(weight_decay)
            b_regularizer = regularizers.l2(weight_decay)
        else:
            W_regularizer = None
            b_regularizer = None

        if padding != (0, 0):
            x = ZeroPadding2D(padding)(x)
        x = Convolution2D(nb_filter, nb_row, nb_col,
                          subsample=subsample,
                          border_mode=border_mode,
                          W_regularizer=W_regularizer,
                          b_regularizer=b_regularizer,
                          dim_ordering=DIM_ORDERING,
                          name=name)(x)
        if batch_norm:
            # NOTE: assumes `name` is set whenever batch_norm is True
            if name == 'conv1':
                bn_name = 'bn_' + name
            else:
                bn_name = 'scale' + name.replace('res', '')
            x = BatchNormalization(name=bn_name)(x)
        if activation == 'relu':
            x = Activation('relu')(x)
        return x
Code example #6
def prep_model(glove, dropout=0, l2reg=1e-4):
    model = Graph()

    # Process sentence embeddings
    model.add_input(name='e0', input_shape=(glove.N,))
    model.add_input(name='e1', input_shape=(glove.N,))
    model.add_node(name='e0_', input='e0',
                   layer=Dropout(dropout))
    model.add_node(name='e1_', input='e1',
                   layer=Dropout(dropout))

    # Generate element-wise features from the pair
    # (the Activation is a no-op; merge_mode is the important part)
    model.add_node(name='sum', inputs=['e0_', 'e1_'], layer=Activation('linear'), merge_mode='sum')
    model.add_node(name='mul', inputs=['e0_', 'e1_'], layer=Activation('linear'), merge_mode='mul')

    # Use MLP to generate classes
    model.add_node(name='hidden', inputs=['sum', 'mul'], merge_mode='concat',
                   layer=Dense(50, W_regularizer=l2(l2reg)))
    model.add_node(name='hiddenS', input='hidden',
                   layer=Activation('sigmoid'))
    model.add_node(name='out', input='hiddenS',
                   layer=Dense(6, W_regularizer=l2(l2reg)))
    model.add_node(name='outS', input='out',
                   layer=Activation('softmax'))

    model.add_output(name='classes', input='outS')
    return model
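The Graph container used here was removed in Keras 1.0. A hedged functional-API sketch of the same pair-feature MLP (add/multiply/concatenate replace the merge_mode arguments, and glove.N becomes an explicit dimension N):

from keras.layers import Input, Dropout, Dense, add, multiply, concatenate
from keras.models import Model
from keras.regularizers import l2

def prep_model_functional(N, dropout=0, l2reg=1e-4):
    e0 = Input(shape=(N,))
    e1 = Input(shape=(N,))
    e0_ = Dropout(dropout)(e0)
    e1_ = Dropout(dropout)(e1)
    s = add([e0_, e1_])        # element-wise sum feature
    m = multiply([e0_, e1_])   # element-wise product feature
    hidden = Dense(50, activation='sigmoid',
                   kernel_regularizer=l2(l2reg))(concatenate([s, m]))
    classes = Dense(6, activation='softmax', kernel_regularizer=l2(l2reg))(hidden)
    return Model([e0, e1], classes)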
Code example #7
File: CDAE.py  Project: henry0312/CDAE
def create(I, U, K, hidden_activation, output_activation, q=0.5, l=0.01):
    '''
    create model
    Reference:
      Yao Wu, Christopher DuBois, Alice X. Zheng, Martin Ester.
        Collaborative Denoising Auto-Encoders for Top-N Recommender Systems.
          The 9th ACM International Conference on Web Search and Data Mining (WSDM'16), p153--162, 2016.

    :param I: number of items
    :param U: number of users
    :param K: number of units in hidden layer
    :param hidden_activation: activation function of hidden layer
    :param output_activation: activation function of output layer
    :param q: drop probability
    :param l: regularization parameter of L2 regularization
    :return: CDAE
    :rtype: keras.models.Model
    '''
    x_item = Input((I,), name='x_item')
    h_item = Dropout(q)(x_item)
    h_item = Dense(K, W_regularizer=l2(l), b_regularizer=l2(l))(h_item)

    # dtype should be int to connect to Embedding layer
    x_user = Input((1,), dtype='int32', name='x_user')
    h_user = Embedding(input_dim=U, output_dim=K, input_length=1, W_regularizer=l2(l))(x_user)
    h_user = Flatten()(h_user)

    h = merge([h_item, h_user], mode='sum')
    if hidden_activation:
        h = Activation(hidden_activation)(h)
    y = Dense(I, activation=output_activation)(h)

    return Model(input=[x_item, x_user], output=y)
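A hypothetical training call for the CDAE factory above, with made-up sizes and random implicit-feedback data (Keras 1.x fit signature, matching the snippet's API era):

import numpy as np

model = create(I=1000, U=50, K=64,
               hidden_activation='relu', output_activation='sigmoid')
model.compile(loss='binary_crossentropy', optimizer='adam')

x_item = (np.random.rand(128, 1000) < 0.05).astype('float32')  # user-item rows
x_user = np.random.randint(0, 50, size=(128, 1))               # user ids
# denoising target: reconstruct the uncorrupted item vector
model.fit([x_item, x_user], x_item, batch_size=32, nb_epoch=1)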
Code example #8
    def __init__(self,graph, input_node, input_shape, forward_shapes,config):
        self.graph = graph
        self.input_node = input_node
        self.config = config

        self.input_shape = input_shape
        self.dim_ordering = config['dim_ordering']
        if self.dim_ordering == 'th':
            self.depth_axis = 2
            self.steps_axis = 3
        else:
            self.depth_axis = 3
            self.steps_axis = 2

        #TODO: from here
        self.initial_upsampling_size = config['googlenet_config']['output_pooling']['size']
        self.initial_upsampling_type = config['googlenet_config']['output_pooling']['type']


        self.W_regularizer = l2(config['W_regularizer_value'])
        self.b_regularizer = l2(config['b_regularizer_value'])
        self.activity_regularizer = activity_l2(config['activity_regularizer_value'])
        self.init = config['init']
        self.activator = Activation(config['decoder_activator'])

        output_name, output_shape = self.initial_upsampling()
        inception = TDBackwardsInception(self.graph, output_name,output_shape,forward_shapes, config)
        output_name,output_shape = inception.result
        self.result,self.output_shape = self.reverse_conv_layers(output_name,output_shape)
Code example #9
File: normalization_test.py  Project: 95vidhi/keras
def test_basic_batchnorm():
    layer_test(normalization.BatchNormalization,
               kwargs={'momentum': 0.9,
                       'epsilon': 0.1,
                       'gamma_regularizer': regularizers.l2(0.01),
                       'beta_regularizer': regularizers.l2(0.01)},
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchNormalization,
               kwargs={'momentum': 0.9,
                       'epsilon': 0.1,
                       'axis': 1},
               input_shape=(3, 4, 2))
    layer_test(normalization.BatchNormalization,
               kwargs={'gamma_initializer': 'ones',
                       'beta_initializer': 'ones',
                       'moving_mean_initializer': 'zeros',
                       'moving_variance_initializer': 'ones'},
               input_shape=(3, 4, 2, 4))
    if K.backend() != 'theano':
        layer_test(normalization.BatchNormalization,
                   kwargs={'momentum': 0.9,
                           'epsilon': 0.1,
                           'axis': 1,
                           'scale': False,
                           'center': False},
                   input_shape=(3, 4, 2, 4))
Code example #10
File: SAE.py  Project: Icedeath/Backup-zty
def Net_model(lr=0.001,decay=1e-6,momentum=0.9):
    model = Sequential()
    model.add(Dense(200, input_dim=15,W_regularizer=l2(0.01)))
    model.add(BN(epsilon=1e-06, mode=0, axis=1, momentum=momentum))
    model.add(Activation(ELU(alpha=1.0)))
    model.add(Dropout(0.0))
    
    model.add(Dense(130,W_regularizer=l2(0.01))) # fully connected
    model.add(BN(epsilon=1e-06, mode=0, axis=1, momentum=momentum))
    model.add(Activation(ELU(alpha=1.0)))
    model.add(Dropout(0.0))     
    
    model.add(Dense(80,W_regularizer=l2(0.01))) # fully connected
    model.add(BN(epsilon=1e-06, mode=0, axis=1, momentum=momentum))
    model.add(Activation(ELU(alpha=1.0)))
    model.add(Dropout(0.0))  
    
    model.add(Dense(30,W_regularizer=l2(0.01))) # fully connected
    model.add(BN(epsilon=1e-06, mode=0, axis=1, momentum=momentum))
    model.add(Activation(ELU(alpha=1.0)))
    model.add(Dropout(0.0))  
    
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    return model
Code example #11
    def __init__(self,graph, input_node, input_shape, config):
        self.graph = graph
        self.input_node = input_node
        self.config = config

        self.input_shape = input_shape
        self.dim_ordering = config['dim_ordering']
        if self.dim_ordering == 'th':
            self.depth_axis = 2
            self.steps_axis = 3
        else:
            self.depth_axis = 3
            self.steps_axis = 2

        self.final_pool_size = config['googlenet_config']['output_pooling']['size']
        self.final_pool_type = config['googlenet_config']['output_pooling']['type']


        self.W_regularizer = l2(config['W_regularizer_value'])
        self.b_regularizer = l2(config['b_regularizer_value'])
        self.activity_regularizer = activity_l2(config['activity_regularizer_value'])
        self.init = config['init']
        if config['encoder_activator'] == 'prelu':
            self.activator = PReLU(init=self.init)
        # to try a different activator, add another case here
        else:
            self.activator = Activation(config['encoder_activator'])

        output_name,output_shape = self.first_conv_layers()
        inception = TDInception(self.graph, output_name,output_shape,config)
        output_name,output_shape = inception.result
        self.result, self.output_shape = self.final_pool(output_name, output_shape)
Code example #12
File: resnet_helpers.py  Project: new-2017/Keras-FCN
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Convolution2D(nb_filter1, 1, 1, subsample=strides,
                          name=conv_name_base + '2a', W_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Convolution2D(nb_filter2, kernel_size, kernel_size, border_mode='same',
                          name=conv_name_base + '2b', W_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c', W_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        shortcut = Convolution2D(nb_filter3, 1, 1, subsample=strides,
                                 name=conv_name_base + '1', W_regularizer=l2(weight_decay))(input_tensor)
        shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)

        x = merge([x, shortcut], mode='sum')
        x = Activation('relu')(x)
        return x
Code example #13
	def policy_head(self, x):

		x = Conv2D(
		filters = 2
		, kernel_size = (1,1)
		, data_format="channels_first"
		, padding = 'same'
		, use_bias=False
		, activation='linear'
		, kernel_regularizer = regularizers.l2(self.reg_const)
		)(x)

		x = BatchNormalization(axis=1)(x)
		x = LeakyReLU()(x)

		x = Flatten()(x)

		x = Dense(
			self.output_dim
			, use_bias=False
			, activation='linear'
			, kernel_regularizer=regularizers.l2(self.reg_const)
			, name = 'policy_head'
			)(x)

		return (x)
Code example #14
File: cnn.py  Project: dmitriydligach/Deep
def get_model(cfg, init_vectors, num_of_features):
  """Model definition"""

  model = Sequential()
  model.add(Embedding(input_dim=num_of_features,
                      output_dim=cfg.getint('cnn', 'embdims'),
                      input_length=maxlen,
                      trainable=True,
                      weights=init_vectors))
  model.add(Conv1D(filters=cfg.getint('cnn', 'filters'),
                   kernel_size=cfg.getint('cnn', 'filtlen'),
                   activation='relu'))
  model.add(GlobalMaxPooling1D())

  model.add(Dropout(cfg.getfloat('cnn', 'dropout')))
  model.add(Dense(
    cfg.getint('cnn', 'hidden'),
    kernel_regularizer=regularizers.l2(0.001)))
  model.add(Activation('relu'))

  model.add(Dropout(cfg.getfloat('cnn', 'dropout')))
  model.add(Dense(
    classes,
    kernel_regularizer=regularizers.l2(0.001)))
  model.add(Activation('softmax'))

  return model
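get_model pulls its hyperparameters from a ConfigParser object and reads maxlen and classes as module-level globals. A hypothetical setup, assuming it runs in the same module as get_model (the option names follow the cfg.get* calls above; all values are made up):

import configparser

cfg = configparser.ConfigParser()
cfg['cnn'] = {'embdims': '300', 'filters': '128', 'filtlen': '3',
              'dropout': '0.25', 'hidden': '64'}

maxlen, classes = 100, 2   # the module-level globals the function expects
model = get_model(cfg, init_vectors=None, num_of_features=10000)
model.compile(loss='categorical_crossentropy', optimizer='adam')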
Code example #15
File: cnn.py  Project: Xls1994/DeepLearning
def buildModel():
    from keras.regularizers import l2
    print('xxx')
    main_inputs = Input(shape=(maxlen,), dtype='int32', name='main_input')
    inputs = Embedding(max_features, embedding_size, input_length=maxlen, weights=[WordEm])(main_inputs)
    # x = Dropout(0.25)(inputs)
    convs = []
    filter_sizes = (2, 3, 4)
    for fsz in filter_sizes:
        conv = Convolution1D(nb_filter=nb_filter,
                             filter_length=fsz,
                             border_mode='valid',
                             activation='relu',
                             subsample_length=1,
                             W_regularizer=l2(l=0.01)
                             )(inputs)
        pool = MaxPooling1D(pool_length=2)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)
    out = Merge(mode='concat', concat_axis=1)(convs)
    # out = GlobalMaxPooling1D()(convs)
    out = BatchNormalization()(out)
    # out = LSTM(lstm_output_size, activation='relu')(out)
    predict = Dense(2, activation='softmax', W_regularizer=l2(0.01))(out)
    model = Model(input=main_inputs, output=predict)
    return model
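The Merge layer used for the concat was removed in Keras 2. A hedged Keras 2 sketch of the same multi-filter-width text CNN, with concatenate replacing Merge(mode='concat') and made-up sizes:

from keras.layers import (Input, Embedding, Conv1D, MaxPooling1D, Flatten,
                          concatenate, Dense, BatchNormalization)
from keras.models import Model
from keras.regularizers import l2

maxlen, max_features, embedding_size, n_filters = 100, 20000, 128, 64

inp = Input(shape=(maxlen,), dtype='int32')
emb = Embedding(max_features, embedding_size, input_length=maxlen)(inp)
branches = []
for fsz in (2, 3, 4):
    c = Conv1D(n_filters, fsz, padding='valid', activation='relu',
               kernel_regularizer=l2(0.01))(emb)
    branches.append(Flatten()(MaxPooling1D(pool_size=2)(c)))
out = BatchNormalization()(concatenate(branches))
pred = Dense(2, activation='softmax', kernel_regularizer=l2(0.01))(out)
model = Model(inp, pred)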
Code example #16
File: train_utils.py  Project: xiaoda99/keras
def build_rlstm(input_dim, h0_dim=40, h1_dim=None, output_dim=1, 
                       rec_layer_type=ReducedLSTMA, rec_layer_init='zero', fix_b_f=False,
                       layer_type=TimeDistributedDense, lr=.001, base_name='rlstm',
                       add_input_noise=True, add_target_noise=True):
    model = Sequential()  
    if add_input_noise:
        model.add(GaussianNoise(.1, input_shape=(None, input_dim)))
    model.add(layer_type(h0_dim, input_dim=input_dim, 
                    init='uniform_small', 
                    W_regularizer=l2(0.0005),
                    activation='tanh'))
    if h1_dim is not None:
        model.add(layer_type(h1_dim, 
                    init='uniform_small', 
                    W_regularizer=l2(0.0005),
                    activation='tanh'))
        
    model.add(rec_layer_type(output_dim, init=rec_layer_init, fix_b_f=fix_b_f, return_sequences=True))
    if add_target_noise:
        model.add(GaussianNoise(5.))
    model.compile(loss="mse", optimizer=RMSprop(lr=lr))  
    
    model.base_name = base_name
    yaml_string = model.to_yaml()
#    print(yaml_string)
    with open(model_savedir + model.base_name+'.yaml', 'w') as f:
        f.write(yaml_string)
    return model
Code example #17
def create_network():
	model=Sequential()
	#layer 1
	model.add(Convolution2D(10,3 ,3,input_shape=(1,PIXELS,PIXELS) ))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(2 , 2)))
	
	
	model.add(Convolution2D(15 , 5, 5, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('relu'))
	model.add(Dropout(0.2))
	
	model.add(Convolution2D(10 , 3, 3, W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('relu'))
	model.add(Dropout(0.2))
	
	model.add(Flatten())
	model.add(Dense(512 ))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))

	#layer 7
	model.add(Dense(512 , W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	#layer 8
	model.add(Dense(10))
	model.add(Activation('softmax'))
	
	sgd = SGD(lr=0.01, decay=0.001, momentum=0.9, nesterov=False)
	#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
	# pass the SGD instance, not the string 'sgd', or the settings above are ignored
	model.compile(loss='categorical_crossentropy', optimizer=sgd)
	return model
Code example #18
File: auto_encoder.py  Project: flaviassantos/pyod
    def _build_model(self):
        model = Sequential()
        # Input layer
        model.add(Dense(
            self.hidden_neurons_[0], activation=self.hidden_activation,
            input_shape=(self.n_features_,),
            activity_regularizer=l2(self.l2_regularizer)))
        model.add(Dropout(self.dropout_rate))

        # Additional layers
        for i, hidden_neurons in enumerate(self.hidden_neurons_, 1):
            model.add(Dense(
                hidden_neurons,
                activation=self.hidden_activation,
                activity_regularizer=l2(self.l2_regularizer)))
            model.add(Dropout(self.dropout_rate))

        # Output layers
        model.add(Dense(self.n_features_, activation=self.output_activation,
                        activity_regularizer=l2(self.l2_regularizer)))

        # Compile model
        model.compile(loss=self.loss, optimizer=self.optimizer)
        print(model.summary())
        return model
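_build_model is normally reached through pyod's estimator interface rather than called directly. A hypothetical end-to-end use (parameter names per pyod's AutoEncoder; the data is random):

import numpy as np
from pyod.models.auto_encoder import AutoEncoder

X = np.random.rand(200, 10)
clf = AutoEncoder(hidden_neurons=[8, 4, 8], l2_regularizer=0.01, epochs=5)
clf.fit(X)                          # fit() builds the network via _build_model()
outlier_scores = clf.decision_scores_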
Code example #19
File: train_utils.py  Project: xiaoda99/keras
def build_reduced_lstm(input_dim, h0_dim=40, h1_dim=None, output_dim=1, 
                       rec_layer_type=ReducedLSTMA, rec_layer_init='uniform',
                       layer_type=TimeDistributedDense, lr=.001, base_name='rlstm'):
    model = Sequential()  
    model.add(layer_type(h0_dim, input_dim=input_dim, 
                    init='uniform', 
                    W_regularizer=l2(0.0005),
                    activation='relu'))
    if h1_dim is not None:
        model.add(layer_type(h1_dim, 
                    init='uniform', 
                    W_regularizer=l2(0.0005),
                    activation='relu'))
#    model.add(LSTM(h0_dim, 
#                   input_dim=input_dim,
#                   init='uniform',
#                   inner_activation='sigmoid',
#                   return_sequences=True))
#    model.add(Dropout(0.4))
#    if h1_dim is not None:
#        model.add(LSTM(h1_dim,
#                       init='uniform',
#                       inner_activation='sigmoid',
#                       return_sequences=True))
#        model.add(Dropout(0.4))
        
    model.add(rec_layer_type(output_dim, init=rec_layer_init, return_sequences=True))
    model.compile(loss="mse", optimizer=RMSprop(lr=lr))  
    
    model.base_name = base_name
    yaml_string = model.to_yaml()
#    print(yaml_string)
    with open(model_savedir + model.base_name+'.yaml', 'w') as f:
        f.write(yaml_string)
    return model
Code example #20
    def __init__(self, graph, input_node, dim_ordering, output_num_channels, num_base_filters):
        # input should have the same dimensionality as the output of the
        # concatenation in the forward inception layer
        self.graph = graph
        self.input_node = input_node
        #output_num_channels should be the number of channels
        #that the original signal fed into the forward inception unit had
        self.output_num_channels = output_num_channels

        self.num_base_filters = num_base_filters

        assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.dim_ordering = dim_ordering

        self.border_mode = 'same'
        self.W_regularizer = l2(0.01)
        self.b_regularizer = l2(0.01)
        self.activity_regularizer = activity_l2(0.01)
        self.W_constraint = None
        self.b_constraint = None
        self.init = 'glorot_uniform'
        self.activator = Activation('hard_sigmoid')

        self.split_inputs()
        left_branch = self.left_branch()
        left_center_branch = self.left_center_branch()
        right_center_branch = self.right_center_branch()
        right_branch = self.right_branch()
        #avg or sum or max?
        self.result = self.combine_branches(left_branch, left_center_branch,
                    right_center_branch, right_branch, 'sum')
Code example #21
def train_top_model():
    # np.load takes a path directly; wrapping it in a text-mode open() breaks under Python 3
    train_data = np.load('bottleneck_features_train.npy')
    train_labels = np.load('target_train.npy')

    validation_data = np.load('bottleneck_features_validation.npy')
    validation_labels = np.load('target_valid.npy')

    model = Sequential()
    model.add(Flatten(input_shape=train_data.shape[1:]))
    model.add(Dense(128, W_regularizer=l2(0.005)))
    model.add(LeakyReLU(alpha=0.001))
    model.add(Dropout(0.5))
    model.add(Dense(128, W_regularizer=l2(0.005)))
    model.add(LeakyReLU(alpha=0.001))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    model.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy')

    model.fit(train_data, train_labels,
              nb_epoch=nb_epoch_top_model, batch_size=batch_size,
              validation_data=(validation_data, validation_labels))
    model.save_weights(top_model_weights_path)
    return
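For context, bottleneck-feature files like the ones loaded above are typically produced by running a frozen convolutional base once over the data and saving its activations. A hedged sketch (VGG16 chosen arbitrarily; the images are random stand-ins):

import numpy as np
from keras.applications.vgg16 import VGG16

base = VGG16(include_top=False, weights='imagenet')
train_images = np.random.rand(8, 224, 224, 3)   # stand-in for real image data
np.save('bottleneck_features_train.npy', base.predict(train_images))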
Code example #22
File: anssel_st.py  Project: BenjaminHess/dataset-sts
def prep_model(N, dropout=0, l2reg=1e-4):
    model = Graph()

    model.add_input(name='e0', input_shape=(N,))
    model.add_input(name='e1', input_shape=(N,))

    model.add_node(name='e0_', input='e0',
                   layer=Activation('linear'))
    model.add_node(name='e1_', input='e1',
                   layer=Activation('linear'))

    model.add_node(name='mul', inputs=['e0_', 'e1_'], layer=Activation('linear'), merge_mode='mul')
    model.add_node(name='sum', inputs=['e0_', 'e1_'], layer=Activation('linear'), merge_mode='sum')

    # absdiff_name = B.absdiff_merge(model, ["e0_", "e1_"], pfx="", layer_name="absdiff")


    model.add_node(name="mul_", input="mul", layer=Dropout(dropout))
    model.add_node(name="sum_", input="sum", layer=Dropout(dropout))

    model.add_node(name='hiddenA', inputs=['mul_', 'sum_'], merge_mode='concat',
                   layer=Dense(50, W_regularizer=l2(l2reg)))

    model.add_node(name='hiddenAS', input='hiddenA',
                   layer=Activation('sigmoid'))

    model.add_node(name='out', input='hiddenAS',
                   layer=Dense(1, W_regularizer=l2(l2reg)))

    model.add_node(name='outS', input='out',
                   layer=Activation('sigmoid'))

    model.add_output(name='score', input='outS')
    return model
Code example #23
def build_model(image_shape=(256,256), embedding_size=512):

    s = image_shape[-1]

    # Sequential model whose output is the shared feature embedding
    feat = Sequential()
    # add layers
    # C64 * 128
    feat.add(Conv2D(64, (3,3), strides=(2, 2), activation='relu', padding='same', input_shape=(3, s, s), data_format='channels_first'))
    # C128 * 64
    feat.add(Conv2D(128, (3,3), strides=(2, 2), activation='relu', data_format='channels_first', padding='same'))
    # C256 * 32
    feat.add(Conv2D(256, (3,3), strides=(2, 2), activation='relu', data_format='channels_first', padding='same'))
    # Flatten "squashes" the input, collapsing a multi-dimensional input to one dimension;
    # it is commonly used in the transition from convolutional to fully connected layers.
    feat.add(Flatten())
    # FC512; both the kernel and the activations use an L2 regularizer
    feat.add(Dense(embedding_size, kernel_regularizer=regularizers.l2(0.01),
                   activity_regularizer=regularizers.l2(0.01)))

    #Input定义输入层, 这里都对应着一张图片
    inp1=Input(shape=(3,s,s))
    inp2=Input(shape=(3,s,s))

    # note: the same model (the same feat) is applied to both inputs, so the weights are shared
    feat1 = feat(inp1)
    feat2 = feat(inp2)

    #计算欧式距离,输入的是一个列表
    #使用Lambda层,本函数用以对上一层的输出施以任何Theano/TensorFlow表达式
    distance = Lambda(euclidean_distance, output_shape = eucl_dist_output_shape)([feat1, feat2])

    model2 = Model([inp1,inp2],[distance])
    model2.compile('adam',loss = 'mse')
    
    return {'feat':feat, 'tee':model2}
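The Lambda layer above references euclidean_distance and eucl_dist_output_shape, which are not shown in this listing. The usual definitions, as in the Keras siamese example (assumed, not taken from this file):

import keras.backend as K

def euclidean_distance(vects):
    x, y = vects
    # clamp before the sqrt for numerical stability at zero distance
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                            K.epsilon()))

def eucl_dist_output_shape(shapes):
    shape1, shape2 = shapes
    return (shape1[0], 1)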
Code example #24
File: smschcnnr.py  Project: Seleucia/CNNRNet
def build_model(params):
   l2=regularizers.l2(0.01)
   l2_out=regularizers.l2(0.001)

   model = Sequential()
   model.add(Convolution2D(params["nkerns"][0], 7, 7,subsample=params['stride_mat'], border_mode='valid', input_shape=(params["nc"], params["size"][1], params["size"][0]),init='he_normal', W_regularizer=l2))
   model.add(Activation('relu'))
   model.add(MaxPooling2D(pool_size=(2, 2)))

   model.add(Convolution2D(params["nkerns"][1], 3, 3,subsample=params['stride_mat'],init='he_normal', W_regularizer=l2))
   model.add(Activation('relu'))
   model.add(MaxPooling2D(pool_size=(2, 2)))

   model.add(Convolution2D(params["nkerns"][2], 2, 2,init='he_normal', W_regularizer=l2))
   model.add(Activation('relu'))
   model.add(MaxPooling2D(pool_size=(2, 2)))
   model.add(Flatten())
   model.add(Dropout(0.5))

   model.add(Dense(400,init='he_normal', W_regularizer=l2_out))
   model.add(Activation('relu'))
   model.add(Dense(400,init='he_normal'))
   model.add(Activation('relu'))
   model.add(Dense(params['n_output'],init='he_normal'))
   model.add(Activation('softmax'))
   adagrad=Adagrad(lr=params['initial_learning_rate'], epsilon=1e-6)
   model.compile(loss='categorical_crossentropy', optimizer=adagrad)
   return model
Code example #25
File: thmlpr.py  Project: Seleucia/CNNRNet
def build_model(params):
   l2=regularizers.l2(0.001)
   l2_out=regularizers.l2(0.00001)
   dims=4096

   #########Left Stream######################
   lmodel = Sequential()
   lmodel.add(Dense(256, input_shape=(dims,),init='he_normal', W_regularizer=l2,activation='tanh'))
   lmodel.add(Dense(256,init='he_normal', W_regularizer=l2,activation='tanh'))
   lmodel.add(Dense(256,init='he_normal', W_regularizer=l2,activation='tanh'))

   #########Right Stream######################
   rmodel = Sequential()
   rmodel.add(Dense(256, input_shape=(dims,),init='he_normal', W_regularizer=l2,activation='tanh'))
   rmodel.add(Dense(256,init='he_normal', W_regularizer=l2,activation='tanh'))
   rmodel.add(Dense(256,init='he_normal', W_regularizer=l2,activation='tanh'))

   #########Merge Stream######################
   model = Sequential()
   model.add(Merge([lmodel, rmodel], mode='mul'))
   model.add(Dense(256,init='he_normal', W_regularizer=l2_out,activation='tanh'))

   model.add(Dense(256,init='he_normal'))
   model.add(Activation('linear'))

   model.add(Dense(params['n_output'],init='he_normal'))

   adagrad=Adagrad(lr=params['initial_learning_rate'], epsilon=1e-6)
   model.compile(loss='mean_squared_error', optimizer=adagrad)
   return model
Code example #26
File: train_ddd.py  Project: SuriyaNitt/DDD
def CNN_model(frameHeight, frameWidth):
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode='same', init='he_normal', activation='relu',
                            input_shape=(1, int(frameHeight), int(frameWidth))))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.1))

    model.add(Convolution2D(64, 3, 3, border_mode='same', init='he_normal', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))

    model.add(Convolution2D(128, 3, 3, border_mode='same', init='he_normal', activation='relu'))
    model.add(MaxPooling2D(pool_size=(8, 8)))
    model.add(Dropout(0.2))

    model.add(Flatten())

    model.add(Dense(32, W_regularizer=l2(1.26e-7)))
    model.add(Activation('relu'))

    model.add(Dense(2, W_regularizer=l2(1e-0)))
    model.add(Activation('softmax'))

    model.compile(Adam(lr=1e-3), loss='categorical_crossentropy')

    plot(model, to_file='model.png')

    return model
Code example #27
File: model_keras.py  Project: ethen8181/programming
def build_keras_base(hidden_layers = [64, 64, 64], dropout_rate = 0, 
                     l2_penalty = 0.1, optimizer = 'adam',
                     n_input = 100, n_class = 2):
    """
    Keras Multi-layer neural network. Fixed parameters include: 
    1. activation function (PRelu)
    2. always uses batch normalization after the activation
    3. use adam as the optimizer
    
    Parameters
    ----------
    Tunable parameters are (these are the ones that are commonly tuned)
    
    hidden_layers: list
        the number of hidden layers, and the size of each hidden layer

    dropout_rate: float 0 ~ 1
        if bigger than 0, there will be a dropout layer

    l2_penalty: float
        or so called l2 regularization

    optimizer: string or keras optimizer
        method to train the network

    Returns
    -------
    model : 
        a keras model

    Reference
    ---------
    https://keras.io/scikit-learn-api/
    """   
    model = Sequential()    
    for index, layers in enumerate(hidden_layers):       
        if not index:
            # specify the input_dim to be the number of features for the first layer
            model.add( Dense( layers, input_dim = n_input,
                              W_regularizer = l2(l2_penalty) ) )
        else:
            model.add( Dense( layers, W_regularizer = l2(l2_penalty) ) )
    
        # insert BatchNorm layer immediately after fully connected layers
        # and before activation layer
        model.add( BatchNormalization() )
        model.add( PReLU() )
        if dropout_rate:
            model.add( Dropout(p = dropout_rate) )
    
    model.add( Dense(n_class) )
    model.add( Activation('softmax') )
    
    # the loss differs for binary and multi-class classification
    loss = 'binary_crossentropy'
    if n_class > 2:
        loss = 'categorical_crossentropy'
    
    model.compile( loss = loss, optimizer = optimizer, metrics = ['accuracy'] )  
    return model
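The docstring points at the scikit-learn wrapper; a hypothetical grid search over this builder (Keras 1-era KerasClassifier to match the W_regularizer API above; data shapes and parameter values are made up):

import numpy as np
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=build_keras_base, n_input=100, n_class=3,
                      nb_epoch=5, verbose=0)
param_grid = {'hidden_layers': [[64, 64], [128]],
              'l2_penalty': [0.01, 0.1]}
search = GridSearchCV(clf, param_grid, cv=3)

X = np.random.rand(300, 100)
y = np.eye(3)[np.random.randint(0, 3, 300)]   # one-hot labels, 3 classes
search.fit(X, y)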
Code example #28
File: model.py  Project: jackg0h/foodtag
def conv2D_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1),
              activation='relu', batch_norm=USE_BN,
              weight_decay=WEIGHT_DECAY, dim_ordering=DIM_ORDERING):
    '''Utility function to apply a conv + BN block to a tensor, with optional
    weight decay (L2 weight regularization).

    Info: taken from the InceptionV3 example script in the Keras GitHub repository.
    '''
    if weight_decay:
        W_regularizer = regularizers.l2(weight_decay)
        b_regularizer = regularizers.l2(weight_decay)
    else:
        W_regularizer = None
        b_regularizer = None

    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      activation=activation,
                      border_mode=border_mode,
                      W_regularizer=W_regularizer,
                      b_regularizer=b_regularizer,
                      dim_ordering=dim_ordering)(x)
    x = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(x)

    if batch_norm:
        x = LRN2D()(x)
        x = ZeroPadding2D(padding=(1, 1), dim_ordering=DIM_ORDERING)(x)

    return x
Code example #29
File: ifshort_mlp.py  Project: xi-studio/keras
def train_model(dataset, h0_dim, h1_dim, y_dim):
    X_train, y_train, X_test, y_test = dataset
    batch_size = 512
    nb_epoch = 100
    model = Sequential()
    model.add(Dense(h0_dim, input_shape=(X_train.shape[1],), 
                    init='uniform', 
                    W_regularizer=l2(0.0005),
                    activation='relu'))
    model.add(Dense(h1_dim,  
                    init='uniform', 
                    W_regularizer=l2(0.0005),
                    activation='relu'))
    model.add(Dense(y_dim,  
                    init='uniform', 
                    W_regularizer=l2(0.0005)))
    
    rms = RMSprop()
    sgd = SGD(lr=0.01, decay=1e-4, momentum=0.6, nesterov=False)
    model.compile(loss='mse', optimizer=sgd)
    #model.get_config(verbose=1)
    #yaml_string = model.to_yaml()
    #with open('ifshort_mlp.yaml', 'w') as f:
    #    f.write(yaml_string)
        
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)
    checkpointer = ModelCheckpoint(filepath="/tmp/ifshort_mlp_weights.hdf5", verbose=1, save_best_only=True)
    model.fit(X_train, y_train, 
              batch_size=batch_size, 
              nb_epoch=nb_epoch, 
              show_accuracy=False, 
              verbose=2, 
              validation_data=(X_test, y_test), 
              callbacks=[early_stopping, checkpointer])
Code example #30
def conv_layer(x, nb_filter, nb_row, nb_col, dim_ordering,
               subsample=(1, 1), activation='relu',
               border_mode='same', weight_decay=None, padding=None):

    if weight_decay:
        W_regularizer = regularizers.l2(weight_decay)
        b_regularizer = regularizers.l2(weight_decay)
    else:
        W_regularizer = None
        b_regularizer = None

    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      activation=activation,
                      border_mode=border_mode,
                      W_regularizer=W_regularizer,
                      b_regularizer=b_regularizer,
                      bias=False,
                      dim_ordering=dim_ordering)(x)

    if padding:
        for i in range(padding):
            x = ZeroPadding2D(padding=(1, 1), dim_ordering=dim_ordering)(x)  # use the argument, not a global

    return x
Code example #31
def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3, growth_rate=12, nb_filter=-1,
                       nb_layers_per_block=-1, bottleneck=False, reduction=0.0, dropout_rate=None, weight_decay=1e-4,
                       subsample_initial_block=False, activation='softmax'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be -1, a positive integer, or a list.
                If -1, calculates nb_layer_per_block from the depth of the network.
                If a positive integer, a fixed number of layers per dense block.
                If a list, nb_layer is used as provided. Note that the list length
                must equal nb_dense_block (see the assertion below).
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Set to True to subsample the initial convolution and
                add a MaxPool2D before the dense blocks are added.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            assert (depth - 4) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)

            if bottleneck:
                count = count // 2

            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    # compute initial nb_filter if -1, else accept the user's initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)

    x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal', padding='same',
               strides=initial_strides, use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, bottleneck=bottleneck,
                                     dropout_rate=dropout_rate, weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck,
                                 dropout_rate=dropout_rate, weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
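A hypothetical invocation of the builder, assuming the companion __dense_block and __transition_block helpers from the same file are in scope. With depth=40, nb_dense_block=3 and no bottleneck, each dense block gets (40 - 4) / 3 = 12 layers (the classic DenseNet-40, k=12):

from keras.layers import Input
from keras.models import Model

img = Input(shape=(32, 32, 3))   # CIFAR-sized input, channels_last
out = __create_dense_net(nb_classes=10, img_input=img, include_top=True,
                         depth=40, nb_dense_block=3, growth_rate=12)
model = Model(img, out)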
Code example #32
File: m23_net.py  Project: ayenter/imdb_mud
print("BUILDING MODEL")
embedding_vecor_length = 32

input_layer = Embedding(len(tokenizer.word_index) + 1,
                        global_emb_dim,
                        weights=[emb_matrix],
                        input_length=global_max_seq,
                        trainable=False)

branch_3 = Sequential()
branch_3.add(input_layer)
branch_3.add(
    Conv1D(filters=32,
           kernel_size=3,
           padding='same',
           kernel_regularizer=l2(.01)))
branch_3.add(Activation('relu'))
branch_3.add(MaxPooling1D(pool_size=2))
branch_3.add(Dropout(0.5))
branch_3.add(BatchNormalization())
branch_3.add(LSTM(100))

branch_4 = Sequential()
branch_4.add(input_layer)
branch_4.add(
    Conv1D(filters=32,
           kernel_size=4,
           padding='same',
           kernel_regularizer=l2(.01)))
branch_4.add(Activation('relu'))
branch_4.add(MaxPooling1D(pool_size=2))
branch_4.add(Dropout(0.5))            # completed by symmetry with branch_3;
branch_4.add(BatchNormalization())    # the source listing breaks off mid-branch here
branch_4.add(LSTM(100))
Code example #33
def ssd_512(image_size,
            n_classes,
            mode='training',
            l2_regularization=0.0005,
            min_scale=None,
            max_scale=None,
            scales=None,
            aspect_ratios_global=None,
            aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                     [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                     [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                     [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                     [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                     [1.0, 2.0, 0.5],
                                     [1.0, 2.0, 0.5]],
            two_boxes_for_ar1=True,
            steps=[8, 16, 32, 64, 128, 256, 512],
            offsets=None,
            clip_boxes=False,
            variances=[0.1, 0.1, 0.2, 0.2],
            coords='centroids',
            normalize_coords=True,
            subtract_mean=[123, 117, 104],
            divide_by_stddev=None,
            swap_channels=[2, 1, 0],
            confidence_thresh=0.01,
            iou_threshold=0.45,
            top_k=200,
            nms_max_output_size=400,
            return_predictor_sizes=False):
    '''
    Build a Keras model with SSD512 architecture, see references.

    The base network is a reduced atrous VGG-16, extended by the SSD architecture,
    as described in the paper.

    Most of the arguments that this function takes are only needed for the anchor
    box layers. In case you're training the network, the parameters passed here must
    be the same as the ones used to set up `SSDBoxEncoder`. In case you're loading
    trained weights, the parameters passed here must be the same as the ones used
    to produce the trained weights.

    Some of these arguments are explained in more detail in the documentation of the
    `SSDBoxEncoder` class.

    Note: Requires Keras v2.0 or later. Currently works only with the
    TensorFlow backend (v1.0 or later).

    Arguments:
        image_size (tuple): The input image size in the format `(height, width, channels)`.
        n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
        mode (str, optional): One of 'training', 'inference' and 'inference_fast'. In 'training' mode,
            the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes,
            the raw predictions are decoded into absolute coordinates and filtered via confidence thresholding,
            non-maximum suppression, and top-k filtering. The difference between the latter two modes is that
            'inference' follows the exact procedure of the original Caffe implementation, while
            'inference_fast' uses a faster prediction decoding procedure.
        l2_regularization (float, optional): The L2-regularization rate. Applies to all convolutional layers.
            Set to zero to deactivate L2-regularization.
        min_scale (float, optional): The smallest scaling factor for the size of the anchor boxes as a fraction
            of the shorter side of the input images.
        max_scale (float, optional): The largest scaling factor for the size of the anchor boxes as a fraction
            of the shorter side of the input images. All scaling factors between the smallest and the
            largest will be linearly interpolated. Note that the second to last of the linearly interpolated
            scaling factors will actually be the scaling factor for the last predictor layer, while the last
            scaling factor is used for the second box for aspect ratio 1 in the last predictor layer
            if `two_boxes_for_ar1` is `True`.
        scales (list, optional): A list of floats containing scaling factors per convolutional predictor layer.
            This list must be one element longer than the number of predictor layers. The first `k` elements are the
            scaling factors for the `k` predictor layers, while the last element is used for the second box
            for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional
            last scaling factor must be passed either way, even if it is not being used.
            If a list is passed, this argument overrides `min_scale` and `max_scale`. All scaling factors
            must be greater than zero.
        aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are to be
            generated. This list is valid for all prediction layers.
        aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each prediction layer.
            This allows you to set the aspect ratios for each predictor layer individually, which is the case for the
            original SSD512 implementation. If a list is passed, it overrides `aspect_ratios_global`.
        two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratio lists that contain 1. Will be ignored otherwise.
            If `True`, two anchor boxes will be generated for aspect ratio 1. The first will be generated
            using the scaling factor for the respective layer, the second one will be generated using
            geometric mean of said scaling factor and next bigger scaling factor.
        steps (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
            either ints/floats or tuples of two ints/floats. These numbers represent for each predictor layer how many
            pixels apart the anchor box center points should be vertically and horizontally along the spatial grid over
            the image. If the list contains ints/floats, then that value will be used for both spatial dimensions.
            If the list contains tuples of two ints/floats, then they represent `(step_height, step_width)`.
            If no steps are provided, then they will be computed such that the anchor box center points will form an
            equidistant grid within the image dimensions.
        offsets (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
            either floats or tuples of two floats. These numbers represent for each predictor layer how many
            pixels from the top and left borders of the image the top-most and left-most anchor box center points should be
            as a fraction of `steps`. The last bit is important: The offsets are not absolute pixel values, but fractions
            of the step size specified in the `steps` argument. If the list contains floats, then that value will
            be used for both spatial dimensions. If the list contains tuples of two floats, then they represent
            `(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will default to 0.5 of the step size.
        clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
        variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
            its respective variance value.
        coords (str, optional): The box coordinate format to be used internally by the model (i.e. this is not the input format
            of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width,
            and height), 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
        normalize_coords (bool, optional): Set to `True` if the model is supposed to use relative instead of absolute coordinates,
            i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
        subtract_mean (array-like, optional): `None` or an array-like object of integers or floating point values
            of any shape that is broadcast-compatible with the image shape. The elements of this array will be
            subtracted from the image pixel intensity values. For example, pass a list of three integers
            to perform per-channel mean normalization for color images.
        divide_by_stddev (array-like, optional): `None` or an array-like object of non-zero integers or
            floating point values of any shape that is broadcast-compatible with the image shape. The image pixel
            intensity values will be divided by the elements of this array. For example, pass a list
            of three integers to perform per-channel standard deviation normalization for color images.
        swap_channels (list, optional): Either `False` or a list of integers representing the desired order in which the input
            image channels should be swapped.
        confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
            positive class in order to be considered for the non-maximum suppression stage for the respective class.
            A lower value will result in a larger part of the selection process being done by the non-maximum suppression
            stage, while a larger value will result in a larger part of the selection process happening in the confidence
            thresholding stage.
        iou_threshold (float, optional): A float in [0,1]. All boxes that have a Jaccard similarity of greater than `iou_threshold`
            with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
            to the box's confidence score.
        top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
            non-maximum suppression stage.
        nms_max_output_size (int, optional): The maximal number of predictions that will be left over after the NMS stage.
        return_predictor_sizes (bool, optional): If `True`, this function not only returns the model, but also
            a list containing the spatial dimensions of the predictor layers. This isn't strictly necessary since
            you can always get their sizes easily via the Keras API, but it's convenient and less error-prone
            to get them this way. They are only relevant for training anyway (SSDBoxEncoder needs to know the
            spatial dimensions of the predictor layers), for inference you don't need them.

    Returns:
        model: The Keras SSD512 model.
        predictor_sizes (optional): A Numpy array containing the `(height, width)` portion
            of the output tensor shape for each convolutional predictor layer. During
            training, the generator function needs this in order to transform
            the ground truth labels into tensors of identical structure as the
            output tensors of the model, which is in turn needed for the cost
            function.

    References:
        https://arxiv.org/abs/1512.02325v5
    '''

    n_predictor_layers = 7 # The number of predictor conv layers in the network is 7 for the original SSD512
    n_classes += 1 # Account for the background class.
    l2_reg = l2_regularization # Make the internal name shorter.
    img_height, img_width, img_channels = image_size[0], image_size[1], image_size[2]

    ############################################################################
    # Get a few exceptions out of the way.
    ############################################################################

    if aspect_ratios_global is None and aspect_ratios_per_layer is None:
        raise ValueError("`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None. At least one needs to be specified.")
    if aspect_ratios_per_layer:
        if len(aspect_ratios_per_layer) != n_predictor_layers:
            raise ValueError("It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}.".format(n_predictor_layers, len(aspect_ratios_per_layer)))

    if (min_scale is None or max_scale is None) and scales is None:
        raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
    if scales:
        if len(scales) != n_predictor_layers+1:
            raise ValueError("It must be either scales is None or len(scales) == {}, but len(scales) == {}.".format(n_predictor_layers+1, len(scales)))
    else: # If no explicit list of scaling factors was passed, compute the list of scaling factors from `min_scale` and `max_scale`
        scales = np.linspace(min_scale, max_scale, n_predictor_layers+1)

    if len(variances) != 4:
        raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
    variances = np.array(variances)
    if np.any(variances <= 0):
        raise ValueError("All variances must be >0, but the variances given are {}".format(variances))

    if (not (steps is None)) and (len(steps) != n_predictor_layers):
        raise ValueError("You must provide exactly one step value per predictor layer.")

    if (not (offsets is None)) and (len(offsets) != n_predictor_layers):
        raise ValueError("You must provide exactly one offset value per predictor layer.")

    ############################################################################
    # Compute the anchor box parameters.
    ############################################################################

    # Set the aspect ratios for each predictor layer. These are only needed for the anchor box layers.
    if aspect_ratios_per_layer:
        aspect_ratios = aspect_ratios_per_layer
    else:
        aspect_ratios = [aspect_ratios_global] * n_predictor_layers

    # Compute the number of boxes to be predicted per cell for each predictor layer.
    # We need this so that we know how many channels the predictor layers need to have.
    if aspect_ratios_per_layer:
        n_boxes = []
        for ar in aspect_ratios_per_layer:
            if (1 in ar) & two_boxes_for_ar1:
                n_boxes.append(len(ar) + 1) # +1 for the second box for aspect ratio 1
            else:
                n_boxes.append(len(ar))
    else: # If only a global aspect ratio list was passed, then the number of boxes is the same for each predictor layer
        if (1 in aspect_ratios_global) and two_boxes_for_ar1:
            n_boxes = len(aspect_ratios_global) + 1
        else:
            n_boxes = len(aspect_ratios_global)
        n_boxes = [n_boxes] * n_predictor_layers

    if steps is None:
        steps = [None] * n_predictor_layers
    if offsets is None:
        offsets = [None] * n_predictor_layers

    ############################################################################
    # Define functions for the Lambda layers below.
    ############################################################################

    def identity_layer(tensor):
        return tensor

    def input_mean_normalization(tensor):
        return tensor - np.array(subtract_mean)

    def input_stddev_normalization(tensor):
        return tensor / np.array(divide_by_stddev)

    def input_channel_swap(tensor):
        if len(swap_channels) == 3:
            return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]]], axis=-1)
        elif len(swap_channels) == 4:
            return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]], tensor[...,swap_channels[3]]], axis=-1)

    ############################################################################
    # Build the network.
    ############################################################################

    x = Input(shape=(img_height, img_width, img_channels))

    # The following identity layer is only needed so that the subsequent lambda layers can be optional.
    x1 = Lambda(identity_layer, output_shape=(img_height, img_width, img_channels), name='identity_layer')(x)
    if subtract_mean is not None:
        x1 = Lambda(input_mean_normalization, output_shape=(img_height, img_width, img_channels), name='input_mean_normalization')(x1)
    if divide_by_stddev is not None:
        x1 = Lambda(input_stddev_normalization, output_shape=(img_height, img_width, img_channels), name='input_stddev_normalization')(x1)
    if swap_channels:
        x1 = Lambda(input_channel_swap, output_shape=(img_height, img_width, img_channels), name='input_channel_swap')(x1)

    conv1_1 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv1_1')(x1)
    conv1_2 = Conv2D(64, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv1_2')(conv1_1)
    pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='pool1')(conv1_2)

    conv2_1 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv2_1')(pool1)
    conv2_2 = Conv2D(128, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv2_2')(conv2_1)
    pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='pool2')(conv2_2)

    conv3_1 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv3_1')(pool2)
    conv3_2 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv3_2')(conv3_1)
    conv3_3 = Conv2D(256, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv3_3')(conv3_2)
    pool3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='pool3')(conv3_3)

    conv4_1 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv4_1')(pool3)
    conv4_2 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv4_2')(conv4_1)
    conv4_3 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv4_3')(conv4_2)
    pool4 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', name='pool4')(conv4_3)

    conv5_1 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv5_1')(pool4)
    conv5_2 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv5_2')(conv5_1)
    conv5_3 = Conv2D(512, (3, 3), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv5_3')(conv5_2)
    pool5 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', name='pool5')(conv5_3)

    fc6 = Conv2D(1024, (3, 3), dilation_rate=(6, 6), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='fc6')(pool5)
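    # The (6, 6) dilation above gives fc6 a large receptive field without any
    # further downsampling (the atrous convolution trick used in SSD/DeepLab).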

    fc7 = Conv2D(1024, (1, 1), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='fc7')(fc6)

    conv6_1 = Conv2D(256, (1, 1), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv6_1')(fc7)
    conv6_1 = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv6_padding')(conv6_1)
    conv6_2 = Conv2D(512, (3, 3), strides=(2, 2), activation='relu', padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv6_2')(conv6_1)

    conv7_1 = Conv2D(128, (1, 1), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv7_1')(conv6_2)
    conv7_1 = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv7_padding')(conv7_1)
    conv7_2 = Conv2D(256, (3, 3), strides=(2, 2), activation='relu', padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv7_2')(conv7_1)

    conv8_1 = Conv2D(128, (1, 1), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv8_1')(conv7_2)
    conv8_1 = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv8_padding')(conv8_1)
    conv8_2 = Conv2D(256, (3, 3), strides=(2, 2), activation='relu', padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv8_2')(conv8_1)

    conv9_1 = Conv2D(128, (1, 1), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv9_1')(conv8_2)
    conv9_1 = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv9_padding')(conv9_1)
    conv9_2 = Conv2D(256, (3, 3), strides=(2, 2), activation='relu', padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv9_2')(conv9_1)

    conv10_1 = Conv2D(128, (1, 1), activation='relu', padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv10_1')(conv9_2)
    conv10_1 = ZeroPadding2D(padding=((1, 1), (1, 1)), name='conv10_padding')(conv10_1)
    conv10_2 = Conv2D(256, (4, 4), strides=(1, 1), activation='relu', padding='valid', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv10_2')(conv10_1)

    # Feed conv4_3 into the L2 normalization layer
    conv4_3_norm = L2Normalization(gamma_init=20, name='conv4_3_norm')(conv4_3)

    ### Build the convolutional predictor layers on top of the base network

    # We predict `n_classes` confidence values for each box, hence the confidence predictors have depth `n_boxes * n_classes`
    # Output shape of the confidence layers: `(batch, height, width, n_boxes * n_classes)`
    conv4_3_norm_mbox_conf = Conv2D(n_boxes[0] * n_classes, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv4_3_norm_mbox_conf')(conv4_3_norm)
    fc7_mbox_conf = Conv2D(n_boxes[1] * n_classes, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='fc7_mbox_conf')(fc7)
    conv6_2_mbox_conf = Conv2D(n_boxes[2] * n_classes, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv6_2_mbox_conf')(conv6_2)
    conv7_2_mbox_conf = Conv2D(n_boxes[3] * n_classes, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv7_2_mbox_conf')(conv7_2)
    conv8_2_mbox_conf = Conv2D(n_boxes[4] * n_classes, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv8_2_mbox_conf')(conv8_2)
    conv9_2_mbox_conf = Conv2D(n_boxes[5] * n_classes, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv9_2_mbox_conf')(conv9_2)
    conv10_2_mbox_conf = Conv2D(n_boxes[6] * n_classes, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv10_2_mbox_conf')(conv10_2)
    # We predict 4 box coordinates for each box, hence the localization predictors have depth `n_boxes * 4`
    # Output shape of the localization layers: `(batch, height, width, n_boxes * 4)`
    conv4_3_norm_mbox_loc = Conv2D(n_boxes[0] * 4, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv4_3_norm_mbox_loc')(conv4_3_norm)
    fc7_mbox_loc = Conv2D(n_boxes[1] * 4, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='fc7_mbox_loc')(fc7)
    conv6_2_mbox_loc = Conv2D(n_boxes[2] * 4, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv6_2_mbox_loc')(conv6_2)
    conv7_2_mbox_loc = Conv2D(n_boxes[3] * 4, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv7_2_mbox_loc')(conv7_2)
    conv8_2_mbox_loc = Conv2D(n_boxes[4] * 4, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv8_2_mbox_loc')(conv8_2)
    conv9_2_mbox_loc = Conv2D(n_boxes[5] * 4, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv9_2_mbox_loc')(conv9_2)
    conv10_2_mbox_loc = Conv2D(n_boxes[6] * 4, (3, 3), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv10_2_mbox_loc')(conv10_2)

    ### Generate the anchor boxes (called "priors" in the original Caffe/C++ implementation, so I'll keep their layer names)

    # Output shape of anchors: `(batch, height, width, n_boxes, 8)`
    conv4_3_norm_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[0], next_scale=scales[1], aspect_ratios=aspect_ratios[0],
                                             two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[0], this_offsets=offsets[0], clip_boxes=clip_boxes,
                                             variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv4_3_norm_mbox_priorbox')(conv4_3_norm_mbox_loc)
    fc7_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[1], next_scale=scales[2], aspect_ratios=aspect_ratios[1],
                                    two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[1], this_offsets=offsets[1], clip_boxes=clip_boxes,
                                    variances=variances, coords=coords, normalize_coords=normalize_coords, name='fc7_mbox_priorbox')(fc7_mbox_loc)
    conv6_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[2], next_scale=scales[3], aspect_ratios=aspect_ratios[2],
                                        two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[2], this_offsets=offsets[2], clip_boxes=clip_boxes,
                                        variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv6_2_mbox_priorbox')(conv6_2_mbox_loc)
    conv7_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[3], next_scale=scales[4], aspect_ratios=aspect_ratios[3],
                                        two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[3], this_offsets=offsets[3], clip_boxes=clip_boxes,
                                        variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv7_2_mbox_priorbox')(conv7_2_mbox_loc)
    conv8_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[4], next_scale=scales[5], aspect_ratios=aspect_ratios[4],
                                        two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[4], this_offsets=offsets[4], clip_boxes=clip_boxes,
                                        variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv8_2_mbox_priorbox')(conv8_2_mbox_loc)
    conv9_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[5], next_scale=scales[6], aspect_ratios=aspect_ratios[5],
                                        two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[5], this_offsets=offsets[5], clip_boxes=clip_boxes,
                                        variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv9_2_mbox_priorbox')(conv9_2_mbox_loc)
    conv10_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[6], next_scale=scales[7], aspect_ratios=aspect_ratios[6],
                                        two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[6], this_offsets=offsets[6], clip_boxes=clip_boxes,
                                        variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv10_2_mbox_priorbox')(conv10_2_mbox_loc)

    ### Reshape

    # Reshape the class predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, n_classes)`
    # We want the classes isolated in the last axis to perform softmax on them
    conv4_3_norm_mbox_conf_reshape = Reshape((-1, n_classes), name='conv4_3_norm_mbox_conf_reshape')(conv4_3_norm_mbox_conf)
    fc7_mbox_conf_reshape = Reshape((-1, n_classes), name='fc7_mbox_conf_reshape')(fc7_mbox_conf)
    conv6_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv6_2_mbox_conf_reshape')(conv6_2_mbox_conf)
    conv7_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv7_2_mbox_conf_reshape')(conv7_2_mbox_conf)
    conv8_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv8_2_mbox_conf_reshape')(conv8_2_mbox_conf)
    conv9_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv9_2_mbox_conf_reshape')(conv9_2_mbox_conf)
    conv10_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv10_2_mbox_conf_reshape')(conv10_2_mbox_conf)
    # Reshape the box predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, 4)`
    # We want the four box coordinates isolated in the last axis to compute the smooth L1 loss
    conv4_3_norm_mbox_loc_reshape = Reshape((-1, 4), name='conv4_3_norm_mbox_loc_reshape')(conv4_3_norm_mbox_loc)
    fc7_mbox_loc_reshape = Reshape((-1, 4), name='fc7_mbox_loc_reshape')(fc7_mbox_loc)
    conv6_2_mbox_loc_reshape = Reshape((-1, 4), name='conv6_2_mbox_loc_reshape')(conv6_2_mbox_loc)
    conv7_2_mbox_loc_reshape = Reshape((-1, 4), name='conv7_2_mbox_loc_reshape')(conv7_2_mbox_loc)
    conv8_2_mbox_loc_reshape = Reshape((-1, 4), name='conv8_2_mbox_loc_reshape')(conv8_2_mbox_loc)
    conv9_2_mbox_loc_reshape = Reshape((-1, 4), name='conv9_2_mbox_loc_reshape')(conv9_2_mbox_loc)
    conv10_2_mbox_loc_reshape = Reshape((-1, 4), name='conv10_2_mbox_loc_reshape')(conv10_2_mbox_loc)
    # Reshape the anchor box tensors, yielding 3D tensors of shape `(batch, height * width * n_boxes, 8)`
    conv4_3_norm_mbox_priorbox_reshape = Reshape((-1, 8), name='conv4_3_norm_mbox_priorbox_reshape')(conv4_3_norm_mbox_priorbox)
    fc7_mbox_priorbox_reshape = Reshape((-1, 8), name='fc7_mbox_priorbox_reshape')(fc7_mbox_priorbox)
    conv6_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv6_2_mbox_priorbox_reshape')(conv6_2_mbox_priorbox)
    conv7_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv7_2_mbox_priorbox_reshape')(conv7_2_mbox_priorbox)
    conv8_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv8_2_mbox_priorbox_reshape')(conv8_2_mbox_priorbox)
    conv9_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv9_2_mbox_priorbox_reshape')(conv9_2_mbox_priorbox)
    conv10_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv10_2_mbox_priorbox_reshape')(conv10_2_mbox_priorbox)

    ### Concatenate the predictions from the different layers

    # Axis 0 (batch) and axis 2 (n_classes or 4, respectively) are identical for all layer predictions,
    # so we want to concatenate along axis 1, the number of boxes per layer
    # Output shape of `mbox_conf`: (batch, n_boxes_total, n_classes)
    mbox_conf = Concatenate(axis=1, name='mbox_conf')([conv4_3_norm_mbox_conf_reshape,
                                                       fc7_mbox_conf_reshape,
                                                       conv6_2_mbox_conf_reshape,
                                                       conv7_2_mbox_conf_reshape,
                                                       conv8_2_mbox_conf_reshape,
                                                       conv9_2_mbox_conf_reshape,
                                                       conv10_2_mbox_conf_reshape])

    # Output shape of `mbox_loc`: (batch, n_boxes_total, 4)
    mbox_loc = Concatenate(axis=1, name='mbox_loc')([conv4_3_norm_mbox_loc_reshape,
                                                     fc7_mbox_loc_reshape,
                                                     conv6_2_mbox_loc_reshape,
                                                     conv7_2_mbox_loc_reshape,
                                                     conv8_2_mbox_loc_reshape,
                                                     conv9_2_mbox_loc_reshape,
                                                     conv10_2_mbox_loc_reshape])

    # Output shape of `mbox_priorbox`: (batch, n_boxes_total, 8)
    mbox_priorbox = Concatenate(axis=1, name='mbox_priorbox')([conv4_3_norm_mbox_priorbox_reshape,
                                                               fc7_mbox_priorbox_reshape,
                                                               conv6_2_mbox_priorbox_reshape,
                                                               conv7_2_mbox_priorbox_reshape,
                                                               conv8_2_mbox_priorbox_reshape,
                                                               conv9_2_mbox_priorbox_reshape,
                                                               conv10_2_mbox_priorbox_reshape])

    # The box coordinate predictions will go into the loss function just the way they are,
    # but for the class predictions, we'll apply a softmax activation layer first
    mbox_conf_softmax = Activation('softmax', name='mbox_conf_softmax')(mbox_conf)

    # Concatenate the class and box predictions and the anchors to one large predictions vector
    # Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
    predictions = Concatenate(axis=2, name='predictions')([mbox_conf_softmax, mbox_loc, mbox_priorbox])

    if mode == 'training':
        model = Model(inputs=x, outputs=predictions)
    elif mode == 'inference':
        decoded_predictions = DecodeDetections(confidence_thresh=confidence_thresh,
                                               iou_threshold=iou_threshold,
                                               top_k=top_k,
                                               nms_max_output_size=nms_max_output_size,
                                               coords=coords,
                                               normalize_coords=normalize_coords,
                                               img_height=img_height,
                                               img_width=img_width,
                                               name='decoded_predictions')(predictions)
        model = Model(inputs=x, outputs=decoded_predictions)
    elif mode == 'inference_fast':
        decoded_predictions = DecodeDetectionsFast(confidence_thresh=confidence_thresh,
                                                   iou_threshold=iou_threshold,
                                                   top_k=top_k,
                                                   nms_max_output_size=nms_max_output_size,
                                                   coords=coords,
                                                   normalize_coords=normalize_coords,
                                                   img_height=img_height,
                                                   img_width=img_width,
                                                   name='decoded_predictions')(predictions)
        model = Model(inputs=x, outputs=decoded_predictions)
    else:
        raise ValueError("`mode` must be one of 'training', 'inference' or 'inference_fast', but received '{}'.".format(mode))

    if return_predictor_sizes:
        predictor_sizes = np.array([conv4_3_norm_mbox_conf._keras_shape[1:3],
                                    fc7_mbox_conf._keras_shape[1:3],
                                    conv6_2_mbox_conf._keras_shape[1:3],
                                    conv7_2_mbox_conf._keras_shape[1:3],
                                    conv8_2_mbox_conf._keras_shape[1:3],
                                    conv9_2_mbox_conf._keras_shape[1:3],
                                    conv10_2_mbox_conf._keras_shape[1:3]])
        return model, predictor_sizes
    else:
        return model
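
# A minimal usage sketch for the builder above. Hedged: the function's public
# name is not visible in this excerpt, so `ssd_512` is assumed here, and the
# call relies on the signature's defaults for every parameter not listed.
model, predictor_sizes = ssd_512(image_size=(512, 512, 3),
                                 n_classes=20,
                                 mode='training',
                                 min_scale=0.1,
                                 max_scale=0.9,
                                 aspect_ratios_global=[0.5, 1.0, 2.0],
                                 two_boxes_for_ar1=True,
                                 variances=[0.1, 0.1, 0.2, 0.2],
                                 normalize_coords=True,
                                 return_predictor_sizes=True)
print(predictor_sizes)  # one (height, width) grid per predictor layer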
Code Example #34
def inceptionv3(input,
                dropout_keep_prob=0.8,
                num_classes=1000,
                is_training=True,
                scope='InceptionV3',
                channel_axis=3):

    with tf.name_scope(scope, "InceptionV3", [input]):

        x = conv2d_bn(input, 32, 3, 3, strides=(2, 2), padding='valid')
        x = conv2d_bn(x, 32, 3, 3, padding='valid')
        x = conv2d_bn(x, 64, 3, 3)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        x = conv2d_bn(x, 80, 1, 1, padding='valid')
        x = conv2d_bn(x, 192, 3, 3, padding='valid')
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        # mixed 0: 35 x 35 x 256
        branch1x1 = conv2d_bn(x, 64, 1, 1)

        branch5x5 = conv2d_bn(x, 48, 1, 1)
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

        branch3x3dbl = conv2d_bn(x, 64, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
        x = concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                        axis=channel_axis,
                        name='mixed0')

        # mixed 1: 35 x 35 x 288
        branch1x1 = conv2d_bn(x, 64, 1, 1)

        branch5x5 = conv2d_bn(x, 48, 1, 1)
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

        branch3x3dbl = conv2d_bn(x, 64, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
        x = concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                        axis=channel_axis,
                        name='mixed1')

        # mixed 2: 35 x 35 x 288
        branch1x1 = conv2d_bn(x, 64, 1, 1)

        branch5x5 = conv2d_bn(x, 48, 1, 1)
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

        branch3x3dbl = conv2d_bn(x, 64, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
        x = concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                        axis=channel_axis,
                        name='mixed2')

        # mixed 3: 17 x 17 x 768
        branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

        branch3x3dbl = conv2d_bn(x, 64, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
        branch3x3dbl = conv2d_bn(branch3x3dbl,
                                 96,
                                 3,
                                 3,
                                 strides=(2, 2),
                                 padding='valid')

        branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = concatenate([branch3x3, branch3x3dbl, branch_pool],
                        axis=channel_axis,
                        name='mixed3')

        # mixed 4: 17 x 17 x 768
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 128, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 128, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                        axis=channel_axis,
                        name='mixed4')

        # mixed 5, 6: 17 x 17 x 768
        for i in range(2):
            branch1x1 = conv2d_bn(x, 192, 1, 1)

            branch7x7 = conv2d_bn(x, 160, 1, 1)
            branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
            branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

            branch7x7dbl = conv2d_bn(x, 160, 1, 1)
            branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
            branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
            branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
            branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

            branch_pool = AveragePooling2D((3, 3),
                                           strides=(1, 1),
                                           padding='same')(x)
            branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
            x = concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                            axis=channel_axis,
                            name='mixed' + str(5 + i))

        # mixed 7: 17 x 17 x 768
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 192, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 192, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = AveragePooling2D((3, 3), strides=(1, 1),
                                       padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                        axis=channel_axis,
                        name='mixed7')

        loss2_ave_pool = AveragePooling2D(pool_size=(5, 5),
                                          strides=(3, 3),
                                          name='loss2/ave_pool')(x)

        loss2_conv_a = conv2d_bn(loss2_ave_pool, 128, 1, 1)
        loss2_conv_b = conv2d_bn(loss2_conv_a, 768, 5, 5)

        loss2_flat = Flatten()(loss2_conv_b)

        loss2_fc = Dense(1024,
                         activation='relu',
                         name='loss2/fc',
                         kernel_regularizer=l2(0.0002))(loss2_flat)

        loss2_drop_fc = Dropout(dropout_keep_prob)(loss2_fc,
                                                   training=is_training)
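        # NOTE: Keras' Dropout layer takes a *drop* rate, so passing
        # dropout_keep_prob (0.8) here and in the final classifier drops 80% of
        # units; use rate=1 - dropout_keep_prob if a keep probability is intended.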

        loss2_classifier = Dense(num_classes,
                                 name='loss2/classifier',
                                 kernel_regularizer=l2(0.0002))(loss2_drop_fc)

        # mixed 8: 8 x 8 x 1280
        branch3x3 = conv2d_bn(x, 192, 1, 1)
        branch3x3 = conv2d_bn(branch3x3,
                              320,
                              3,
                              3,
                              strides=(2, 2),
                              padding='valid')

        branch7x7x3 = conv2d_bn(x, 192, 1, 1)
        branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
        branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
        branch7x7x3 = conv2d_bn(branch7x7x3,
                                192,
                                3,
                                3,
                                strides=(2, 2),
                                padding='valid')

        branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
        x = concatenate([branch3x3, branch7x7x3, branch_pool],
                        axis=channel_axis,
                        name='mixed8')

        # mixed 9: 8 x 8 x 2048
        for i in range(2):
            branch1x1 = conv2d_bn(x, 320, 1, 1)

            branch3x3 = conv2d_bn(x, 384, 1, 1)
            branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
            branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
            branch3x3 = concatenate([branch3x3_1, branch3x3_2],
                                    axis=channel_axis,
                                    name='mixed9_' + str(i))

            branch3x3dbl = conv2d_bn(x, 448, 1, 1)
            branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
            branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
            branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
            branch3x3dbl = concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                       axis=channel_axis)

            branch_pool = AveragePooling2D((3, 3),
                                           strides=(1, 1),
                                           padding='same')(x)
            branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
            x = concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool],
                            axis=channel_axis,
                            name='mixed' + str(9 + i))
        net = x

        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)

        pool5_drop_10x10_s1 = Dropout(dropout_keep_prob)(x,
                                                         training=is_training)

        loss3_classifier_w = Dense(num_classes,
                                   name='loss3/classifier',
                                   kernel_regularizer=l2(0.0002))

        loss3_classifier = loss3_classifier_w(pool5_drop_10x10_s1)

        w_variables = loss3_classifier_w.get_weights()

        logits = tf.cond(
            tf.equal(is_training, tf.constant(True)),
            lambda: tf.add(loss3_classifier,
                           tf.scalar_mul(tf.constant(0.3), loss2_classifier)),
            lambda: loss3_classifier)

    return logits, net, tf.convert_to_tensor(w_variables[0])
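
# A hedged call sketch for the TF1-style graph function above (assumes the
# conv2d_bn helper from the same file is in scope and a 299x299 RGB input):
images = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
logits, features, aux_w = inceptionv3(images,
                                      dropout_keep_prob=0.8,
                                      num_classes=1000,
                                      is_training=True)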
def char_level_neural_net(args):
    """
    This functions trains and saves a character level neural network

    Args:
        None
    
    Returns:
        None
    """
    logger.debug("Running the char_level_neural_net function")

    #Loading the config
    with open(os.path.join("Config","config.yml"), "r") as f:
        config = yaml.safe_load(f)

    #Creating folder for this run
    create_dir(os.path.join("Models", config["char_nn"]["model_name"]))

    #Loading the document
    file = open(os.path.join(config["create_corpus"]["save_location"], "processed_data.txt"), 'r', encoding="UTF-8")
    text = file.read()
    file.close()

    logger.debug("Total characters in the corpus : {}".format(len(text)))

    #Limiting training size based on config:
    if config["gen_training"]["char_nn_training_size"] != -1:
        text = text[0:config["gen_training"]["char_nn_training_size"]]

    logger.debug("After limiting training size, total characters in the corpus : {}".format(len(text)))

    #Generating vocabulary of the characters
    vocab = sorted(set(text))

    logger.debug("Total unique characters in the corpus : {}".format(len(vocab)))

    # Creating a mapping from unique characters to indices
    char2idx = {u:i for i, u in enumerate(vocab)}
    idx2char = {i:u for i, u in enumerate(vocab)}

    #Saving dictionaries
    with open(os.path.join("Models", config["char_nn"]["model_name"], config["char_nn"]["model_name"] + "_char2idx.pickle"), 'wb') as handle:
        pickle.dump(char2idx, handle, protocol=pickle.HIGHEST_PROTOCOL)

    with open(os.path.join("Models", config["char_nn"]["model_name"], config["char_nn"]["model_name"] + "_idx2char.pickle"), 'wb') as handle:
        pickle.dump(idx2char, handle, protocol=pickle.HIGHEST_PROTOCOL)

    logger.debug("Dictionaries created and saved.")

    #Creating training and validation split
    validation_split = config["char_nn"]["validation_split"]
    index_split = round(len(text) * (1-validation_split))
    training_text = text[0:index_split]
    val_text = text[index_split:]  # no +1: avoid dropping the character at the split point

    batch_size = config["char_nn"]["batch_size"]
    seq_length = config["char_nn"]["seq_length"]

    #Defining training and validation data generators
    train_gen = char_data_generator(training_text, batch_size, char2idx, seq_length, vocab)
    val_gen = char_data_generator(val_text, batch_size, char2idx, seq_length, vocab)

    #Defining model
    logger.debug("Training data and labels generated. Defining model now.")
    model = Sequential()
    model.add(Embedding(len(vocab) + 1, config["char_nn"]["embedding_dim"],
                                input_length=seq_length,
                                ))

    if config["char_nn"]["rnn_type"] == "lstm":
        if config["char_nn"]["rnn_layers"] > 1:
            for _ in range(config["char_nn"]["rnn_layers"] - 1):
                model.add(LSTM(units = config["char_nn"]["rnn_units"], 
                                return_sequences=True, 
                                recurrent_initializer='glorot_uniform', 
                                dropout=config["char_nn"]["dropout"]
                                ))

        model.add(LSTM(units = config["char_nn"]["rnn_units"], 
                        return_sequences=False, 
                        recurrent_initializer='glorot_uniform', 
                        dropout=config["char_nn"]["dropout"]
                        ))
    
    elif config["char_nn"]["rnn_type"] == "gru":
        if config["char_nn"]["rnn_layers"] > 1:
            for _ in range(config["char_nn"]["rnn_layers"] - 1):
                model.add(GRU(units = config["char_nn"]["rnn_units"], 
                                return_sequences=True, 
                                recurrent_initializer='glorot_uniform', 
                                dropout=config["char_nn"]["dropout"]
                                ))

        model.add(GRU(units = config["char_nn"]["rnn_units"], 
                        return_sequences=False, 
                        recurrent_initializer='glorot_uniform', 
                        dropout=config["char_nn"]["dropout"]
                        ))

    else:
        logger.error("rnn_type should be either 'lstm' or 'gru'.")
        return

    model.add(Dense(len(vocab), 
                        activation='softmax',
                        kernel_regularizer=l2(config["char_nn"]["l2_penalty"]),
                        bias_regularizer=l2(config["char_nn"]["l2_penalty"]),
                        kernel_initializer='glorot_uniform',
                        bias_initializer='zeros'
                        ))

    print(model.summary())
    
    logger.debug("Compiling Model now.")
    model.compile(loss='categorical_crossentropy',
                    optimizer='rmsprop',
                    metrics=['accuracy', 'categorical_crossentropy'])
    
    logger.debug("Fitting model now.")
    
    tstart = datetime.datetime.now()
    
    fit = model.fit_generator(train_gen,
                    steps_per_epoch=(len(training_text) - seq_length)// batch_size,
                    validation_data=val_gen,
                    validation_steps=(len(val_text)  - seq_length)// batch_size,
                    epochs=config["char_nn"]["epochs"],
                    verbose=1)

    train_time = datetime.datetime.now() - tstart

    model.save(os.path.join("Models", config["char_nn"]["model_name"], config["char_nn"]["model_name"] + ".model"))
    logger.info("Final val_categorical_crossentropy = {}".format(fit.history['val_categorical_crossentropy'][-1]))
    logger.info("Training complete. Writing summary and performance file.")

    f = open(os.path.join("Models", config["char_nn"]["model_name"], config["char_nn"]["model_name"] + "_summary.txt"),"w+")
    f.write('Date of run: {} \n'.format(str(datetime.datetime.now())))
    f.write('Model Summary:\n')
    model.summary(print_fn=lambda x: f.write(x + '\n'))

    f.write('\n\n\nModel Parameters:\n')
    f.write('Model Name: {}\n'.format(config["char_nn"]["model_name"]))
    f.write('Train Data Character length: {}\n'.format(config["gen_training"]["char_nn_training_size"]))
    f.write('Sequence Length: {}\n'.format(config["char_nn"]["seq_length"]))
    f.write('Batch Size: {}\n'.format(config["char_nn"]["batch_size"]))
    f.write('Embedding Dimensions: {}\n'.format(config["char_nn"]["embedding_dim"]))
    f.write('RNN Units: {}\n'.format(config["char_nn"]["rnn_units"]))
    f.write('Epochs: {}\n'.format(config["char_nn"]["epochs"]))
    f.write('Validation Split: {}\n'.format(config["char_nn"]["validation_split"]))
    f.write('L2 penalty: {}\n'.format(config["char_nn"]["l2_penalty"]))

    f.write('\n\n\nModel Performance Metrics:\n')
    f.write("val_categorical_crossentropy = {}\n".format(fit.history['val_categorical_crossentropy']))
    f.write("Total Train time = {}".format(train_time))
    f.close()
    
    logger.info('Model Summary Written')

    return
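
# The function above relies on a char_data_generator helper that is not shown
# in this excerpt. A hypothetical sketch of what it is assumed to yield,
# matching the Embedding input and the categorical_crossentropy loss:
import numpy as np

def char_data_generator(text, batch_size, char2idx, seq_length, vocab):
    encoded = np.array([char2idx[c] for c in text])
    n_vocab = len(vocab)
    i = 0
    while True:
        X = np.zeros((batch_size, seq_length), dtype=np.int64)
        y = np.zeros((batch_size, n_vocab), dtype=np.float32)
        for b in range(batch_size):
            if i + seq_length >= len(encoded):
                i = 0  # wrap around at the end of the corpus
            X[b] = encoded[i:i + seq_length]     # seq_length input characters
            y[b, encoded[i + seq_length]] = 1.0  # one-hot next character
            i += 1
        yield X, y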
Code Example #36
print(len(X_test), 'test sequences')

# In[7]:

print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)


# In[8]:

print('Build model...')
model = Sequential()
model.add(DropoutEmbedding(nb_words + index_from, 128, W_regularizer=l2(weight_decay), p=p_emb))
model.add(DropoutGRU(128, 128, truncate_gradient=maxlen, W_regularizer=l2(weight_decay), 
                      U_regularizer=l2(weight_decay), 
                      b_regularizer=l2(weight_decay), 
                      p_W=p_W, p_U=p_U))
model.add(Dropout(p_dense))
model.add(Dense(128, 1, W_regularizer=l2(weight_decay), b_regularizer=l2(weight_decay)))

#optimiser = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=False)
optimiser = 'adam'
model.compile(loss='mean_squared_error', optimizer=optimiser)


# In[ ]:

# model.load_weights("/scratch/home/Projects/rnn_dropout/exps/DropoutLSTM_weights_00540.hdf5")
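
# A plausible training call for the snippet above (old-Keras API to match the
# W_regularizer/DropoutGRU usage; y_train/y_test are the regression targets
# this mean-squared-error model expects, and the hyperparameters are illustrative):
model.fit(X_train, y_train, batch_size=128, nb_epoch=250,
          validation_data=(X_test, y_test))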
Code Example #37
def __create_fcn_dense_net(nb_classes, img_input, include_top, nb_dense_block=5, growth_rate=12,
                           reduction=0.0, dropout_rate=None, weight_decay=1e-4,
                           nb_layers_per_block=4, nb_upsampling_conv=128, upsampling_type='upsampling',
                           init_conv_filters=48, input_shape=None, activation='deconv'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
            type of upsampling algorithm used.
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                    Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if concat_axis == 1:  # channels_first dim ordering
        _, rows, cols = input_shape
    else:
        rows, cols, _ = input_shape

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # check if upsampling_conv has minimum number of filters
    # minimum is set to 12, as at least 3 color channels are needed for correct upsampling
    assert nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0, 'Parameter `upsampling_conv` number of channels must ' \
                                                                    'be a positive number divisible by 4 and greater ' \
                                                                    'than 12'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block + 1), 'If list, nb_layer is used as provided. ' \
                                                       'Note that list size must be (nb_dense_block + 1)'

        bottleneck_nb_layers = nb_layers[-1]
        rev_layers = nb_layers[::-1]
        nb_layers.extend(rev_layers[1:])
    else:
        bottleneck_nb_layers = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    x = Conv2D(init_conv_filters, (7, 7), kernel_initializer='he_normal', padding='same', name='initial_conv2D',
               use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    nb_filter = init_conv_filters

    skip_list = []

    # Add dense blocks and transition down block
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)

        # Skip connection
        skip_list.append(x)

        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression, weight_decay=weight_decay)

        nb_filter = int(nb_filter * compression)  # this is calculated inside transition_down_block

    # The last dense_block does not have a transition_down_block
    # return the concatenated feature maps without the concatenation of the input
    _, nb_filter, concat_list = __dense_block(x, bottleneck_nb_layers, nb_filter, growth_rate,
                                              dropout_rate=dropout_rate, weight_decay=weight_decay,
                                              return_concat_list=True)

    skip_list = skip_list[::-1]  # reverse the skip list

    # Add dense blocks and transition up block
    for block_idx in range(nb_dense_block):
        n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]

        # upsampling block must upsample only the feature maps (concat_list[1:]),
        # not the concatenation of the input with the feature maps (concat_list[0]).
        l = concatenate(concat_list[1:], axis=concat_axis)

        t = __transition_up_block(l, nb_filters=n_filters_keep, type=upsampling_type, weight_decay=weight_decay)

        # concatenate the skip connection with the transition block
        x = concatenate([t, skip_list[block_idx]], axis=concat_axis)

        # Don't allow the feature map size to grow in upsampling dense blocks
        x_up, nb_filter, concat_list = __dense_block(x, nb_layers[nb_dense_block + block_idx + 1], nb_filter=growth_rate,
                                                     growth_rate=growth_rate, dropout_rate=dropout_rate,
                                                     weight_decay=weight_decay, return_concat_list=True,
                                                     grow_nb_filters=False)

    if include_top:
        x = Conv2D(nb_classes, (1, 1), activation='linear', padding='same', use_bias=False)(x_up)

        if K.image_data_format() == 'channels_first':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape

        x = Reshape((row * col, nb_classes))(x)
        x = Activation(activation)(x)
        x = Reshape((row, col, nb_classes))(x)
    else:
        x = x_up

    return x
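
# Hedged wiring sketch for the builder above (assumes the __dense_block /
# __transition_block / __transition_up_block helpers and the Keras imports
# already present in this module; the input size is illustrative):
img_input = Input(shape=(224, 224, 3))
out = __create_fcn_dense_net(nb_classes=21, img_input=img_input,
                             include_top=True, input_shape=(224, 224, 3))
model = Model(img_input, out)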
Code Example #38
    def _construct_siamese_architecture(self, learning_rate_multipliers,
                                        l2_regularization_penalization):
        """ Constructs the siamese architecture and stores it in the class

        Arguments:
            learning_rate_multipliers
            l2_regularization_penalization
        """

        # Let's define the cnn architecture
        convolutional_net = Sequential()
        convolutional_net.add(
            Conv2D(filters=64,
                   kernel_size=(10, 10),
                   activation='relu',
                   input_shape=self.input_shape,
                   kernel_regularizer=l2(
                       l2_regularization_penalization['Conv1']),
                   name='Conv1'))
        convolutional_net.add(MaxPool2D())

        convolutional_net.add(
            Conv2D(filters=128,
                   kernel_size=(7, 7),
                   activation='relu',
                   kernel_regularizer=l2(
                       l2_regularization_penalization['Conv2']),
                   name='Conv2'))
        convolutional_net.add(MaxPool2D())

        convolutional_net.add(
            Conv2D(filters=128,
                   kernel_size=(4, 4),
                   activation='relu',
                   kernel_regularizer=l2(
                       l2_regularization_penalization['Conv3']),
                   name='Conv3'))
        convolutional_net.add(MaxPool2D())

        convolutional_net.add(
            Conv2D(filters=256,
                   kernel_size=(4, 4),
                   activation='relu',
                   kernel_regularizer=l2(
                       l2_regularization_penalization['Conv4']),
                   name='Conv4'))

        convolutional_net.add(Flatten())
        convolutional_net.add(
            Dense(units=4096,
                  activation='sigmoid',
                  kernel_regularizer=l2(
                      l2_regularization_penalization['Dense1']),
                  name='Dense1'))

        # Now the pairs of images
        input_image_1 = Input(self.input_shape)
        input_image_2 = Input(self.input_shape)

        encoded_image_1 = convolutional_net(input_image_1)
        encoded_image_2 = convolutional_net(input_image_2)

        # L1 distance layer between the two encoded outputs
        # One could use Subtract from Keras, but we want the absolute value
        l1_distance_layer = Lambda(
            lambda tensors: K.abs(tensors[0] - tensors[1]))
        l1_distance = l1_distance_layer([encoded_image_1, encoded_image_2])

        # Same class or not prediction
        prediction = Dense(units=1, activation='sigmoid')(l1_distance)
        self.model = Model(inputs=[input_image_1, input_image_2],
                           outputs=prediction)

        # Define the optimizer and compile the model
        optimizer = Modified_SGD(lr=self.learning_rate,
                                 lr_multipliers=learning_rate_multipliers,
                                 momentum=0.5)

        self.model.compile(loss='binary_crossentropy',
                           metrics=['binary_accuracy'],
                           optimizer=optimizer)
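        # Hypothetical inference usage: given two image batches shaped like
        # self.input_shape, the compiled model returns a same-class probability:
        #   score = self.model.predict([pair_left, pair_right])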
Code Example #39
File: 3test.py Project: nguyenvietbac/OFM_matrix
    X = ELU(alpha=0.3)(X)
    return X

file_running = "3"
########################
"""ofm input"""
train_r = 1
load_w = 1
lambda_val = 2e-5
trainable = True
################################
# mylayer = my_mtom(10)
ofm_input = Input(shape=(None, 588), name='ofm_input')

# print((list(ofm_input)).shape)
x = Dense(30, kernel_regularizer=l2(lambda_val))(ofm_input)
x = ELU(alpha=0.3)(x)
x = res_block(x, size=30, layers=10, lamb=lambda_val)
# x = LSTM(30, return_sequences=True, trainable=trainable, name='ofm_lstm1', kernel_regularizer=l2(lambda_val))(x)
x = LSTM(30, return_sequences=False, trainable=trainable, name='ofm_lstm2', kernel_regularizer=l2(lambda_val))(x)

##################################
# lambda_val = 3e-4
atoms_input = Input(shape=(47, ), name='atoms_input')
y = Dense(20, kernel_regularizer=l2(lambda_val))(atoms_input)
y = ELU(alpha=0.3)(y)
y = res_block(y, size=20, layers=1, lamb=lambda_val)

####################################
# lambda_val = 3e-4
# x = keras.layers.Concatenate([x ,y],axis=-1)
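
# A hedged completion of the commented-out merge above (assumes
# `from keras.layers import concatenate` and `Model` alongside the imports
# already used): in Keras 2 the functional concatenation is a layer applied
# to a tensor list, and a small regression head (assumed) closes the graph.
z = concatenate([x, y], axis=-1)  # (batch, 30 + 20) joint features
out = Dense(1, kernel_regularizer=l2(lambda_val), name='target')(z)  # hypothetical head
model = Model(inputs=[ofm_input, atoms_input], outputs=out)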
Code Example #40
def get_model5():
    model = Sequential()
    model.add(Dense(8, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[auc_roc])
    return model
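
# Hedged usage sketch (X_train/y_train are a hypothetical binary-labeled
# dataset; the first Dense layer infers its input width from the first batch,
# and the auc_roc metric referenced above is a custom helper not shown here):
model = get_model5()
model.fit(X_train, y_train, epochs=20, batch_size=32, validation_split=0.2)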
Code Example #41
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a', W_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = AtrousConvolution2D(nb_filter2, kernel_size, kernel_size, atrous_rate=atrous_rate,
                          border_mode='same', name=conv_name_base + '2b', W_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c', W_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        x = merge([x, input_tensor], mode='sum')
        x = Activation('relu')(x)
        return x
Code Example #42
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Add hidden layers
        net = layers.Dense(units=128,
                           activation='relu',
                           kernel_initializer='glorot_uniform',
                           kernel_regularizer=regularizers.l2(0.01))(states)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.01)(net)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.
        net = layers.Dense(units=256,
                           activation='relu',
                           kernel_initializer='glorot_uniform',
                           kernel_regularizer=regularizers.l2(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.01)(net)
        net = layers.Dense(units=256,
                           activation='relu',
                           kernel_initializer='glorot_uniform',
                           kernel_regularizer=regularizers.l2(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.01)(net)
        net = layers.Dense(units=128,
                           activation='relu',
                           kernel_initializer='glorot_uniform',
                           kernel_regularizer=regularizers.l2(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Dropout(0.01)(net)
        # Add final output layer with sigmoid activation
        raw_actions = layers.Dense(
            units=self.action_size,
            activation='sigmoid',
            kernel_initializer='random_uniform',
            kernel_regularizer=regularizers.l2(0.01),
            # activity_regularizer=regularizers.l2(0.01),
            name='raw_actions')(net)

        # Scale [0, 1] output for each action dimension to proper range

        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # TODO: check loss function

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=0.0001, clipvalue=0.5)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[loss],
            updates=updates_op)
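        # Hypothetical training step using the function built above: the critic
        # supplies the action-value gradients dQ/da, and the trailing 1 sets
        # the Keras learning phase to "train":
        #   self.train_fn([states_batch, action_grads_batch, 1])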
def __create_fcn_dense_net(nb_classes,
                           img_input,
                           include_top,
                           nb_dense_block=5,
                           growth_rate=12,
                           reduction=0.0,
                           dropout_rate=None,
                           weight_decay=1E-4,
                           nb_layers_per_block=4,
                           nb_upsampling_conv=128,
                           upsampling_type='upsampling',
                           batchsize=None,
                           init_conv_filters=48,
                           input_shape=None):
    ''' Build the DenseNet model

    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv', 'atrous' and
            'subpixel'. Defines type of upsampling algorithm used.
        batchsize: Fixed batch size. This is a temporary requirement for
            computation of output shape in the case of Deconvolution2D layers.
            Parameter will be removed in next iteration of Keras, which infers
            output shape of deconvolution layers automatically.
        input_shape: Only used for shape inference in fully convolutional networks.

    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_dim_ordering() == "th" else -1

    if concat_axis == 1:  # th dim ordering
        _, rows, cols = input_shape
    else:
        rows, cols, _ = input_shape

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, "reduction value must lie between 0.0 and 1.0"

    # check if upsampling_conv has minimum number of filters
    # minimum is set to 12, as at least 3 color channels are needed for correct upsampling
    assert nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0, "Parameter `nb_upsampling_conv` (number of channels) must " \
                                                                    "be a positive number divisible by 4 and greater " \
                                                                    "than 12"

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block + 1), "If list, nb_layer is used as provided. " \
                                                       "Note that list size must be (nb_dense_block + 1)"

        bottleneck_nb_layers = nb_layers[-1]
        rev_layers = nb_layers[::-1]
        nb_layers.extend(rev_layers[1:])
    else:
        bottleneck_nb_layers = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    x = Convolution2D(init_conv_filters,
                      3,
                      3,
                      init="he_uniform",
                      border_mode="same",
                      name="initial_conv2D",
                      bias=False,
                      W_regularizer=l2(weight_decay))(img_input)

    nb_filter = init_conv_filters

    skip_list = []

    # Add dense blocks and transition down block
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)

        # Skip connection
        skip_list.append(x)

        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

        nb_filter = int(nb_filter * compression)  # matches the compression applied inside __transition_block

    # The last dense_block does not have a transition_down_block
    # return the concatenated feature maps without the concatenation of the input
    _, nb_filter, concat_list = __dense_block(x,
                                              bottleneck_nb_layers,
                                              nb_filter,
                                              growth_rate,
                                              dropout_rate=dropout_rate,
                                              weight_decay=weight_decay,
                                              return_concat_list=True)

    skip_list = skip_list[::-1]  # reverse the skip list

    if K.image_dim_ordering() == 'th':
        out_shape = [batchsize, nb_filter, rows // 16, cols // 16]
    else:
        out_shape = [batchsize, rows // 16, cols // 16, nb_filter]

    # Add dense blocks and transition up block
    for block_idx in range(nb_dense_block):
        n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]

        if K.image_dim_ordering() == 'th':
            out_shape[1] = n_filters_keep
        else:
            out_shape[3] = n_filters_keep

        # upsampling block must upsample only the feature maps (concat_list[1:]),
        # not the concatenation of the input with the feature maps (concat_list[0]).
        l = merge(concat_list[1:], mode='concat', concat_axis=concat_axis)

        t = __transition_up_block(l,
                                  nb_filters=n_filters_keep,
                                  type=upsampling_type,
                                  output_shape=out_shape)

        # concatenate the skip connection with the transition block
        x = merge([t, skip_list[block_idx]],
                  mode='concat',
                  concat_axis=concat_axis)

        if K.image_dim_ordering() == 'th':
            out_shape[2] *= 2
            out_shape[3] *= 2
        else:
            out_shape[1] *= 2
            out_shape[2] *= 2

        # Don't allow the number of feature maps to grow in upsampling dense blocks
        _, nb_filter, concat_list = __dense_block(x,
                                                  nb_layers[nb_dense_block +
                                                            block_idx + 1],
                                                  nb_filter=growth_rate,
                                                  growth_rate=growth_rate,
                                                  dropout_rate=dropout_rate,
                                                  weight_decay=weight_decay,
                                                  return_concat_list=True,
                                                  grow_nb_filters=False)

    if include_top:
        x = Convolution2D(nb_classes,
                          1,
                          1,
                          activation='linear',
                          border_mode='same',
                          W_regularizer=l2(weight_decay),
                          bias=False)(x)

        if K.image_dim_ordering() == 'th':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape

        x = Reshape((row * col, nb_classes))(x)
        x = Activation('softmax')(x)
        x = Reshape((row, col, nb_classes))(x)

    return x
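
A minimal sketch of wiring this builder into a model, assuming it is called from inside the same module (the private helpers __dense_block, __transition_block and __transition_up_block must exist there) and the same old-style Keras API as above:

from keras.layers import Input
from keras.models import Model

input_shape = (224, 224, 3)  # rows and cols should be divisible by 16
img_input = Input(shape=input_shape)
out = __create_fcn_dense_net(nb_classes=12, img_input=img_input,
                             include_top=True, nb_dense_block=5,
                             growth_rate=16, reduction=0.5,
                             input_shape=input_shape, batchsize=4)
model = Model(img_input, out)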
コード例 #44
0
def VGG19(weights='imagenet',
          input_tensor=None,
          weight_decay=0,
          no_cats=2,
          activation='softmax'):
    """
    Builds the VGG19 convolutional base (without the original fully connected
    top) and appends a randomly initialized, fully connected layer.
    Feed the input tensor like so:
        input_tensor=keras.layers.Input(shape=(224, 224, 3))
    """
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    img_input = input_tensor
    # Block 1
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv1',
               kernel_regularizer=l2(weight_decay))(img_input)
    x = Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               name='block1_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               name='block2_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv3',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(256, (3, 3),
               activation='relu',
               padding='same',
               name='block3_conv4',
               kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv3',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block4_conv4',
               kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv1',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv2',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv3',
               kernel_regularizer=l2(weight_decay))(x)
    x = Conv2D(512, (3, 3),
               activation='relu',
               padding='same',
               name='block5_conv4',
               kernel_regularizer=l2(weight_decay))(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    model = Model(img_input, x, name='vgg19')

    # load weights
    if weights == 'imagenet':
        weights_path = get_file(
            'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
            WEIGHTS_PATH_NO_TOP,
            cache_subdir='models')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')

    x = Flatten()(model.output)
    x = Dense(no_cats,
              activation=activation,
              kernel_regularizer=l2(weight_decay),
              name='fc_final')(x)
    model = Model(inputs=model.input, outputs=x)

    return model
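
A minimal usage sketch, following the docstring's own instruction for the input tensor (the compile settings are illustrative assumptions):

import keras

inp = keras.layers.Input(shape=(224, 224, 3))
model = VGG19(weights='imagenet', input_tensor=inp,
              weight_decay=1e-4, no_cats=2, activation='softmax')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])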
コード例 #45
0
ファイル: cnn.py プロジェクト: artemmam/cnn-short-prot-pred
test = load_data('test')
test_label = load_data('test_label')
valid = load_data('valid')
valid_label = load_data('valid_label')
print('Data are loaded')

# ## CNN for predicting the support matrix
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
keras.backend.set_session(sess)

batch_size = 4
epochs = 25

reg = l2(l2_lambda)
init = "he_normal"

mm1 = 30
mm = 30
# layer 1
#model.add(Embedding(100, 8, input_length=mm))
print('Creating CNN for prediction')

num_classes = np.shape(train_label)[2]

input_layer = Input(shape=(mm, 56, 1))
layer1 = (Conv2D(8, (mm, 5),
                 activation='linear',
                 kernel_regularizer=l2(l2_lambda),
                 padding='same'))(input_layer)
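
The listing stops at the first convolution; a purely hypothetical continuation (not from the original project) could pool, flatten and project onto the label dimension:

from keras.layers import MaxPooling2D, Flatten, Dense
from keras.models import Model

# Hypothetical head: downsample along the feature axis, then classify.
layer2 = MaxPooling2D(pool_size=(1, 2))(layer1)
features = Flatten()(layer2)
output = Dense(num_classes, activation='softmax')(features)
model = Model(input_layer, output)
model.compile(optimizer='adam', loss='categorical_crossentropy')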
コード例 #46
0
def get_cnn(input_shape, num_outputs, l2_num, num_filters, filter_sizes,
            learning_rate, dropout_conv):  # build the CNN model

    # filter_sizes=[3,4,5]
    embedding_dim = input_shape[1]
    sequence_length = input_shape[0]

    l2_strength = l2_num

    inputs = Input(shape=input_shape)
    inputs_drop = Dropout(dropout_conv)(inputs)

    filter_size = int(filter_sizes[0])
    conv_1 = Conv1D(filters=num_filters,
                    kernel_size=filter_size,
                    strides=1,
                    activation='relu',
                    kernel_regularizer=regularizers.l2(l2_strength))(
                        inputs_drop)  # convolution with the given kernel size, stride 1
    pool_1 = AveragePooling1D(pool_size=input_shape[0] - filter_size + 1,
                              strides=1)(conv_1)  # average pooling
    pool_drop_1 = Dropout(dropout_conv)(pool_1)

    filter_size = int(filter_sizes[1])
    conv_2 = Conv1D(
        filters=num_filters,
        kernel_size=filter_size,
        strides=1,
        activation='relu',
        kernel_regularizer=regularizers.l2(l2_strength))(inputs_drop)
    pool_2 = AveragePooling1D(pool_size=input_shape[0] - filter_size + 1,
                              strides=1)(conv_2)
    pool_drop_2 = Dropout(dropout_conv)(pool_2)

    filter_size = int(filter_sizes[2])
    conv_3 = Conv1D(
        filters=num_filters,
        kernel_size=filter_size,
        strides=1,
        activation='relu',
        kernel_regularizer=regularizers.l2(l2_strength))(inputs_drop)
    pool_3 = AveragePooling1D(pool_size=input_shape[0] - filter_size + 1,
                              strides=1)(conv_3)
    pool_drop_3 = Dropout(dropout_conv)(pool_3)

    concatenated = Concatenate(axis=1)([pool_drop_1, pool_drop_2, pool_drop_3])

    dense = Dense(128,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(l2_strength))(
                      Flatten()(concatenated))  # fully connected layer
    dense_drop = Dropout(.5)(dense)

    output = Dense(units=num_outputs,
                   activation='sigmoid',
                   kernel_regularizer=regularizers.l2(l2_strength))(dense_drop)

    # create and compile the model
    model = Model(inputs=inputs, outputs=output)
    optimizer = Adam(lr=learning_rate)
    model.compile(loss='binary_crossentropy', optimizer=optimizer)

    return model
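
A minimal usage sketch; every hyperparameter value below is an illustrative assumption, not taken from the original project:

model = get_cnn(input_shape=(100, 300),  # (sequence_length, embedding_dim)
                num_outputs=5,
                l2_num=1e-4,
                num_filters=64,
                filter_sizes=[3, 4, 5],
                learning_rate=1e-3,
                dropout_conv=0.2)
model.summary()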
コード例 #47
0
def Deeplab_v3p(input_shape,
                n_class,
                encoder_name,
                encoder_weights=None,
                weight_decay=1e-4,
                kernel_initializer="he_normal",
                bn_epsilon=1e-3,
                bn_momentum=0.99):
    """ implementation of Deeplab v3+ for semantic segmentation.
        ref: Chen, L.-C., Zhu, Y., Papandreou, G., et al. Encoder-Decoder with Atrous Separable
             Convolution for Semantic Image Segmentation. arXiv preprint arXiv:1802.02611, 2018.
    :param input_shape: tuple, i.e., (height, width, channel).
    :param n_class: int, number of classes; must be >= 2.
    :param encoder_name: string, name of encoder.
    :param encoder_weights: string, path of weights, default None.
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: a Keras Model instance.
    """
    encoder = build_encoder(input_shape,
                            encoder_name,
                            encoder_weights=encoder_weights,
                            weight_decay=weight_decay,
                            kernel_initializer=kernel_initializer,
                            bn_epsilon=bn_epsilon,
                            bn_momentum=bn_momentum)
    net = encoder.get_layer(scope_table[encoder_name]["pool4"]).output
    net = atrous_spatial_pyramid_pooling(net,
                                         n_filters=256,
                                         rates=[6, 12, 18],
                                         imagelevel=True,
                                         weight_decay=weight_decay,
                                         kernel_initializer=kernel_initializer,
                                         bn_epsilon=bn_epsilon,
                                         bn_momentum=bn_momentum)
    net = Conv2D(256, (1, 1),
                 use_bias=False,
                 activation=None,
                 kernel_regularizer=l2(weight_decay),
                 kernel_initializer=kernel_initializer)(net)
    net = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(net)
    net = Activation("relu")(net)
    net = Dropout(0.1)(net)
    decoder_features = BilinearUpSampling(target_size=(input_shape[0] // 4,
                                                       input_shape[1] //
                                                       4))(net)

    encoder_features = encoder.get_layer(
        scope_table[encoder_name]["pool2"]).output
    encoder_features = Conv2D(
        48, (1, 1),
        use_bias=False,
        activation=None,
        kernel_regularizer=l2(weight_decay),
        kernel_initializer=kernel_initializer)(encoder_features)
    encoder_features = BatchNormalization(
        epsilon=bn_epsilon, momentum=bn_momentum)(encoder_features)
    encoder_features = Activation("relu")(encoder_features)
    net = Concatenate()([encoder_features, decoder_features])

    net = separable_conv_bn(net,
                            256,
                            'decoder_conv1',
                            depth_activation=True,
                            weight_decay=weight_decay,
                            kernel_initializer=kernel_initializer,
                            bn_epsilon=bn_epsilon,
                            bn_momentum=bn_momentum)
    net = separable_conv_bn(net,
                            256,
                            'decoder_conv2',
                            depth_activation=True,
                            weight_decay=weight_decay,
                            kernel_initializer=kernel_initializer,
                            bn_epsilon=bn_epsilon,
                            bn_momentum=bn_momentum)
    net = Dropout(0.1)(net)

    net = BilinearUpSampling(target_size=(input_shape[0], input_shape[1]))(net)
    output = Conv2D(n_class, (1, 1),
                    activation=None,
                    kernel_regularizer=l2(weight_decay),
                    kernel_initializer=kernel_initializer)(net)
    output = Activation("softmax")(output)

    return Model(encoder.input, output)
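
A minimal usage sketch; the encoder name is an assumption, since the set of valid names depends on the surrounding library's build_encoder and scope_table:

model = Deeplab_v3p(input_shape=(512, 512, 3),  # spatial dims divisible by 4
                    n_class=21,
                    encoder_name="resnet_v2_50")  # assumed encoder name
model.summary()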
コード例 #48
0
def conv_lstm_4(left_hand_input, skeleton_input, right_hand_input):

    # global cnn_encode
    input_left_hand = Input(shape=left_hand_input)
    input_skeleton = Input(shape=skeleton_input)
    input_right_hand = Input(shape=right_hand_input)

    left_model = TimeDistributed(Dense(512,
                                       activation='relu'))(input_left_hand)
    left_model = TimeDistributed(Dropout(drop_out))(left_model)
    left_model = LSTM(units=512, return_sequences=True,
                      recurrent_dropout=drop_out,
                      bias_regularizer=l2(0.001),
                      kernel_regularizer=l2(0.001),
                      recurrent_regularizer=l2(0.001))(left_model)
    left_model = TimeDistributed(Dropout(drop_out))(left_model)
    left_model = TimeDistributed(BatchNormalization())(left_model)

    right_model = TimeDistributed(Dense(512,
                                        activation='relu'))(input_right_hand)
    right_model = TimeDistributed(Dropout(drop_out))(right_model)
    right_model = LSTM(units=512, return_sequences=True,
                       recurrent_dropout=drop_out,
                       bias_regularizer=l2(0.001),
                       kernel_regularizer=l2(0.001),
                       recurrent_regularizer=l2(0.001))(right_model)
    right_model = TimeDistributed(Dropout(drop_out))(right_model)
    right_model = TimeDistributed(BatchNormalization())(right_model)

    skeleton_model = TimeDistributed(Dense(256,
                                           activation='relu'))(input_skeleton)
    skeleton_model = TimeDistributed(Dropout(drop_out))(skeleton_model)
    skeleton_model = LSTM(units=512, return_sequences=True,
                          recurrent_dropout=drop_out,
                          bias_regularizer=l2(0.001),
                          kernel_regularizer=l2(0.001),
                          recurrent_regularizer=l2(0.001))(skeleton_model)
    skeleton_model = TimeDistributed(Dropout(drop_out))(skeleton_model)
    skeleton_model = TimeDistributed(BatchNormalization())(skeleton_model)

    concat_img_and_pv = concatenate([left_model, skeleton_model, right_model])
    # concat_img_and_pv = left_model

    # full_model = TimeDistributed(Dense(256, activation='relu'))(concat_img_and_pv)
    full_model = TimeDistributed(Dense(1536,
                                       activation='relu'))(concat_img_and_pv)
    full_model = LSTM(units=1024, return_sequences=True,
                      recurrent_dropout=drop_out,
                      bias_regularizer=l2(0.001),
                      kernel_regularizer=l2(0.001),
                      recurrent_regularizer=l2(0.001))(full_model)
    full_model = TimeDistributed(Dropout(drop_out))(full_model)
    full_model = TimeDistributed(BatchNormalization())(full_model)

    full_model = LSTM(units=1024, return_sequences=False,
                      recurrent_dropout=drop_out,
                      bias_regularizer=l2(0.001),
                      kernel_regularizer=l2(0.001),
                      recurrent_regularizer=l2(0.001))(full_model)
    full_model = Dense(1024, activation="relu")(full_model)
    full_model = Dropout(drop_out)(full_model)
    full_model = Dense(249, activation="softmax")(full_model)

    full_model = Model(
        inputs=[input_left_hand, input_skeleton, input_right_hand],
        outputs=full_model)
    return full_model
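
A minimal usage sketch; the (timesteps, features) shapes are illustrative assumptions, and drop_out must exist as a module-level global because the function reads it:

drop_out = 0.3  # global expected by conv_lstm_4

model = conv_lstm_4(left_hand_input=(32, 2048),
                    skeleton_input=(32, 150),
                    right_hand_input=(32, 2048))
model.compile(optimizer='adam', loss='categorical_crossentropy')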
コード例 #49
0
    def createRegularizedModel(self, inputs, outputs, hiddenLayers,
                               activationType, learningRate):
        bias = True
        dropout = 0
        regularizationFactor = 0.01
        model = Sequential()
        if len(hiddenLayers) == 0:
            model.add(
                Dense(self.output_size,
                      input_shape=(self.input_size, ),
                      kernel_initializer='lecun_uniform',
                      bias=bias))
            model.add(Activation("linear"))
        else:
            if regularizationFactor > 0:
                model.add(
                    Dense(hiddenLayers[0],
                          input_shape=(self.input_size, ),
                          kernel_initializer='lecun_uniform',
                          W_regularizer=l2(regularizationFactor),
                          bias=bias))
            else:
                model.add(
                    Dense(hiddenLayers[0],
                          input_shape=(self.input_size, ),
                          kernel_initializer='lecun_uniform',
                          bias=bias))

            if activationType == "LeakyReLU":
                model.add(LeakyReLU(alpha=0.01))
            else:
                model.add(Activation(activationType))

            for index in range(1, len(hiddenLayers)):
                layerSize = hiddenLayers[index]
                if regularizationFactor > 0:
                    model.add(
                        Dense(layerSize,
                              kernel_initializer='lecun_uniform',
                              W_regularizer=l2(regularizationFactor),
                              bias=bias))
                else:
                    model.add(
                        Dense(layerSize,
                              kernel_initializer='lecun_uniform',
                              bias=bias))
                if activationType == "LeakyReLU":
                    model.add(LeakyReLU(alpha=0.01))
                else:
                    model.add(Activation(activationType))
                if dropout > 0:
                    model.add(Dropout(dropout))
            model.add(
                Dense(self.output_size,
                      kernel_initializer='lecun_uniform',
                      bias=bias))
            model.add(Activation("linear"))
        optimizer = optimizers.RMSprop(lr=learningRate, rho=0.9, epsilon=1e-06)
        model.compile(loss="mse", optimizer=optimizer)
        model.summary()
        return model
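
A minimal usage sketch; `agent` is a hypothetical instance of the surrounding class. Note that the method sizes its first and last layers from self.input_size and self.output_size rather than from its `inputs`/`outputs` arguments:

model = agent.createRegularizedModel(inputs=4, outputs=2,
                                     hiddenLayers=[64, 64],
                                     activationType="LeakyReLU",
                                     learningRate=0.00025)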
コード例 #50
0
def DenseNet(input_shape=None,
             dense_blocks=3,
             dense_layers=-1,
             growth_rate=12,
             nb_classes=None,
             dropout_rate=None,
             bottleneck=False,
             compression=1.0,
             weight_decay=1e-4,
             depth=40):
    """
        input_shape  : shape of the input images. E.g. (28,28,1) for MNIST    
        dense_blocks : number of dense blocks to create (default: 3)
        dense_layers : number of layers in each dense block. You can also pass a list of layer counts, e.g. [2,4,3],
                       or a single integer such as 2 to use 2 layers in every dense block. -1 means that dense_layers
                       will be calculated from the given depth (default: -1)
        growth_rate  : number of filters to add per dense block (default: 12)
        nb_classes   : number of classes
        dropout_rate : defines the dropout rate that is applied after each conv layer (except the first one).
                       In the paper the authors recommend a dropout of 0.2 (default: None)
        bottleneck   : (True / False) if True, bottleneck blocks are added to each dense block (default: False)
        compression  : reduces the number of feature maps at transition layers. In the paper the authors recommend
                       a compression of 0.5 (default: 1.0 - no compression effect)
        weight_decay : weight decay of L2 regularization on weights (default: 1e-4)
        depth        : number of layers (default: 40)
    Returns:
        Model        : A Keras model instance
    """

    if nb_classes is None:
        raise Exception(
            'Please define the number of classes (e.g. nb_classes=10). This is required for the final classification layer.'
        )

    if compression <= 0.0 or compression > 1.0:
        raise Exception(
            'Compression has to be a value between 0.0 and 1.0. Setting compression to 1.0 turns it off.'
        )

    if type(dense_layers) is list:
        if len(dense_layers) != dense_blocks:
            raise AssertionError(
                'The dense_layers list must have exactly dense_blocks entries'
            )
    elif dense_layers == -1:
        if bottleneck:
            dense_layers = (depth - (dense_blocks + 1)) / dense_blocks // 2
        else:
            dense_layers = (depth - (dense_blocks + 1)) // dense_blocks
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
    else:
        dense_layers = [int(dense_layers) for _ in range(dense_blocks)]
    print(dense_layers)
    img_input = Input(shape=input_shape)
    nb_channels = growth_rate * 2

    print('Creating DenseNet')
    print('#############################################')
    print('Dense blocks: %s' % dense_blocks)
    print('Layers per dense block: %s' % dense_layers)
    print('#############################################')

    # Initial convolution layer
    x = Conv2D(nb_channels, (3, 3),
               padding='same',
               strides=(1, 1),
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    # Building dense blocks
    for block in range(dense_blocks):

        # Add dense block
        x, nb_channels = dense_block(x, dense_layers[block], nb_channels,
                                     growth_rate, dropout_rate, bottleneck,
                                     weight_decay)

        if block < dense_blocks - 1:  # if it's not the last dense block
            # Add transition_block
            x = transition_layer(x, nb_channels, dropout_rate, compression,
                                 weight_decay)
            nb_channels = int(nb_channels * compression)

    x = BatchNormalization(gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(nb_classes,
              activation='sigmoid',
              kernel_regularizer=l2(weight_decay),
              bias_regularizer=l2(weight_decay))(x)

    model_name = None
    if growth_rate >= 36:
        model_name = 'widedense'
    else:
        model_name = 'dense'

    if bottleneck:
        model_name = model_name + 'b'

    if compression < 1.0:
        model_name = model_name + 'c'

    return Model(img_input, x, name=model_name), model_name
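
A minimal usage sketch with illustrative hyperparameters (an MNIST-like input and the depth-40 defaults):

model, model_name = DenseNet(input_shape=(28, 28, 1),
                             dense_blocks=3,
                             growth_rate=12,
                             nb_classes=10,
                             dropout_rate=0.2,
                             depth=40)
model.compile(optimizer='adam', loss='binary_crossentropy')
print(model_name)  # e.g. 'dense'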
コード例 #51
0
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D, Dense, BatchNormalization, Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator

model = Sequential()
model.add(
    Conv2D(32,
           kernel_size=(5, 5),
           activation='relu',
           kernel_regularizer=l2(0.001),
           input_shape=(224, 224, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(
    Conv2D(64,
           kernel_size=(5, 5),
           kernel_regularizer=l2(0.001),
           activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(
    Conv2D(64,
           kernel_size=(5, 5),
           kernel_regularizer=l2(0.001),
           activation='relu'))
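
The listing cuts off after the third convolution; a hypothetical completion (not from the original file) that reuses the imports above could be:

# Hypothetical head: finish pooling, then classify into 10 classes.
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax', kernel_regularizer=l2(0.001)))
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])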
コード例 #52
0
printing(
    "---------------------------------------------------------------------------------"
)

model = Sequential()

model.add(Activation('linear',
                     input_shape=(channels, patchHeight, patchWidth)))  # 32
model.add(
    Convolution2D(64,
                  3,
                  3,
                  border_mode='valid',
                  trainable=True,
                  init=initialization,
                  W_regularizer=l2(regularizer),
                  subsample=(1, 1),
                  activation="relu"))  # 30
model.add(
    Convolution2D(64,
                  3,
                  3,
                  border_mode='valid',
                  trainable=True,
                  init=initialization,
                  W_regularizer=l2(regularizer),
                  subsample=(1, 1),
                  activation="relu"))  # 28
model.add(MaxPooling2D(pool_size=(2, 2)))  # 14

# ------------------------------------------------------------------------------------------------------------------------------------------------ #
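
A hypothetical completion of this patch classifier (not from the original script), staying with the same Keras 1 style API; nClasses is an assumed variable:

model.add(Flatten())
model.add(Dense(256, activation="relu", W_regularizer=l2(regularizer)))
model.add(Dense(nClasses, activation="softmax"))  # nClasses: assumed
model.compile(optimizer="adam", loss="categorical_crossentropy")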
コード例 #53
0
ファイル: keras-mlp-binart.py プロジェクト: songfang/DataPrep
n_hidden = int((n_inputs + n_outputs) * hidden_factor)  # number of hidden units, scaled by hidden_factor
N = len(x_train)  # number of samples (minus header)

print("Inputs =", n_inputs)
print("Hidden layer nodes =", n_hidden)
print("Outputs =", n_outputs, "\n")

########### Build model
model = Sequential()

#input to first hidden layer
model.add(Dense(n_hidden, input_dim=n_inputs, 
                kernel_initializer='random_uniform',
                use_bias = True,
                bias_initializer='zeros',
                kernel_regularizer = R.l2(L2_reg),
                activity_regularizer = R.l1(L1_reg),
                activation='relu'))
#first hidden layer to second hidden layer with dropout
model.add(Dropout(drop_out))
model.add(Dense(n_hidden, 
                kernel_initializer='random_uniform',
                use_bias = True,
                bias_initializer='zeros',
                kernel_regularizer = R.l2(L2_reg),
                activity_regularizer = R.l1(L1_reg),
                activation='relu'))

#2nd hidden layer to output layer with dropout
model.add(Dropout(drop_out))
model.add(Dense(n_outputs, 
コード例 #54
0
def build_model(frames=128,
                bands=128,
                channels=1,
                num_labels=10,
                conv_size=(5, 5),
                conv_block='conv',
                downsample_size=(4, 2),
                fully_connected=64,
                n_stages=None,
                n_blocks_per_stage=None,
                filters=24,
                kernels_growth=2,
                dropout=0.5,
                use_strides=False):
    """
    Implements SB-CNN model from
    Deep Convolutional Neural Networks and Data Augmentation for Environmental Sound Classification
    Salamon and Bello, 2016.
    https://arxiv.org/pdf/1608.04363.pdf

    Based on https://gist.github.com/jaron/5b17c9f37f351780744aefc74f93d3ae
    but with the parameters changed back to those of the original paper,
    and with Batch Normalization added
    """
    Conv2 = SeparableConv2D if conv_block == 'depthwise_separable' else Convolution2D
    assert conv_block in ('conv', 'depthwise_separable')
    kernel = conv_size
    if use_strides:
        strides = downsample_size
        pool = (1, 1)
    else:
        strides = (1, 1)
        pool = downsample_size

    block1 = [
        Convolution2D(filters,
                      kernel,
                      padding='same',
                      strides=strides,
                      input_shape=(bands, frames, channels)),
        BatchNormalization(),
        MaxPooling2D(pool_size=pool),
        Activation('relu'),
    ]
    block2 = [
        Conv2(filters * kernels_growth,
              kernel,
              padding='same',
              strides=strides),
        BatchNormalization(),
        MaxPooling2D(pool_size=pool),
        Activation('relu'),
    ]
    block3 = [
        Conv2(filters * kernels_growth,
              kernel,
              padding='valid',
              strides=strides),
        BatchNormalization(),
        Activation('relu'),
    ]
    backend = [
        Flatten(),
        Dropout(dropout),
        Dense(fully_connected, kernel_regularizer=l2(0.001)),
        Activation('relu'),
        Dropout(dropout),
        Dense(num_labels, kernel_regularizer=l2(0.001)),
        Activation('softmax'),
    ]
    layers = block1 + block2 + block3 + backend
    model = Sequential(layers)
    return model
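
A minimal usage sketch with the defaults from the signature (the compile settings are assumptions):

model = build_model(frames=128, bands=128, channels=1, num_labels=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()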
コード例 #55
0
def makeModel():
    global batch_size, nW2V, nAttributes, nVis, reg

    # visual = Input(shape=(nVis,))
    w2v = Input(shape=(nW2V, ))

    dense1 = Dense(256,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(w2v)
    activ1 = Activation('relu')(dense1)

    dense2 = Dense(128,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ1)
    activ2 = Activation('relu')(dense2)

    dense3 = Dense(nAttributes,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ2)
    activ3 = Activation('relu')(dense3)

    dense4 = Dense(512,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ3)
    activ4 = Activation('relu')(dense4)

    dense5 = Dense(1600,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ4)
    activ5 = Activation('relu')(dense5)

    dense6 = Dense(nVis,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ5)
    activ6 = Activation('relu')(dense6)

    bilinear = Model(inputs=[w2v], outputs=[activ3, activ6])

    return bilinear
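
A minimal usage sketch; the global sizes are illustrative assumptions (the function reads them via `global`):

nW2V, nAttributes, nVis, reg = 300, 85, 1024, 1e-4  # assumed sizes
batch_size = 64

model = makeModel()
# Two outputs (activ3, activ6), so one loss per output:
model.compile(optimizer='adam', loss=['mse', 'mse'])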
コード例 #56
0
 def __init__(self,
              input_shape,
              num_labels,
              num_deltas=0,
              weight_dir=None,
              fine_tuning=False,
              tensorboard_dir=None,
              cmvn_path=None,
              concatenate=True,
              double_weights=False):
     if not input_shape[1]:
         raise ValueError('bank size must be fixed')
     if K.image_dim_ordering() != 'tf':
         # not sure if I'm right, but I think the TimeDistributed
         # wrapper will always take axis 1, which could be the
         # channel axis in Theano
         raise ValueError('dimensions must be tensorflow-ordered')
     if weight_dir is not None and not isdir(weight_dir):
         makedirs(weight_dir)
     if tensorboard_dir is not None:
         if K.backend() != 'tensorflow':
             print(
                 'Ignoring tensorboard_dir setting. Backend is not '
                 'tensorflow',
                 file=stderr)
             tensorboard_dir = None
         elif not isdir(tensorboard_dir):
             makedirs(tensorboard_dir)
     self._tensorboard_dir = tensorboard_dir
     self._weight_dir = weight_dir
     self._num_labels = num_labels
     self._input_shape = input_shape
     self._fine_tuning = fine_tuning
     if num_deltas:
         self._deltas = Deltas(num_deltas, concatenate=concatenate)
     else:
         self._deltas = None
     self._audio_input = None
     self._audio_size_input = None
     self._label_input = None
     self._label_size_input = None
     self._activation_layer = None
     self._acoustic_model = None
     self._double_weights = double_weights
     if cmvn_path:
         self._cmvn = CMVN(cmvn_path, dtype='bm')
     else:
         self._cmvn = CMVN()
     # constants or initial settings based on paper
     self._filt_size = (5, 3)  # time first, unlike paper
     self._pool_size = (1, 3)
     self._dropout_p = 0.3
     # I asked the first author about this. To keep the number of
     # parameters constant for maxout, she halved the values she
     # reported in the paper
     self._initial_filts = 128 // (1 if double_weights else 2)
     self._dense_size = 1024 // (1 if double_weights else 2)
     self._layer_kwargs = {
         'activation': 'linear',
         'kernel_initializer': 'uniform',
     }
     if self._fine_tuning:
         self._layer_kwargs['kernel_regularizer'] = l2(l=1e-5)
     self._construct_acoustic_model()
     self._past_epochs = 0
     self._acoustic_model.summary()
     super(ConvCTC, self).__init__()
コード例 #57
0
model.add(Conv2D(512, (1, 3), strides=(1, 1), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(512, (3, 1), strides=(1, 1), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())

model.add(GlobalAveragePooling2D())

# model.add(Flatten())
model.add(Dense(2048))
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.4))
model.add(Dense(1024, kernel_regularizer=regularizers.l2(0.01)))
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.4))

model.add(Dense(500, activation='softmax'))
# model.summary()

##########################
model.compile(optimizer=optimizers.Adam(lr=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()
###########################
# Train = np.load("label.npy")
# # print Train.shape
コード例 #58
0
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet weight regularizer for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs.update(kwargs)
    return _DarknetConv2D(*args, **darknet_conv_kwargs)
コード例 #59
0
  loss = "mean_squared_error"

  #augment_trainX, augment_trainY = train_augment(X_train, Y_train)
  augment_trainX, augment_trainY = train_augment(X_train, Y_train_std) #Standardized version
  print("Training X shape:")
  print(augment_trainX.shape)
  print("Training Y shape:")
  print(augment_trainY.shape)
  print("Test X shape:")
  print(X_test.shape)
  print("Test Y shape:")
  print(Y_test.shape)
  print("Start Training:")
# create model
  model= Sequential()
  model.add(Dense(first_hidden_layer, input_dim=input_nodes,
                  kernel_initializer=initialization,
                  kernel_regularizer=regularizers.l2(regularization),
                  activation='relu'))
  model.add(Dense(second_hidden_layer,
                  kernel_initializer=initialization,
                  kernel_regularizer=regularizers.l2(regularization),
                  activation='relu'))
  model.add(Dropout(0.5))
  model.add(Dense(third_hidden_layer,
                  kernel_initializer=initialization,
                  kernel_regularizer=regularizers.l2(regularization),
                  activation='relu'))
  model.add(Dropout(0.5))
  model.add(Dense(output_nodes, kernel_initializer=initialization,
                  activation='relu'))
# Compile model
  adam_opt = optimizers.Adam(learning_rate=lr, decay=1e-7)  # avoid shadowing the Adam class
  model.compile(loss=loss, optimizer=adam_opt)
# Fit the model
  import time
  train_start = time.perf_counter()  # time.clock() was removed in Python 3.8
#model.fit(trainX, trainY, epochs=epoch, batch_size=10, verbose=0) #Fit the model based on current training set, excluding the test sample.
  history = model.fit(augment_trainX, augment_trainY, validation_split = 0.2, epochs=epoch, batch_size=128, verbose=0) #Fit the model based on expanded training set, after excluding the test sample.
コード例 #60
0
def makeModelFusion1():
    global batch_size, nW2V, nAttributes, nVis, reg

    w2v = Input(shape=(nW2V, ))
    a2v = Input(shape=(nW2V, ))

    dense1_w2v = Dense(256,
                       kernel_regularizer=regularizers.l2(reg),
                       bias_regularizer=regularizers.l2(reg))(w2v)
    activ1_w2v = Activation('relu')(dense1_w2v)

    dense2_w2v = Dense(128,
                       kernel_regularizer=regularizers.l2(reg),
                       bias_regularizer=regularizers.l2(reg))(activ1_w2v)
    activ2_w2v = Activation('relu')(dense2_w2v)

    Model1 = Model(inputs=w2v, outputs=activ2_w2v)

    dense1_a2v = Dense(256,
                       kernel_regularizer=regularizers.l2(reg),
                       bias_regularizer=regularizers.l2(reg))(a2v)
    activ1_a2v = Activation('relu')(dense1_a2v)

    dense2_a2v = Dense(128,
                       kernel_regularizer=regularizers.l2(reg),
                       bias_regularizer=regularizers.l2(reg))(activ1_a2v)
    activ2_a2v = Activation('relu')(dense2_a2v)

    Model2 = Model(inputs=a2v, outputs=activ2_a2v)

    combined = Add()([Model1.output, Model2.output])

    dense3 = Dense(nAttributes,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(combined)
    activ3 = Activation('relu')(dense3)

    dense4 = Dense(512,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ3)
    activ4 = Activation('relu')(dense4)

    dense5 = Dense(1600,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ4)
    activ5 = Activation('relu')(dense5)

    dense6 = Dense(nVis,
                   kernel_regularizer=regularizers.l2(reg),
                   bias_regularizer=regularizers.l2(reg))(activ5)
    activ6 = Activation('relu')(dense6)

    bilinear = Model(inputs=[w2v, a2v], outputs=[activ3, activ6])

    return bilinear
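
Usage mirrors makeModel above but with two inputs; the globals are again illustrative assumptions:

nW2V, nAttributes, nVis, reg = 300, 85, 1024, 1e-4  # assumed sizes

model = makeModelFusion1()
model.compile(optimizer='adam', loss=['mse', 'mse'])
model.summary()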