Example #1
    def projection_block(self, x, strides=(2, 2), **metaparameters):
        """ Construct a ResNeXT block with projection shortcut
            x          : input to the block
            strides    : whether entry convolution is strided (i.e., (2, 2) vs (1, 1))
            filters_in : number of filters  (channels) at the input convolution
            filters_out: number of filters (channels) at the output convolution
            cardinality: width of group convolution
        """
        filters_in = metaparameters['filters_in']
        filters_out = metaparameters['filters_out']
        if 'cardinality' in metaparameters:
            cardinality = metaparameters['cardinality']
        else:
            cardinality = self.cardinality

        # Construct the projection shortcut
        # Increase filters by 2X to match shape when added to output of block
        shortcut = self.Conv2D(x,
                               filters_out, (1, 1),
                               strides=strides,
                               padding='same',
                               **metaparameters)
        shortcut = self.BatchNormalization(shortcut)

        # Dimensionality Reduction
        x = self.Conv2D(x,
                        filters_in, (1, 1),
                        strides=(1, 1),
                        padding='same',
                        use_bias=False,
                        **metaparameters)
        x = self.BatchNormalization(x)
        x = self.ReLU(x)

        # Cardinality (Wide) Layer (split-transform)
        filters_card = filters_in // cardinality
        groups = []
        for i in range(cardinality):
            # Bind i as a default argument so each Lambda slices its own group
            # (avoids the late-binding closure pitfall if the graph is retraced)
            group = Lambda(lambda z, i=i:
                           z[:, :, :, i * filters_card:(i + 1) * filters_card])(x)
            groups.append(
                self.Conv2D(group,
                            filters_card, (3, 3),
                            strides=strides,
                            padding='same',
                            use_bias=False,
                            **metaparameters))

        # Concatenate the outputs of the cardinality layer together (merge)
        x = Concatenate()(groups)
        x = self.BatchNormalization(x)
        x = self.ReLU(x)

        # Dimensionality restoration
        x = self.Conv2D(x,
                        filters_out, (1, 1),
                        strides=(1, 1),
                        padding='same',
                        use_bias=False,
                        **metaparameters)
        x = self.BatchNormalization(x)

        # Identity Link: Add the shortcut (input) to the output of the block
        x = Add()([shortcut, x])
        x = self.ReLU(x)
        return x
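The block above emulates a grouped convolution by slicing the channel axis, convolving each slice, and merging the slices with Concatenate. Below is a minimal standalone sketch of that split-transform-merge pattern using plain tf.keras layers; the self.Conv2D/BatchNormalization wrappers belong to the original class, and the widths and input shape here are illustrative assumptions.

import tensorflow as tf
from tensorflow.keras import layers, Model

def grouped_conv_block(x, filters_in=64, filters_out=128, cardinality=8, strides=(2, 2)):
    """Emulate a grouped 3x3 convolution: slice channels into `cardinality`
    groups, convolve each group, and merge the groups with Concatenate."""
    filters_card = filters_in // cardinality
    groups = []
    for i in range(cardinality):
        # Bind i as a default argument so each Lambda slices its own group
        group = layers.Lambda(
            lambda z, i=i: z[:, :, :, i * filters_card:(i + 1) * filters_card])(x)
        groups.append(layers.Conv2D(filters_card, (3, 3), strides=strides,
                                    padding='same', use_bias=False)(group))
    x = layers.Concatenate()(groups)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    # 1x1 projection back to the block's output width
    return layers.Conv2D(filters_out, (1, 1), padding='same')(x)

inputs = layers.Input(shape=(32, 32, 64))
outputs = grouped_conv_block(inputs)
model = Model(inputs, outputs)   # model.summary() shows the cardinality branches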
Example #2
def khop_model_distribute10(): # input/output = num of sensors 
    sensor_matrix1 = Input(shape=(num_sensors+1, num_sensors+1))
    sensor_matrix2 = Input(shape=(num_sensors+1, num_sensors+1))
    #sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    s_input1 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input2 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input3 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input4 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input5 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input6 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input7 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input8 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input9 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input10 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    
    s_cnn = sensor_cnn(input_shape, repetitions = [2,2,2,2])
    extract_cnn1 = s_cnn(s_input1)
    extract_cnn2 = s_cnn(s_input2)
    extract_cnn3 = s_cnn(s_input3)
    extract_cnn4 = s_cnn(s_input4)
    extract_cnn5 = s_cnn(s_input5)
    extract_cnn6 = s_cnn(s_input6)
    extract_cnn7 = s_cnn(s_input7)
    extract_cnn8 = s_cnn(s_input8)
    extract_cnn9 = s_cnn(s_input9)
    extract_cnn10 = s_cnn(s_input10)
    
    extract_cnn = Concatenate(axis=1)([extract_cnn1, extract_cnn2, extract_cnn3, 
                                       extract_cnn4, extract_cnn5, extract_cnn6,
                                       extract_cnn7, extract_cnn8, extract_cnn9, extract_cnn10])
        
    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_h1 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix1])
    G_h2 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix2])
    G_1 = Concatenate(axis=-1)([G_h1, G_h2])
  
    G_2h1 = GraphConv(256, 'relu')([G_1, sensor_matrix1])
    G_2h2 = GraphConv(256, 'relu')([G_1, sensor_matrix2])
    G_2 = Concatenate(axis=-1)([G_2h1, G_2h2])
    
    gnn_output = tf.split(G_2, num_sensors+1, 1)
        
    mlp_layer = mlp_model()
    
    output1 = mlp_layer(Flatten()(gnn_output[0]))
    output2 = mlp_layer(Flatten()(gnn_output[1]))
    output3 = mlp_layer(Flatten()(gnn_output[2]))
    output4 = mlp_layer(Flatten()(gnn_output[3]))
    output5 = mlp_layer(Flatten()(gnn_output[4]))
    output6 = mlp_layer(Flatten()(gnn_output[5]))
    output7 = mlp_layer(Flatten()(gnn_output[6]))
    output8 = mlp_layer(Flatten()(gnn_output[7]))
    output9 = mlp_layer(Flatten()(gnn_output[8]))
    output10 = mlp_layer(Flatten()(gnn_output[9]))
    
    model = Model(inputs=[s_input1, s_input2, s_input3, s_input4, s_input5,
                          s_input6, s_input7, s_input8, s_input9, s_input10,
                          sensor_matrix1, sensor_matrix2],
                  outputs=[output1, output2, output3, output4, output5,
                           output6, output7, output8, output9, output10])
    return model
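The ten per-sensor branches above all reuse one sensor_cnn instance and one mlp_layer head, so the same wiring can be expressed with loops over a list of inputs. The sketch below is self-contained and uses simple Conv/Dense stand-ins for sensor_cnn, the GraphConv stage, and mlp_model, so shapes and layer sizes are assumptions for illustration only.

import tensorflow as tf
from tensorflow.keras import layers, Model

num_sensors = 10
feat_shape = (32, 32, 3)                                 # assumed per-sensor input shape

# shared feature extractor (stand-in for sensor_cnn)
s_in = layers.Input(shape=feat_shape)
s_feat = layers.GlobalAveragePooling2D()(layers.Conv2D(8, 3, activation='relu')(s_in))
shared_cnn = Model(s_in, layers.Reshape((1, 8))(s_feat))

# shared per-node head (stand-in for mlp_model)
h_in = layers.Input(shape=(8,))
shared_head = Model(h_in, layers.Dense(1)(h_in))

sensor_inputs = [layers.Input(shape=feat_shape, name=f'sensor_{i}') for i in range(num_sensors)]
features = layers.Concatenate(axis=1)([shared_cnn(s) for s in sensor_inputs])   # (batch, N, 8)

# ...the original model applies its GraphConv layers to `features` here...

# tf.split works directly on Keras tensors in TF 2.4+; wrap it in a Lambda on older versions
per_node = tf.split(features, num_sensors, axis=1)
outputs = [shared_head(layers.Flatten()(node)) for node in per_node]
model = Model(inputs=sensor_inputs, outputs=outputs)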
Example #3
def predict_model(input_size, epochs=200, lr=1e-3):    
    inputs = Input(shape=input_size, name='inputs')

    # Embedding input
    wday_input = Input(shape=(1,), name='wday')
    month_input = Input(shape=(1,), name='month')
    year_input = Input(shape=(1,), name='year')
    mday_input = Input(shape=(1,), name='mday')
    quarter_input = Input(shape=(1,), name='quarter')
    event_name_1_input = Input(shape=(1,), name='event_name_1')
    event_type_1_input = Input(shape=(1,), name='event_type_1')
    event_name_2_input = Input(shape=(1,), name='event_name_2')
    event_type_2_input = Input(shape=(1,), name='event_type_2')
    item_id_input = Input(shape=(1,), name='item_id')
    dept_id_input = Input(shape=(1,), name='dept_id')
    store_id_input = Input(shape=(1,), name='store_id')
    cat_id_input = Input(shape=(1,), name='cat_id')
    state_id_input = Input(shape=(1,), name='state_id')
    snap_CA_input = Input(shape=(1,), name='snap_CA')
    snap_TX_input = Input(shape=(1,), name='snap_TX')
    snap_WI_input = Input(shape=(1,), name='snap_WI')


    wday_emb = Flatten()(Embedding(7, 1)(wday_input))
    month_emb = Flatten()(Embedding(12, 2)(month_input))
    year_emb = Flatten()(Embedding(6, 1)(year_input))
    mday_emb = Flatten()(Embedding(31, 2)(mday_input))
    quarter_emb = Flatten()(Embedding(4, 1)(quarter_input))
    event_name_1_emb = Flatten()(Embedding(31, 2)(event_name_1_input))
    event_type_1_emb = Flatten()(Embedding(5, 1)(event_type_1_input))
    event_name_2_emb = Flatten()(Embedding(5, 1)(event_name_2_input))
    event_type_2_emb = Flatten()(Embedding(5, 1)(event_type_2_input))

    item_id_emb = Flatten()(Embedding(3049, 4)(item_id_input))
    dept_id_emb = Flatten()(Embedding(7, 1)(dept_id_input))
    store_id_emb = Flatten()(Embedding(10, 1)(store_id_input))
    cat_id_emb = Flatten()(Embedding(6, 1)(cat_id_input))
    state_id_emb = Flatten()(Embedding(3, 1)(state_id_input))
    

    x = Concatenate(-1)([inputs, wday_emb, month_emb, year_emb, mday_emb,
                         quarter_emb, event_name_1_emb, event_type_1_emb, event_name_2_emb,
                         event_type_2_emb, item_id_emb, dept_id_emb, store_id_emb, cat_id_emb,
                         state_id_emb])

    x = Dense(1024, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)        
    x = Dense(256, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(128, activation='relu')(x)

    x_deep = Dense(64, activation='relu')(x)
    x_deep = BatchNormalization()(x_deep)
    x_deep = Dense(128, activation='relu')(x_deep)
    x_deep = BatchNormalization()(x_deep)
    x_deep = Dense(256, activation='relu')(x_deep)

    x = Concatenate(-1)([inputs, x])
    x_res = Dense(64, activation='relu')(x)
    x_res = BatchNormalization()(x_res)
    x_res = Dense(128, activation='relu')(x_res)
    x_res = BatchNormalization()(x_res)
    x_res = Dense(256, activation='relu')(x_res)

    x = Concatenate(-1)([x_deep, x_res])

    outputs = Dense(1, activation='sigmoid')(x)
    # outputs = resnet_v2(x)
    
    # optimizer = Adam(lr=lr)#Adam(lr=lr)
    input_dic = {
        'inputs': inputs, 'wday': wday_input, 'month': month_input, 'year': year_input,
        'mday': mday_input, 'quarter': quarter_input, 'event_name_1': event_name_1_input,
        'event_type_1': event_type_1_input, 'event_name_2': event_name_2_input,
        'event_type_2': event_type_2_input, 'item_id': item_id_input, 'dept_id': dept_id_input,
        'store_id': store_id_input, 'cat_id': cat_id_input, 'state_id': state_id_input,

    }
    model = Model(input_dic, outputs)#, name='predict_model')
    
    return model
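Because every Input above is named and the model is built from a dict of those inputs, training data can be fed as a matching dict of arrays. A hedged usage sketch, assuming the Keras imports used by predict_model are in scope; the shapes, vocabulary sizes, and random data below are for illustration only (note that the snap_CA/snap_TX/snap_WI inputs are declared but never wired into the graph, so they are not part of the feed dict).

import numpy as np

model = predict_model(input_size=(20,))
model.compile(optimizer='adam', loss='mse')

n = 128
batch = {'inputs': np.random.rand(n, 20).astype('float32')}
# every embedding input is a single integer id per row
for name, vocab in [('wday', 7), ('month', 12), ('year', 6), ('mday', 31),
                    ('quarter', 4), ('event_name_1', 31), ('event_type_1', 5),
                    ('event_name_2', 5), ('event_type_2', 5), ('item_id', 3049),
                    ('dept_id', 7), ('store_id', 10), ('cat_id', 6), ('state_id', 3)]:
    batch[name] = np.random.randint(0, vocab, size=(n, 1))

y = np.random.rand(n, 1).astype('float32')
model.fit(batch, y, epochs=1, batch_size=32)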
Example #4
    def __build_model(self, task, num_classes, nets, categorical_columns,
                      continuous_columns, var_len_categorical_columns, config):
        logger.info('Building model...')
        self.model_desc = ModelDesc()
        categorical_inputs, continuous_inputs, var_len_categorical_inputs = \
            self.__build_inputs(categorical_columns, continuous_columns, var_len_categorical_columns)
        embeddings = self.__build_embeddings(categorical_columns,
                                             categorical_inputs,
                                             var_len_categorical_columns,
                                             var_len_categorical_inputs,
                                             config.embedding_dropout)
        dense_layer = self.__build_denses(continuous_columns,
                                          continuous_inputs,
                                          config.dense_dropout)

        flatten_emb_layer = None
        if len(embeddings) > 0:
            if len(embeddings) == 1:
                flatten_emb_layer = Flatten(name='flatten_embeddings')(
                    embeddings[0])
            else:
                flatten_emb_layer = Flatten(name='flatten_embeddings')(
                    Concatenate(name='concat_embeddings_axis_0')(embeddings))

        self.model_desc.nets = nets
        self.model_desc.stacking = config.stacking_op
        concat_emb_dense = self.__concat_emb_dense(flatten_emb_layer,
                                                   dense_layer)
        # concat_emb_dense = flatten_emb_layer
        outs = {}
        for net in nets:
            logit = deepnets.get(net)
            out = logit(embeddings, flatten_emb_layer, dense_layer,
                        concat_emb_dense, self.config, self.model_desc)
            if out is not None:
                outs[net] = out
        if len(outs) > 1:
            logits = []
            for name, out in outs.items():
                if len(out.shape) > 2:
                    out = Flatten(name=f'flatten_{name}_out')(out)
                if out.shape[-1] > 1:
                    logit = Dense(1,
                                  use_bias=False,
                                  activation=None,
                                  name=f'dense_logit_{name}')(out)
                else:
                    logit = out
                logits.append(logit)
            if config.stacking_op == consts.STACKING_OP_ADD:
                x = Add(name='add_logits')(logits)
            elif config.stacking_op == consts.STACKING_OP_CONCAT:
                x = Concatenate(name='concat_logits')(logits)
            else:
                raise ValueError(
                    f'Unsupported stacking_op:{config.stacking_op}.')
        elif len(outs) == 1:
            name, out = outs.popitem()
            # out = list(outs.values())[0]
            if len(out.shape) > 2:
                out = Flatten(name=f'flatten_{name}_out')(out)
            x = out
        else:
            raise ValueError(f'Unexpected logit output: {outs}')
        all_inputs = list(categorical_inputs.values()) + list(var_len_categorical_inputs.values()) + \
                     list(continuous_inputs.values())
        output = self.__output_layer(x,
                                     task,
                                     num_classes,
                                     use_bias=self.config.output_use_bias)
        model = Model(inputs=all_inputs, outputs=output)
        model = self.__compile_model(model, task, num_classes,
                                     config.optimizer, config.loss,
                                     config.metrics)
        print(self.model_desc)
        return model
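The multi-net branch above reduces each sub-network's output to a single logit and then combines the logits with either Add or Concatenate, depending on config.stacking_op. A minimal standalone sketch of just that stacking step; the two toy heads and the string flag are illustrative assumptions, not the deepnets registry used by the class.

from tensorflow.keras.layers import Input, Dense, Add, Concatenate
from tensorflow.keras.models import Model

def stack_heads(stacking_op='add'):
    inp = Input(shape=(16,))
    outs = {'dnn': Dense(8, activation='relu')(inp),    # wide output, needs a logit layer
            'linear': Dense(1)(inp)}                     # already a single logit
    logits = []
    for name, out in outs.items():
        if out.shape[-1] > 1:
            out = Dense(1, use_bias=False, name=f'dense_logit_{name}')(out)
        logits.append(out)
    if stacking_op == 'add':
        x = Add(name='add_logits')(logits)               # summed into one logit
    else:
        x = Concatenate(name='concat_logits')(logits)    # logits kept side by side
    return Model(inp, Dense(1, activation='sigmoid')(x))

stack_heads('add')
stack_heads('concat')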
Example #5
model = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(model)

model = Conv2D(64, (1, 1), activation='relu')(model)
model = Conv2D(192, (3, 3), padding='same', activation='relu')(model)
model = BatchNormalization()(model)
model = MaxPooling2D((3, 3), strides=2, padding='same')(model)

# INCEPTION MODULE
tower_1 = Conv2D(64, (1, 1), activation='relu')(model)
tower_2 = Conv2D(96, (1, 1), activation='relu')(model)
tower_2 = Conv2D(128, (3, 3), padding='same', activation='relu')(tower_2)
tower_3 = Conv2D(16, (1, 1), activation='relu')(model)
tower_3 = Conv2D(32, (5, 5), padding='same', activation='relu')(tower_3)
tower_4 = MaxPooling2D((3, 3), strides=1, padding='same')(model)
tower_4 = Conv2D(32, (1, 1), activation='relu')(tower_4)
model = Concatenate(axis=-1)([tower_1, tower_2, tower_3, tower_4])
# INCEPTION MODULE
tower_1 = Conv2D(128, (1, 1), activation='relu')(model)
tower_2 = Conv2D(128, (1, 1), activation='relu')(model)
tower_2 = Conv2D(192, (3, 3), padding='same', activation='relu')(tower_2)
tower_3 = Conv2D(32, (1, 1), activation='relu')(model)
tower_3 = Conv2D(96, (5, 5), padding='same', activation='relu')(tower_3)
tower_4 = MaxPooling2D((3, 3), strides=1, padding='same')(model)
tower_4 = Conv2D(64, (1, 1), activation='relu')(tower_4)
model = Concatenate(axis=-1)([tower_1, tower_2, tower_3, tower_4])
model = MaxPooling2D((3, 3), strides=2, padding='same')(model)

# INCEPTION MODULE
tower_1 = Conv2D(192, (1, 1), activation='relu')(model)
tower_2 = Conv2D(96, (1, 1), activation='relu')(model)
tower_2 = Conv2D(208, (3, 3), padding='same', activation='relu')(tower_2)
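The fragment above (which is cut off mid-module) repeats the same four-tower pattern: a 1x1 tower, a 1x1-then-3x3 tower, a 1x1-then-5x5 tower, and a pooled 1x1 tower, merged with Concatenate. A hedged sketch of that pattern factored into a reusable helper; the default filter counts mirror the first module shown and the input shape is an assumption.

from tensorflow.keras.layers import Conv2D, MaxPooling2D, Concatenate, Input
from tensorflow.keras.models import Model

def inception_module(x, f1=64, f3_reduce=96, f3=128, f5_reduce=16, f5=32, fpool=32):
    tower_1 = Conv2D(f1, (1, 1), activation='relu')(x)
    tower_2 = Conv2D(f3_reduce, (1, 1), activation='relu')(x)
    tower_2 = Conv2D(f3, (3, 3), padding='same', activation='relu')(tower_2)
    tower_3 = Conv2D(f5_reduce, (1, 1), activation='relu')(x)
    tower_3 = Conv2D(f5, (5, 5), padding='same', activation='relu')(tower_3)
    tower_4 = MaxPooling2D((3, 3), strides=1, padding='same')(x)
    tower_4 = Conv2D(fpool, (1, 1), activation='relu')(tower_4)
    return Concatenate(axis=-1)([tower_1, tower_2, tower_3, tower_4])

inputs = Input(shape=(56, 56, 192))                       # assumed feature-map shape
x = inception_module(inputs)                              # 64 + 128 + 32 + 32 = 256 channels
x = inception_module(x, 128, 128, 192, 32, 96, 64)
model = Model(inputs, x)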
Example #6
def InceptionResNetV2():
	
	inputs = Input(shape=(160, 160, 3))
	x = Conv2D(32, 3, strides=2, padding='valid', use_bias=False, name= 'Conv2d_1a_3x3') (inputs)
	x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_1a_3x3_BatchNorm')(x)
	x = Activation('relu', name='Conv2d_1a_3x3_Activation')(x)
	x = Conv2D(32, 3, strides=1, padding='valid', use_bias=False, name= 'Conv2d_2a_3x3') (x)
	x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_2a_3x3_BatchNorm')(x)
	x = Activation('relu', name='Conv2d_2a_3x3_Activation')(x)
	x = Conv2D(64, 3, strides=1, padding='same', use_bias=False, name= 'Conv2d_2b_3x3') (x)
	x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_2b_3x3_BatchNorm')(x)
	x = Activation('relu', name='Conv2d_2b_3x3_Activation')(x)
	x = MaxPooling2D(3, strides=2, name='MaxPool_3a_3x3')(x)
	x = Conv2D(80, 1, strides=1, padding='valid', use_bias=False, name= 'Conv2d_3b_1x1') (x)
	x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_3b_1x1_BatchNorm')(x)
	x = Activation('relu', name='Conv2d_3b_1x1_Activation')(x)
	x = Conv2D(192, 3, strides=1, padding='valid', use_bias=False, name= 'Conv2d_4a_3x3') (x)
	x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_4a_3x3_BatchNorm')(x)
	x = Activation('relu', name='Conv2d_4a_3x3_Activation')(x)
	x = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Conv2d_4b_3x3') (x)
	x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_4b_3x3_BatchNorm')(x)
	x = Activation('relu', name='Conv2d_4b_3x3_Activation')(x)
	
	# 5x Block35 (Inception-ResNet-A block):
	branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block35_1_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_1_Conv2d_0b_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_1_Branch_1_Conv2d_0b_3x3_Activation')(branch_1)
	branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0a_1x1') (x)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0a_1x1_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0b_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0b_3x3_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0c_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0c_3x3_Activation')(branch_2)
	branches = [branch_0, branch_1, branch_2]
	mixed = Concatenate(axis=3, name='Block35_1_Concatenate')(branches)
	up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_1_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up)
	x = add([x, up])
	x = Activation('relu', name='Block35_1_Activation')(x)
	
	branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block35_2_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_2_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_1_Conv2d_0b_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_2_Branch_1_Conv2d_0b_3x3_Activation')(branch_1)
	branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0a_1x1') (x)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0b_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0b_3x3_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0c_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0c_3x3_Activation')(branch_2)
	branches = [branch_0, branch_1, branch_2]
	mixed = Concatenate(axis=3, name='Block35_2_Concatenate')(branches)
	up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_2_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up)
	x = add([x, up])
	x = Activation('relu', name='Block35_2_Activation')(x)
	
	branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block35_3_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_3_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_1_Conv2d_0b_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_3_Branch_1_Conv2d_0b_3x3_Activation')(branch_1)
	branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0a_1x1') (x)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0a_1x1_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0b_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0b_3x3_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0c_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0c_3x3_Activation')(branch_2)
	branches = [branch_0, branch_1, branch_2]
	mixed = Concatenate(axis=3, name='Block35_3_Concatenate')(branches)
	up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_3_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up)
	x = add([x, up])
	x = Activation('relu', name='Block35_3_Activation')(x)
	
	branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block35_4_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_4_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_1_Conv2d_0b_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_4_Branch_1_Conv2d_0b_3x3_Activation')(branch_1)
	branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0a_1x1') (x)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0a_1x1_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0b_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0b_3x3_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0c_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0c_3x3_Activation')(branch_2)
	branches = [branch_0, branch_1, branch_2]
	mixed = Concatenate(axis=3, name='Block35_4_Concatenate')(branches)
	up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_4_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up)
	x = add([x, up])
	x = Activation('relu', name='Block35_4_Activation')(x)
	
	branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block35_5_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_5_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_1_Conv2d_0b_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block35_5_Branch_1_Conv2d_0b_3x3_Activation')(branch_1)
	branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0a_1x1') (x)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0a_1x1_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0b_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0b_3x3_Activation')(branch_2)
	branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0c_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0c_3x3_Activation')(branch_2)
	branches = [branch_0, branch_1, branch_2]
	mixed = Concatenate(axis=3, name='Block35_5_Concatenate')(branches)
	up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_5_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up)
	x = add([x, up])
	x = Activation('relu', name='Block35_5_Activation')(x)

	# Mixed 6a (Reduction-A block):
	branch_0 = Conv2D(384, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_6a_Branch_0_Conv2d_1a_3x3') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation')(branch_0)
	branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(192, 3, strides=1, padding='same', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_0b_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation')(branch_1)
	branch_1 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_1a_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation')(branch_1)
	branch_pool = MaxPooling2D(3, strides=2, padding='valid', name='Mixed_6a_Branch_2_MaxPool_1a_3x3')(x)
	branches = [branch_0, branch_1, branch_pool]
	x = Concatenate(axis=3, name='Mixed_6a')(branches)

	# 10x Block17 (Inception-ResNet-B block):
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_1_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_1_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_1_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_1_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_2_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_2_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_2_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_2_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_3_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_3_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_3_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_3_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_4_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_4_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_4_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_4_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_5_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_5_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_5_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_5_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_6_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_6_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_6_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_6_Activation')(x)	
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_7_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_7_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_7_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_7_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_8_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_8_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_8_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_8_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_9_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_9_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_9_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_9_Activation')(x)
	
	branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block17_10_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0b_1x7') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0b_1x7_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0b_1x7_Activation')(branch_1)
	branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0c_7x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0c_7x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0c_7x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block17_10_Concatenate')(branches)
	up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_10_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up)
	x = add([x, up])
	x = Activation('relu', name='Block17_10_Activation')(x)

	# Mixed 7a (Reduction-B block): 8 x 8 x 2080	
	branch_0 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_0_Conv2d_0a_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation')(branch_0)
	branch_0 = Conv2D(384, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_0_Conv2d_1a_3x3') (branch_0)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation')(branch_0)
	branch_1 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_1_Conv2d_1a_3x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation')(branch_1)
	branch_2 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_0a_1x1') (x)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation')(branch_2)
	branch_2 = Conv2D(256, 3, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_0b_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation')(branch_2)
	branch_2 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_1a_3x3') (branch_2)
	branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm')(branch_2)
	branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation')(branch_2)
	branch_pool = MaxPooling2D(3, strides=2, padding='valid', name='Mixed_7a_Branch_3_MaxPool_1a_3x3')(x)
	branches = [branch_0, branch_1, branch_2, branch_pool]
	x = Concatenate(axis=3, name='Mixed_7a')(branches)

	# 5x Block8 (Inception-ResNet-C block):
	
	branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block8_1_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0b_1x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0b_1x3_Activation')(branch_1)
	branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0c_3x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0c_3x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block8_1_Concatenate')(branches)
	up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_1_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up)
	x = add([x, up])
	x = Activation('relu', name='Block8_1_Activation')(x)
	
	branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block8_2_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0b_1x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0b_1x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0b_1x3_Activation')(branch_1)
	branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0c_3x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0c_3x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0c_3x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block8_2_Concatenate')(branches)
	up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_2_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up)
	x = add([x, up])
	x = Activation('relu', name='Block8_2_Activation')(x)
	
	branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block8_3_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0b_1x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0b_1x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0b_1x3_Activation')(branch_1)
	branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0c_3x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0c_3x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0c_3x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block8_3_Concatenate')(branches)
	up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_3_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up)
	x = add([x, up])
	x = Activation('relu', name='Block8_3_Activation')(x)
	
	branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block8_4_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0b_1x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0b_1x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0b_1x3_Activation')(branch_1)
	branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0c_3x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0c_3x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0c_3x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block8_4_Concatenate')(branches)
	up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_4_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up)
	x = add([x, up])
	x = Activation('relu', name='Block8_4_Activation')(x)
	
	branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block8_5_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0b_1x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0b_1x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0b_1x3_Activation')(branch_1)
	branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0c_3x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0c_3x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0c_3x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block8_5_Concatenate')(branches)
	up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_5_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up)
	x = add([x, up])
	x = Activation('relu', name='Block8_5_Activation')(x)
	
	branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_0_Conv2d_1x1') (x)
	branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
	branch_0 = Activation('relu', name='Block8_6_Branch_0_Conv2d_1x1_Activation')(branch_0)
	branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0a_1x1') (x)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0a_1x1_Activation')(branch_1)
	branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0b_1x3') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0b_1x3_Activation')(branch_1)
	branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0c_3x1') (branch_1)
	branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm')(branch_1)
	branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0c_3x1_Activation')(branch_1)
	branches = [branch_0, branch_1]
	mixed = Concatenate(axis=3, name='Block8_6_Concatenate')(branches)
	up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_6_Conv2d_1x1') (mixed)
	up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 1})(up)
	x = add([x, up])
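	# note: no ReLU follows this final Block8 residual add (and its branch is added with scale=1)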
	
	# Classification block
	x = GlobalAveragePooling2D(name='AvgPool')(x)
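	# dropout rate written as 1.0 - keep_probability, with keep_probability = 0.8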
	x = Dropout(1.0 - 0.8, name='Dropout')(x)
	# Bottleneck
	x = Dense(128, use_bias=False, name='Bottleneck')(x)
	x = BatchNormalization(momentum=0.995, epsilon=0.001, scale=False, name='Bottleneck_BatchNorm')(x)

	# Create model
	model = Model(inputs, x, name='inception_resnet_v1')

	return model
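
The residual branches above are rescaled through a `scaling` helper that is defined elsewhere in this file; a minimal sketch of what it presumably looks like (an element-wise multiply by the given scale, matching how it is called via Lambda with arguments={'scale': ...}) is:

def scaling(x, scale):
    # scale the residual branch before it is added back to the trunk
    return x * scale
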
Example #7
0
def tiny_yolo4lite_mobilenet_body(inputs,
                                  num_anchors,
                                  num_classes,
                                  alpha=1.0,
                                  use_spp=True):
    '''Create Tiny YOLO_v4 Lite MobileNet model CNN body in Keras.'''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1 :13 x 13 x (1024*alpha) for 416 input
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha) for 416 input
    f2 = mobilenet.get_layer('conv_pw_11_relu').output

    f1_channel_num = int(1024 * alpha)
    f2_channel_num = int(512 * alpha)

    #feature map 1 head (13 x 13 x (512*alpha) for 416 input)
    x1 = DarknetConv2D_BN_Leaky(f1_channel_num // 2, (1, 1))(f1)
    if use_spp:
        x1 = Spp_Conv2D_BN_Leaky(x1, f1_channel_num // 2)

    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(DarknetConv2D_BN_Leaky(f2_channel_num // 2, (1, 1)),
                          UpSampling2D(2))(x1)
    x2 = compose(
        Concatenate(),
        #DarknetConv2D_BN_Leaky(f2_channel_num, (3,3)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=f2_channel_num,
                                            kernel_size=(3, 3),
                                            block_id_str='15'))(
                                                [x1_upsample, f2])

    #feature map 2 output (26 x 26 x (512*alpha) for 416 input)
    y2 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x2)

    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
        ZeroPadding2D(((1, 0), (1, 0))),
        #DarknetConv2D_BN_Leaky(f1_channel_num//2, (3,3), strides=(2,2)),
        Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num // 2,
                                                    (3, 3),
                                                    strides=(2, 2),
                                                    block_id_str='16'))(x2)
    x1 = compose(
        Concatenate(),
        #DarknetConv2D_BN_Leaky(f1_channel_num, (3,3)),
        Depthwise_Separable_Conv2D_BN_Leaky(filters=f1_channel_num,
                                            kernel_size=(3, 3),
                                            block_id_str='17'))(
                                                [x2_downsample, x1])

    #feature map 1 output (13 x 13 x (1024*alpha) for 416 input)
    y1 = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x1)

    return Model(inputs, [y1, y2])
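
A hypothetical way to instantiate this body (assuming the function and its Darknet-style helper layers are importable from the surrounding project; the anchor and class counts below are illustrative placeholders, not values from the original code):

from tensorflow.keras.layers import Input

inputs = Input(shape=(416, 416, 3))
# three anchors per scale and 20 classes are example values only
model = tiny_yolo4lite_mobilenet_body(inputs, num_anchors=3, num_classes=20, alpha=1.0, use_spp=True)
model.summary()  # two outputs: y1 at 13x13, y2 at 26x26
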
Example #8
0
def yoloNano(anchors,input_size=416,num_classes = 1,expension = .75,decay=0.0005):
    #f**k tensorflow 2.x
    #backbone
    input_0 = Input(shape=(input_size,input_size,3))
    input_gt = [Input(shape=(input_size//{0:32, 1:16, 2:8}[l], input_size//{0:32, 1:16, 2:8}[l],len(anchors)//3, num_classes+5)) for l in range(3)]
    x = Conv2D(filters=12,strides=(1,1),kernel_size=(3,3),use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(input_0)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=24,strides=(2,2),kernel_size=(3,3),use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_0 = LeakyReLU(alpha = 0.1)(x)
    #PEP(7)(208x208x24)
    x = Conv2D(filters=7,strides=(1,1),kernel_size=(1,1),use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x_0)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(24 * expension),strides=(1,1),kernel_size=(1,1),use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1,1),kernel_size=(3,3),use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=24,strides=(1,1),kernel_size=(1,1),use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = Add()([x_0 ,x])
    #EP(104x104x70)
    x = Conv2D(filters=math.ceil(70 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=70, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x_1 = BatchNormalization()(x)
    #PEP(25)(104x104x70)
    x = Conv2D(filters=25, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_1)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(70 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=70, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_2 = Add()([x_1, x])
    # PEP(24)(104x104x70)
    x = Conv2D(filters=24, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_2)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(70 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=70, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = Add()([x_2, x])
    # EP(52x52x150)
    x = Conv2D(filters=math.ceil(150 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=150, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x_3 = BatchNormalization()(x)
    # PEP(56)(52x52x150)
    x = Conv2D(filters=56, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_3)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=150, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = Add()([x_3, x])
    #Conv1x1
    x = Conv2D(filters=150,kernel_size=(1,1),strides=(1,1),use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_4 = LeakyReLU(alpha = 0.1)(x)
    #FCA(8)
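    # FCA = fully connected attention: a squeeze-and-excitation style channel gate
    # (global average pool -> reducing Dense -> sigmoid Dense -> channel-wise multiply)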
    x = AvgPool2D(pool_size=(52,52))(x_4)
    x = Dense(units=150 // 8,activation='relu',use_bias=False,kernel_regularizer=l2(l=decay))(x)
    x = Dense(units=150, activation='sigmoid', use_bias=False,kernel_regularizer=l2(l=decay))(x)
    x_5 = Multiply()([x_4,x])
    #PEP(73)(52x52x150)
    x = Conv2D(filters=73, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_5)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=150, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_6 = Add()([x_5, x])
    # PEP(71)(52x52x150)
    x = Conv2D(filters=71, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_6)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=150, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_7 = Add()([x_6, x])
    # PEP(75)(52x52x150)
    x = Conv2D(filters=75, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_7)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(150 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=150, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_8 = Add()([x_7, x]) #output 52x52x150
    #EP(26x26x325)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_8)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x_9 = BatchNormalization()(x)
    # PEP(132)(26x26x325)
    x = Conv2D(filters=132, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_9)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_10 = Add()([x_9, x])
    # PEP(124)(26x26x325)
    x = Conv2D(filters=124, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_10)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_11 = Add()([x_10, x])
    # PEP(141)(26x26x325)
    x = Conv2D(filters=141, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_11)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_12 = Add()([x_11, x])
    # PEP(140)(26x26x325)
    x = Conv2D(filters=140, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_12)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_13 = Add()([x_12, x])
    # PEP(137)(26x26x325)
    x = Conv2D(filters=137, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_13)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_14 = Add()([x_13, x])
    # PEP(135)(26x26x325)
    x = Conv2D(filters=135, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_14)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_15 = Add()([x_14, x])
    # PEP(133)(26x26x325)
    x = Conv2D(filters=133, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_15)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_16 = Add()([x_15, x])
    # PEP(140)(26x26x325)
    x = Conv2D(filters=140, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_16)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_17 = Add()([x_16, x]) #output 26x26x325
    # EP(13x13x545)
    x = Conv2D(filters=math.ceil(545 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_17)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(2, 2), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=545, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x_18 = BatchNormalization()(x)
    # PEP(276)(13x13x545)
    x = Conv2D(filters=276, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_18)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(545 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=545, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_19 = Add()([x_18, x])
    #Conv1x1
    x = Conv2D(filters=230, kernel_size=(1, 1), strides=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_19)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    # EP(13x13x489)
    x = Conv2D(filters=math.ceil(489 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=489, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(213)(13x13x469)
    x = Conv2D(filters=213, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(469 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=469, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # Conv1x1
    x = Conv2D(filters=189, kernel_size=(1, 1), strides=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_20 = LeakyReLU(alpha = 0.1)(x) #output 13x13x189
    # EP(13x13x462)
    x = Conv2D(filters=math.ceil(462 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_20)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=462, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # feature 13x13x[(num_classes+5)x3]
    feature_13x13 = Conv2D(filters=3 * (num_classes + 5), kernel_size=(1, 1), strides=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    # Conv1x1
    x = Conv2D(filters=105, kernel_size=(1, 1), strides=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_20)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    # upsampling 26x26x105
    x = UpSampling2D()(x)
    # concatenate
    x = Concatenate()([x,x_17])
    # PEP(113)(26x26x325)
    x = Conv2D(filters=113, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(325 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=325, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(99)(26x26x207)
    x = Conv2D(filters=99, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(207 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=207, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # Conv1x1
    x = Conv2D(filters=98, kernel_size=(1, 1), strides=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x_21 = LeakyReLU(alpha = 0.1)(x)
    # EP(13x13x183)
    x = Conv2D(filters=math.ceil(183 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_21)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=183, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # feature 26x26x[(num_classes+5)x3]
    feature_26x26 = Conv2D(filters=3 * (num_classes + 5), kernel_size=(1, 1), strides=(1, 1), use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x)
    # Conv1x1
    x = Conv2D(filters=47, kernel_size=(1, 1), strides=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x_21)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    #upsampling
    x = UpSampling2D()(x)
    #concatenate
    x = Concatenate()([x,x_8])
    # PEP(58)(52x52x132)
    x = Conv2D(filters=58, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(132 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=132, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(52)(52x52x87)
    x = Conv2D(filters=52, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(87 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=87, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    # PEP(47)(52x52x93)
    x = Conv2D(filters=47, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=math.ceil(93 * expension), strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = DepthwiseConv2D(strides=(1, 1), kernel_size=(3, 3), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha = 0.1)(x)
    x = Conv2D(filters=93, strides=(1, 1), kernel_size=(1, 1), use_bias=False, padding='same',kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    feature_52x52 = Conv2D(filters=3 * (num_classes + 5), kernel_size=(1, 1), strides=(1, 1), use_bias=False,padding='same',kernel_regularizer=l2(l=decay))(x)
    #loss layer
    loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})([feature_13x13,feature_26x26,feature_52x52, *input_gt])

    debug_model = tf.keras.Model(inputs=input_0,outputs=[feature_13x13,feature_26x26,feature_52x52])
    train_model = tf.keras.Model(inputs=[input_0,*input_gt],outputs=loss)
    return train_model,debug_model

# import numpy as np
# anchors = np.array([[6.,9.],[8.,13.],[11.,16.],[14.,22.],[17.,37.],[21.,26.],[29.,38.],[39.,62.],[79.,99.]],dtype='float32')
# model,_ = yoloNano(anchors,input_size=416,num_classes=1)
# model.summary()
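
The backbone above repeats the same PEP (projection-expansion-projection) pattern many times. Purely as an illustration (not part of the original code), one such block could be factored into a helper like the sketch below, reusing the same Keras layers, the l2 regularizer, and the math module already used by yoloNano:

def pep_block(x, proj_filters, out_filters, expension=0.75, decay=0.0005):
    # 1x1 projection -> 1x1 expansion -> 3x3 depthwise -> 1x1 projection, with a residual add
    shortcut = x
    x = Conv2D(filters=proj_filters, kernel_size=(1, 1), use_bias=False, padding='same', kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=math.ceil(out_filters * expension), kernel_size=(1, 1), use_bias=False, padding='same', kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = DepthwiseConv2D(kernel_size=(3, 3), use_bias=False, padding='same', kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(filters=out_filters, kernel_size=(1, 1), use_bias=False, padding='same', kernel_regularizer=l2(l=decay))(x)
    x = BatchNormalization()(x)
    return Add()([shortcut, x])  # assumes the input already has out_filters channels
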
Example #9
0
    def build(self):
        '''
        1. Build Code Representation Model
        '''
        logger.debug('Building Code Representation Model')
        methname = Input(shape=(self.data_params['methname_len'], ),
                         dtype='int32',
                         name='methname')
        apiseq = Input(shape=(self.data_params['apiseq_len'], ),
                       dtype='int32',
                       name='apiseq')
        tokens = Input(shape=(self.data_params['tokens_len'], ),
                       dtype='int32',
                       name='tokens')

        ## method name representation ##
        #1.embedding
        init_emb_weights = np.load(
            self.model_params['init_embed_weights_methname']
        ) if self.model_params[
            'init_embed_weights_methname'] is not None else None
        if init_emb_weights is not None: init_emb_weights = [init_emb_weights]
        embedding = Embedding(
            input_dim=self.data_params['n_words'],
            output_dim=self.model_params.get('n_embed_dims', 100),
            weights=init_emb_weights,
            mask_zero=False,  # whether 0 in the input is a special "padding" value that should be masked out;
            # if True, all subsequent layers in the model must support masking, otherwise an exception will be raised
            name='embedding_methname')
        methname_embedding = embedding(methname)
        dropout = Dropout(0.25, name='dropout_methname_embed')
        methname_dropout = dropout(methname_embedding)
        #2.rnn
        f_rnn = LSTM(self.model_params.get('n_lstm_dims', 128),
                     recurrent_dropout=0.2,
                     return_sequences=True,
                     name='lstm_methname_f')

        b_rnn = LSTM(self.model_params.get('n_lstm_dims', 128),
                     return_sequences=True,
                     recurrent_dropout=0.2,
                     name='lstm_methname_b',
                     go_backwards=True)
        methname_f_rnn = f_rnn(methname_dropout)
        methname_b_rnn = b_rnn(methname_dropout)
        dropout = Dropout(0.25, name='dropout_methname_rnn')
        methname_f_dropout = dropout(methname_f_rnn)
        methname_b_dropout = dropout(methname_b_rnn)
        #3.maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                         output_shape=lambda x: (x[0], x[2]),
                         name='maxpool_methname')
        methname_pool = Concatenate(name='concat_methname_lstms')(
            [maxpool(methname_f_dropout),
             maxpool(methname_b_dropout)])
        activation = Activation('tanh', name='active_methname')
        methname_repr = activation(methname_pool)

        ## API Sequence Representation ##
        #1.embedding
        embedding = Embedding(
            input_dim=self.data_params['n_words'],
            output_dim=self.model_params.get('n_embed_dims', 100),
            #weights=weights,
            mask_zero=False,  # whether 0 in the input is a special "padding" value that should be masked out;
            # if True, all subsequent layers must support masking, otherwise an exception will be raised
            name='embedding_apiseq')
        apiseq_embedding = embedding(apiseq)
        dropout = Dropout(0.25, name='dropout_apiseq_embed')
        apiseq_dropout = dropout(apiseq_embedding)
        #2.rnn
        f_rnn = LSTM(self.model_params.get('n_lstm_dims', 100),
                     return_sequences=True,
                     recurrent_dropout=0.2,
                     name='lstm_apiseq_f')
        b_rnn = LSTM(self.model_params.get('n_lstm_dims', 100),
                     return_sequences=True,
                     recurrent_dropout=0.2,
                     name='lstm_apiseq_b',
                     go_backwards=True)
        apiseq_f_rnn = f_rnn(apiseq_dropout)
        apiseq_b_rnn = b_rnn(apiseq_dropout)
        dropout = Dropout(0.25, name='dropout_apiseq_rnn')
        apiseq_f_dropout = dropout(apiseq_f_rnn)
        apiseq_b_dropout = dropout(apiseq_b_rnn)
        #3.maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                         output_shape=lambda x: (x[0], x[2]),
                         name='maxpool_apiseq')
        apiseq_pool = Concatenate(name='concat_apiseq_lstms')(
            [maxpool(apiseq_f_dropout),
             maxpool(apiseq_b_dropout)])
        activation = Activation('tanh', name='active_apiseq')
        apiseq_repr = activation(apiseq_pool)

        ## Tokens Representation ##
        #1.embedding
        init_emb_weights = np.load(
            self.model_params['init_embed_weights_tokens']
        ) if self.model_params[
            'init_embed_weights_tokens'] is not None else None
        if init_emb_weights is not None: init_emb_weights = [init_emb_weights]
        embedding = Embedding(
            input_dim=self.data_params['n_words'],
            output_dim=self.model_params.get('n_embed_dims', 100),
            weights=init_emb_weights,
            #mask_zero=True,#Whether 0 in the input is a special "padding" value that should be masked out.
            #If True, all subsequent layers must support masking, otherwise an exception will be raised.
            name='embedding_tokens')
        tokens_embedding = embedding(tokens)
        dropout = Dropout(0.25, name='dropout_tokens_embed')
        tokens_dropout = dropout(tokens_embedding)

        #4.maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                         output_shape=lambda x: (x[0], x[2]),
                         name='maxpool_tokens')
        tokens_pool = maxpool(tokens_dropout)
        activation = Activation('tanh', name='active_tokens')
        tokens_repr = activation(tokens_pool)

        ## concatenate the representation of code ##
        merged_methname_api = Concatenate(name='merge_methname_api')(
            [methname_repr, apiseq_repr])
        merged_code_repr = Concatenate(name='merge_coderepr')(
            [merged_methname_api, tokens_repr])
        code_repr = Dense(self.model_params.get('n_hidden', 400),
                          activation='tanh',
                          name='dense_coderepr')(merged_code_repr)

        self._code_repr_model = Model(inputs=[methname, apiseq, tokens],
                                      outputs=[code_repr],
                                      name='code_repr_model')
        '''
        2. Build Desc Representation Model
        '''
        ## Desc Representation ##
        logger.debug('Building Desc Representation Model')
        desc = Input(shape=(self.data_params['desc_len'], ),
                     dtype='int32',
                     name='desc')
        #1.embedding
        init_emb_weights = np.load(
            self.model_params['init_embed_weights_desc']
        ) if self.model_params['init_embed_weights_desc'] is not None else None
        if init_emb_weights is not None: init_emb_weights = [init_emb_weights]
        embedding = Embedding(
            input_dim=self.data_params['n_words'],
            output_dim=self.model_params.get('n_embed_dims', 100),
            weights=init_emb_weights,
            mask_zero=True,  # whether 0 in the input is a special "padding" value that should be masked out;
            # if True, all subsequent layers must support masking, otherwise an exception will be raised
            name='embedding_desc')
        desc_embedding = embedding(desc)
        dropout = Dropout(0.25, name='dropout_desc_embed')
        desc_dropout = dropout(desc_embedding)
        #2. rnn
        f_rnn = LSTM(self.model_params.get('n_lstm_dims', 100),
                     return_sequences=True,
                     recurrent_dropout=0.2,
                     name='lstm_desc_f')
        b_rnn = LSTM(self.model_params.get('n_lstm_dims', 100),
                     return_sequences=True,
                     recurrent_dropout=0.2,
                     name='lstm_desc_b',
                     go_backwards=True)
        desc_f_rnn = f_rnn(desc_dropout)
        desc_b_rnn = b_rnn(desc_dropout)
        dropout = Dropout(0.25, name='dropout_desc_rnn')
        desc_f_dropout = dropout(desc_f_rnn)
        desc_b_dropout = dropout(desc_b_rnn)
        #3. maxpooling
        maxpool = Lambda(lambda x: K.max(x, axis=1, keepdims=False),
                         output_shape=lambda x: (x[0], x[2]),
                         name='maxpool_desc')
        desc_pool = Concatenate(name='concat_desc_rnns')(
            [maxpool(desc_f_dropout),
             maxpool(desc_b_dropout)])
        activation = Activation('tanh', name='active_desc')
        desc_repr = activation(desc_pool)

        self._desc_repr_model = Model(inputs=[desc],
                                      outputs=[desc_repr],
                                      name='desc_repr_model')
        """
        3: calculate the cosine similarity between code and desc
        """
        logger.debug('Building similarity model')
        code_repr = self._code_repr_model([methname, apiseq, tokens])
        desc_repr = self._desc_repr_model([desc])
        cos_sim = Dot(axes=1, normalize=True,
                      name='cos_sim')([code_repr, desc_repr])

        sim_model = Model(inputs=[methname, apiseq, tokens, desc],
                          outputs=[cos_sim],
                          name='sim_model')
        self._sim_model = sim_model  #for model evaluation
        '''
        4:Build training model
        '''
        good_sim = sim_model(
            [self.methname, self.apiseq, self.tokens,
             self.desc_good])  # similarity of good output
        bad_sim = sim_model(
            [self.methname, self.apiseq, self.tokens,
             self.desc_bad])  #similarity of bad output
        loss = Lambda(lambda x: K.maximum(
            1e-6, self.model_params['margin'] - x[0] + x[1]),
                      output_shape=lambda x: x[0],
                      name='loss')([good_sim, bad_sim])
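        # margin ranking loss: nonzero whenever bad_sim is not at least `margin` below good_sim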

        logger.debug('Building training model')
        self._training_model = Model(inputs=[
            self.methname, self.apiseq, self.tokens, self.desc_good,
            self.desc_bad
        ],
                                     outputs=[loss],
                                     name='training_model')
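
Because the training model's output already is the pairwise ranking loss, joint-embedding models like this one are commonly compiled with a pass-through loss. A minimal sketch of such a step (a hypothetical helper method, not necessarily the author's exact training code):

    def compile_training_model(self, optimizer='adam'):
        # hypothetical helper: the training model's output is already the ranking loss,
        # so the Keras "loss" simply forwards the prediction
        self._training_model.compile(optimizer=optimizer,
                                     loss=lambda y_true, y_pred: y_pred)
        # at fit() time, y_true can be a dummy zeros array of shape (n_samples, 1)
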
Example #10
0
def model_unet(inp):
    """
    Define the U-Net model. See the accompanying report for a deeper explanation of this part of the code.

    :param inp (tf.keras.layers.Layer): input of the network
    :return: output (tf.keras.layers.Layer): last layer of the network (see the report)
    """
    padding = 'same'
    strides = (2, 2)
    kernel_size = (3, 3)

    conv1 = Conv2D(32, kernel_size, padding=padding)(inp)
    conv1 = LeakyReLU(alpha=0.2)(conv1)
    conv1 = Conv2D(32, kernel_size, padding=padding)(conv1)
    conv1 = LeakyReLU(alpha=0.2)(conv1)
    pool1 = MaxPooling2D(pool_size=(3, 3), strides=strides, padding=padding)(conv1)

    conv2 = Conv2D(64, kernel_size, padding=padding)(pool1)
    conv2 = LeakyReLU(alpha=0.2)(conv2)
    conv2 = Conv2D(64, kernel_size, padding=padding)(conv2)
    conv2 = LeakyReLU(alpha=0.2)(conv2)
    pool2 = MaxPooling2D(pool_size=(3, 3), strides=strides, padding=padding)(conv2)

    conv3 = Conv2D(128, kernel_size, padding=padding)(pool2)
    conv3 = LeakyReLU(alpha=0.2)(conv3)
    conv3 = Conv2D(128, kernel_size, padding=padding)(conv3)
    conv3 = LeakyReLU(alpha=0.2)(conv3)
    pool3 = MaxPooling2D(pool_size=kernel_size, strides=strides, padding=padding)(conv3)

    conv4 = Conv2D(256, kernel_size, padding=padding)(pool3)
    conv4 = LeakyReLU(alpha=0.2)(conv4)
    conv4 = Conv2D(256, kernel_size, padding=padding)(conv4)
    conv4 = LeakyReLU(alpha=0.2)(conv4)
    pool4 = MaxPooling2D(pool_size=(3, 3), strides=strides, padding=padding)(conv4)

    conv5 = Conv2D(512, kernel_size, padding=padding)(pool4)
    conv5 = LeakyReLU(alpha=0.2)(conv5)
    conv5 = Conv2D(512, kernel_size, padding=padding)(conv5)
    conv5 = LeakyReLU(alpha=0.2)(conv5)

    up6 = Conv2DTranspose(256, kernel_size, strides=strides, padding=padding)(
        conv5)
    up6 = Concatenate()([conv4, up6])
    conv6 = Conv2D(256, kernel_size, padding=padding)(up6)
    conv6 = LeakyReLU(alpha=0.2)(conv6)
    conv6 = Conv2D(256, kernel_size, padding=padding)(conv6)
    conv6 = LeakyReLU(alpha=0.2)(conv6)

    up7 = Conv2DTranspose(128, kernel_size, strides=strides, padding=padding)(
        conv6)
    up7 = Concatenate()([conv3, up7])
    conv7 = Conv2D(128, kernel_size, padding=padding)(up7)
    conv7 = LeakyReLU(alpha=0.2)(conv7)
    conv7 = Conv2D(128, kernel_size, padding=padding)(conv7)
    conv7 = LeakyReLU(alpha=0.2)(conv7)

    up8 = Conv2DTranspose(64, kernel_size, strides=strides, padding=padding)(
        conv7)
    up8 = Concatenate()([conv2, up8])
    conv8 = Conv2D(64, kernel_size, padding=padding)(up8)
    conv8 = LeakyReLU(alpha=0.2)(conv8)
    conv8 = Conv2D(64, kernel_size, padding=padding)(conv8)
    conv8 = LeakyReLU(alpha=0.2)(conv8)

    up9 = Conv2DTranspose(32, kernel_size, strides=strides, padding=padding)(
        conv8)
    up9 = Concatenate()([conv1, up9])
    conv9 = Conv2D(32, kernel_size, padding=padding)(up9)
    conv9 = LeakyReLU(alpha=0.2)(conv9)
    conv9 = Conv2D(32, kernel_size, padding=padding)(conv9)
    conv9 = LeakyReLU(alpha=0.2)(conv9)

    conv10 = Conv2D(12, kernel_size, padding=padding)(conv9)
    conv10 = LeakyReLU(alpha=0.2)(conv10)
    drop = Dropout(0.3)(conv10)

    output = Conv2D(3, kernel_size, padding=padding, activation='sigmoid')(drop)

    return output
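
A minimal sketch of turning this graph into a trainable model (the 256x256x3 input shape, optimizer, and loss are illustrative assumptions; any spatial size divisible by 16 works with the four stride-2 poolings):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input(shape=(256, 256, 3))   # hypothetical input resolution
out = model_unet(inp)              # 3-channel sigmoid output at the input resolution
unet = Model(inp, out, name='unet')
unet.compile(optimizer='adam', loss='mse')
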
Example #11
0
def create_model(l2, num_classes, num_ctrl_classes):
    ##############
    # BRANCH MODEL
    ##############
    regul = regularizers.l2(l2)
    #    optim = Adam(lr=lr)
    #    kwargs = {'kernel_regularizer': regul}

    base_model = efn.EfficientNetB2(input_shape=(network_shape[0],
                                                 network_shape[1], 3),
                                    weights='imagenet',
                                    include_top=False)

    input_tensor = Input(shape=network_shape, dtype=K.floatx())
    conv1 = Conv2D(32,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   activation='relu',
                   use_bias=False,
                   padding='same',
                   input_shape=(input_shape[0] + 6, input_shape[1] + 6,
                                input_shape[2]))
    layers = []
    layers.append(input_tensor)
    layers.append(conv1)
    layers[2:] = base_model.layers[2:]

    new_model = copy_model_graph(layers, base_model, input_tensor)

    weights = base_model.layers[1].get_weights()
    weight0 = weights[0]
    w = np.concatenate((weight0, weight0), axis=2)
    w = w / 2.0
    weights[0] = w
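    # the pretrained 3-channel stem kernels are duplicated along the input-channel axis
    # for the 6-channel input and halved so the activation scale stays roughly unchanged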
    #    weights.append(np.zeros((64),dtype='float32'))

    new_model.layers[1].set_weights(weights)

    inp = Input(shape=input_shape, dtype='uint8')  # 384x384x6
    x = Lambda(augment)(inp)
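    # `augment` is an externally defined preprocessing/augmentation function; it presumably
    # also converts the uint8 input to floats before it reaches the backbone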

    for layer in new_model.layers:
        if type(layer) is Conv2D:
            layer.kernel_regularizer = regul
    x = new_model(x)
    x = GlobalMaxPooling2D()(x)

    x = BatchNormalization()(x)
    x = Dropout(rate=0.5)(x)
    x = Flatten()(x)
    x = Dense(512, use_bias=False, kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)

    encoder_model = Model(inputs=inp, outputs=x)

    # softmax model for training encoder
    output_softmax = Dense(num_classes, use_bias=False,
                           activation='softmax')(x)
    softmax_model = Model(inputs=inp, outputs=output_softmax)

    #################
    # COMPARE MODEL #
    #################
    mid = 32
    xa_inp = Input(shape=encoder_model.output_shape[1:])
    xb_inp = Input(shape=encoder_model.output_shape[1:])
    x1 = Lambda(lambda x: x[0] * x[1])([xa_inp, xb_inp])
    x2 = Lambda(lambda x: x[0] + x[1])([xa_inp, xb_inp])
    x3 = Lambda(lambda x: x[0] - x[1])([xa_inp, xb_inp])
    x4 = Lambda(lambda x: K.square(x))(x3)
    head = Concatenate()([x1, x2, x3, x4])
    head = Reshape((4, encoder_model.output_shape[1], 1),
                   name='reshape1')(head)
    # Per feature NN with shared weight is implemented using CONV2D with appropriate stride.
    head = Conv2D(mid, (4, 1), activation='relu', padding='valid')(head)
    head = Reshape((encoder_model.output_shape[1], mid, 1))(head)
    head = Conv2D(1, (1, mid), activation='linear', padding='valid')(head)
    head = Flatten()(head)

    compare_model = Model([xa_inp, xb_inp], head)

    # process encoding from control
    # compare the current features to all controls
    features_controls = Input(
        shape=[num_ctrl_classes, encoder_model.output_shape[1]])
    fs = Lambda(lambda x: tf.unstack(x, axis=1))(features_controls)
    #    def create_mask(features_controls):
    #        # Use a function with a Keras Lambda layer wrapper to resolve a tensorflow issue.
    #        # https://stackoverflow.com/questions/50715928/valueerror-output-tensors-to-a-model-must-be-the-output-of-a-tensorflow-layer
    #        max_abs_features = K.max(K.abs(features_controls), axis=2)
    #        mask = tf.greater(max_abs_features, K.epsilon())
    #        mask = tf.expand_dims(tf.expand_dims(tf.dtypes.cast(mask, K.floatx()), axis=-1), axis=-1)
    #        return mask
    #    mask = Lambda(create_mask)(features_controls)
    comps = []
    for f in fs:
        comp = compare_model([x, f])
        comps.append(comp)
    c = Concatenate()(comps)
    c = Reshape((num_ctrl_classes, encoder_model.output_shape[1], 1))(c)
    #    c = Lambda(lambda x: tf.math.multiply(x[0], x[1]))([c, mask])

    #    compare = Lambda(compare_features)([x, features_controls])
    # Per feature NN with shared weight is implemented using CONV2D with appropriate stride.
    compare = Conv2D(mid, (num_ctrl_classes, 1),
                     activation='relu',
                     padding='valid')(c)
    compare = Reshape((encoder_model.output_shape[1], mid, 1))(compare)
    compare = Conv2D(1, (1, mid), activation='linear',
                     padding='valid')(compare)
    compare = Flatten(name='flatten2')(compare)

    feature_model = Model(inputs=[inp, features_controls], outputs=compare)

    label = Input(shape=(num_classes, ))

    output_arcface = ArcFace(num_classes, regularizer=regul)([compare, label])
    arcface_model = Model([inp, features_controls, label], output_arcface)

    output_cosface = CosFace(num_classes, regularizer=regul)([compare, label])
    cosface_model = Model([inp, features_controls, label], output_cosface)

    return encoder_model, softmax_model, feature_model, arcface_model, cosface_model
Example #12
0
features_num = 10
# Users embedding features.
user_input = Input(shape=[1])
user_embedding = Embedding(len(dataset.user_id.unique()) + 1,
                           features_num)(user_input)
user_vector = Flatten()(user_embedding)

# Books embedding features.
book_input = Input(shape=[1])
book_embedding = Embedding(len(dataset.book_id.unique()) + 1,
                           features_num)(book_input)
book_vector = Flatten()(book_embedding)

# Concatenate features.
concatenated = Concatenate()([book_vector, user_vector])

# Create model.
layer1 = Dense(128, activation='relu')(concatenated)
dropout1 = Dropout(0.1)(layer1)
layer2 = Dense(64, activation='relu')(dropout1)
dropout2 = Dropout(0.1)(layer2)
output = Dense(1)(dropout2)
model = Model([user_input, book_input], output)
model.compile('adam', 'mean_squared_error')

# Train.
model.fit([train.user_id, train.book_id],
          train.rating,
          epochs=15,
          batch_size=256)
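
A hypothetical evaluation step on a held-out split (assuming a `test` DataFrame with the same `user_id`, `book_id`, and `rating` columns as `train`):

# predict ratings for unseen (user, book) pairs
predictions = model.predict([test.user_id, test.book_id])
print(predictions[:5])
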
Example #13
0
    def __init__(self):
        super(MedModel, self).__init__()

        # Optimizer
        self.optimizer = tf.keras.optimizers.SGD(
            learning_rate=hp.learning_rate, momentum=hp.momentum)

        # Load instance of small model .h5 (get_appropriate layer and those weight)
        small_model = SmallModel()
        small_model(tf.keras.Input(shape=(8, 8, 3)))
        print(os.getcwd())
        small_model.load_weights("./models/small_weights.h5")
        self.small_model = small_model

        initializer = tf.keras.initializers.Ones()

        # Define Model Layers
        # First Conv Block
        self.med_conv1 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv1")
        self.upsamp_small_filters_conv1 = Conv2D(
            filters=64,
            kernel_size=3,
            kernel_initializer=self.small_conv1_init,
            padding='SAME',
            name='upsamp_small_filters_conv1',
            trainable=False)
        self.comb_tensors1 = Concatenate(axis=3, name="med_concat1")
        self.med_bn1 = BatchNormalization(name="med_bn1")
        self.med_relu1 = ReLU(name="med_relu1")

        # Second Conv Block
        self.med_conv2 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv2")
        self.down_med_relu1 = Conv2D(filters=64,
                                     kernel_size=1,
                                     padding='SAME',
                                     activation=None,
                                     kernel_initializer=initializer,
                                     name="reduce_filters",
                                     trainable=False)
        self.upsamp_small_filters_conv2 = Conv2D(
            filters=64,
            kernel_size=3,
            kernel_initializer=self.small_conv2_init,
            padding='SAME',
            name='upsamp_small_filters_conv2',
            trainable=False)
        self.comb_tensors2 = Concatenate(axis=3, name="med_concat2")
        self.med_bn2 = BatchNormalization(name="med_bn2")
        self.med_relu2 = ReLU(name="med_relu2")

        # Third Conv Block
        self.med_conv3 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv3")
        self.med_bn3 = BatchNormalization(name="med_bn3")
        self.med_relu3 = ReLU(name="med_relu3")

        # Fourth Conv Block
        self.med_conv4 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv4")
        self.med_bn4 = BatchNormalization(name="med_bn4")
        self.med_relu4 = ReLU(name="med_relu4")

        # Classification Part
        self.med_class_conv1 = Conv2D(filters=128,
                                      kernel_size=3,
                                      strides=2,
                                      padding='same',
                                      name="med_class_conv1")
        self.med_class_conv2 = Conv2D(filters=128,
                                      kernel_size=3,
                                      strides=2,
                                      padding='same',
                                      name="med_class_conv2")
        self.med_class_flatten = Flatten(name="med_class_flatten")
        self.med_class_dense = Dense(units=10, activation='softmax')
Example #14
class MedModel(tf.keras.Model):
    def __init__(self):
        super(MedModel, self).__init__()

        # Optimizer
        self.optimizer = tf.keras.optimizers.SGD(
            learning_rate=hp.learning_rate, momentum=hp.momentum)

        # Load an instance of the small model from .h5 (to get the appropriate layers and their weights)
        small_model = SmallModel()
        small_model(tf.keras.Input(shape=(8, 8, 3)))
        print(os.getcwd())
        small_model.load_weights("./models/small_weights.h5")
        self.small_model = small_model

        initializer = tf.keras.initializers.Ones()

        # Define Model Layers
        # First Conv Block
        self.med_conv1 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv1")
        self.upsamp_small_filters_conv1 = Conv2D(
            filters=64,
            kernel_size=3,
            kernel_initializer=self.small_conv1_init,
            padding='SAME',
            name='upsamp_small_filters_conv1',
            trainable=False)
        self.comb_tensors1 = Concatenate(axis=3, name="med_concat1")
        self.med_bn1 = BatchNormalization(name="med_bn1")
        self.med_relu1 = ReLU(name="med_relu1")

        # Second Conv Block
        self.med_conv2 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv2")
        self.down_med_relu1 = Conv2D(filters=64,
                                     kernel_size=1,
                                     padding='SAME',
                                     activation=None,
                                     kernel_initializer=initializer,
                                     name="reduce_filters",
                                     trainable=False)
        self.upsamp_small_filters_conv2 = Conv2D(
            filters=64,
            kernel_size=3,
            kernel_initializer=self.small_conv2_init,
            padding='SAME',
            name='upsamp_small_filters_conv2',
            trainable=False)
        self.comb_tensors2 = Concatenate(axis=3, name="med_concat2")
        self.med_bn2 = BatchNormalization(name="med_bn2")
        self.med_relu2 = ReLU(name="med_relu2")

        # Third Conv Block
        self.med_conv3 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv3")
        self.med_bn3 = BatchNormalization(name="med_bn3")
        self.med_relu3 = ReLU(name="med_relu3")

        # Fourth Conv Block
        self.med_conv4 = Conv2D(filters=64,
                                kernel_size=3,
                                strides=1,
                                padding='SAME',
                                activation=None,
                                name="med_conv4")
        self.med_bn4 = BatchNormalization(name="med_bn4")
        self.med_relu4 = ReLU(name="med_relu4")

        # Classification Part
        self.med_class_conv1 = Conv2D(filters=128,
                                      kernel_size=3,
                                      strides=2,
                                      padding='same',
                                      name="med_class_conv1")
        self.med_class_conv2 = Conv2D(filters=128,
                                      kernel_size=3,
                                      strides=2,
                                      padding='same',
                                      name="med_class_conv2")
        self.med_class_flatten = Flatten(name="med_class_flatten")
        self.med_class_dense = Dense(units=10, activation='softmax')

    # This function returns small_conv1_filters
    def small_conv1_init(self, shape, dtype=None):
        small_conv1_filters, biases = self.small_model.get_layer(
            "small_conv1").get_weights()
        return small_conv1_filters

    # This function returns small_conv2_filters
    def small_conv2_init(self, shape, dtype=None):
        small_conv2_filters, biases = self.small_model.get_layer(
            "small_conv2").get_weights()
        return small_conv2_filters

    def call(self, inputs, training=False):
        """
        The call function is inherited from Model. It defines the behaviour of the model.
        In this function we will connect the layers we defined in __init__ together.
        Please review Connection Scheme and observe naming conventions.

        :param inputs: these are the images that are passed in shape (batches, height, width, channels)
        :param training: BOOL this is a MODEL param that indicates if we are training or testing... I'm still trying to figure this out...
        :return: stuff (softmax class probabilities in this case)
        """

        # Connect First Med Conv Block (layers are called directly; the deprecated Layer.apply is avoided)
        med_conv1 = self.med_conv1(inputs)
        upsamp_small_filters_conv1 = self.upsamp_small_filters_conv1(inputs)
        comb_tensors1 = self.comb_tensors1(
            [med_conv1, upsamp_small_filters_conv1])
        med_bn1 = self.med_bn1(comb_tensors1)
        med_relu1 = self.med_relu1(med_bn1)

        # Connect Second Med Conv Block
        med_conv2 = self.med_conv2(med_relu1)
        down_samp_relu1 = self.down_med_relu1(med_relu1)
        upsamp_small_filters_conv2 = self.upsamp_small_filters_conv2(
            down_samp_relu1)
        comb_tensors2 = self.comb_tensors2(
            [med_conv2, upsamp_small_filters_conv2])
        med_bn2 = self.med_bn2(comb_tensors2)
        med_relu2 = self.med_relu2(med_bn2)

        # Connect Third Med Conv Block
        med_conv3 = self.med_conv3(med_relu2)
        med_bn3 = self.med_bn3(med_conv3)
        med_relu3 = self.med_relu3(med_bn3)

        # Connect Fourth Med Conv Block
        med_conv4 = self.med_conv4(med_relu3)
        med_bn4 = self.med_bn4(med_conv4)
        med_relu4 = self.med_relu4(med_bn4)

        # Connect Classification Block
        med_class_conv1 = self.med_class_conv1(med_relu4)
        med_class_conv2 = self.med_class_conv2(med_class_conv1)
        med_class_flatten = self.med_class_flatten(med_class_conv2)
        med_class_dense = self.med_class_dense(med_class_flatten)

        # if training:
        #     output = med_class_dense
        # else:
        #     #pred = np.argmax(med_class_dense)
        #     #conf = np.max(med_class_dense)
        #     #output = [pred, conf]

        return med_class_dense

    @staticmethod
    def loss_fn(labels, predictions):
        """ Loss function for model. """

        return tf.keras.losses.sparse_categorical_crossentropy(
            labels, predictions, from_logits=False)
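# Hypothetical usage sketch (not part of the original class): one manual training step
# using the optimizer and loss_fn defined on MedModel. The `images`/`labels` batch
# tensors are assumptions, and "./models/small_weights.h5" must exist on disk.
model = MedModel()
with tf.GradientTape() as tape:
    predictions = model(images, training=True)
    loss = tf.reduce_mean(MedModel.loss_fn(labels, predictions))
grads = tape.gradient(loss, model.trainable_variables)
model.optimizer.apply_gradients(zip(grads, model.trainable_variables))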
Example #15
    def identity_block(x,
                       filters_in,
                       filters_out,
                       cardinality=32,
                       ratio=16,
                       init_weights=None):
        """ Construct a ResNeXT block with identity link
            x          : input to block
            filters_in : number of filters  (channels) at the input convolution
            filters_out: number of filters (channels) at the output convolution
            cardinality: width of cardinality layer
            ratio      : amount of filter reduction during squeeze
        """
        if init_weights is None:
            init_weights = SEResNeXt.init_weights

        # Remember the input
        shortcut = x

        # Dimensionality Reduction
        x = Conv2D(filters_in,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same',
                   kernel_initializer=init_weights)(shortcut)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Cardinality (Wide) Layer (split-transform)
        filters_card = filters_in // cardinality
        groups = []
        for i in range(cardinality):
            group = Lambda(lambda z: z[:, :, :, i * filters_card:i *
                                       filters_card + filters_card])(x)
            groups.append(
                Conv2D(filters_card,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       kernel_initializer=init_weights)(group))

        # Concatenate the outputs of the cardinality layer together (merge)
        x = Concatenate()(groups)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Dimensionality restoration
        x = Conv2D(filters_out,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same',
                   kernel_initializer=init_weights)(x)
        x = BatchNormalization()(x)

        # Pass the output through the squeeze and excitation block
        x = SEResNeXt.squeeze_excite_block(x, ratio, init_weights)

        # Identity Link: Add the shortcut (input) to the output of the block
        x = Add()([shortcut, x])
        x = ReLU()(x)
        return x
Example #16
# Decoder Layer
decoder_layer = GRU(hidden_size,
                    return_sequences=True,
                    dropout=dropout,
                    recurrent_dropout=dropout / 2,
                    name='decode_layer')
decoder_outputs = decoder_layer(decoder_embedding,
                                initial_state=encoder_states)

# Attention Layer
attention_layer = Attention(name='attention_layer')
attention_outputs = attention_layer([decoder_outputs, encoder_outputs])

# Concatenate the Result of Attention and the Hidden States of Decoder
decoder_concat_inputs = Concatenate(
    axis=-1, name='concatenate_layer')([decoder_outputs, attention_outputs])

# Output Layer
output_layer = Dense(num_of_morphemes, activation=softmax, name='output_layer')
outputs = output_layer(decoder_concat_inputs)

# Define Model
model = Model(inputs=[encoder_inputs, decoder_inputs],
              outputs=outputs,
              name='training_model')

# Compile
model.compile(optimizer=Adam(learning_rate=learning_late),
              loss=sparse_categorical_crossentropy)

# Display Model Summary
model.summary()
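# Hypothetical training sketch (not part of the original snippet): the encoder tensors
# used above are defined elsewhere; the padded id arrays below are assumptions.
model.fit([encoder_input_seqs, decoder_input_seqs],
          decoder_target_seqs,
          batch_size=64,
          epochs=10,
          validation_split=0.1)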
Example #17
    def projection_block(x,
                         filters_in,
                         filters_out,
                         cardinality=32,
                         strides=1,
                         ratio=16,
                         init_weights=None):
        """ Construct a ResNeXT block with projection shortcut
            x          : input to the block
            filters_in : number of filters  (channels) at the input convolution
            filters_out: number of filters (channels) at the output convolution
            cardinality: width of cardinality layer
            strides    : whether entry convolution is strided (i.e., (2, 2) vs (1, 1))
            ratio      : amount of filter reduction during squeeze
        """
        if init_weights is None:
            init_weights = SEResNeXt.init_weights

        # Construct the projection shortcut
        # Increase filters by 2X to match shape when added to output of block
        shortcut = Conv2D(filters_out,
                          kernel_size=(1, 1),
                          strides=strides,
                          padding='same',
                          kernel_initializer=init_weights)(x)
        shortcut = BatchNormalization()(shortcut)

        # Dimensionality Reduction
        x = Conv2D(filters_in,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same',
                   kernel_initializer=init_weights)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Cardinality (Wide) Layer (split-transform)
        filters_card = filters_in // cardinality
        groups = []
        for i in range(cardinality):
            group = Lambda(lambda z: z[:, :, :, i * filters_card:i *
                                       filters_card + filters_card])(x)
            groups.append(
                Conv2D(filters_card,
                       kernel_size=(3, 3),
                       strides=strides,
                       padding='same',
                       kernel_initializer=init_weights)(group))

        # Concatenate the outputs of the cardinality layer together (merge)
        x = Concatenate()(groups)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Dimensionality restoration
        x = Conv2D(filters_out,
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same',
                   kernel_initializer=init_weights)(x)
        x = BatchNormalization()(x)

        # Pass the output through the squeeze and excitation block
        x = SEResNeXt.squeeze_excite_block(x, ratio, init_weights)

        # Add the projection shortcut (input) to the output of the block
        x = Add()([shortcut, x])
        x = ReLU()(x)
        return x
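# Hypothetical composition sketch (an assumption, not code from the original class):
# a SE-ResNeXt stage is typically one projection block followed by identity blocks.
def residual_group(x, n_blocks, filters_in, filters_out, strides=(2, 2)):
    # The first block projects the shortcut so channel counts match for the Add().
    x = SEResNeXt.projection_block(x, filters_in, filters_out, strides=strides)
    # The remaining blocks preserve the shape, so an identity shortcut suffices.
    for _ in range(n_blocks - 1):
        x = SEResNeXt.identity_block(x, filters_in, filters_out)
    return x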
Example #18
# Model 2
input2 = Input(shape=(3, ))
dense2 = Dense(10, activation='relu')(input2)
dense2 = Dense(5, activation='relu')(dense2)
dense2 = Dense(5, activation='relu')(dense2)
dense2 = Dense(5, activation='relu')(dense2)
# output2 = Dense(3)(dense2)

# Merge the models / concatenate
from tensorflow.keras.layers import Concatenate
# from keras.layers.merge import concatenate, Concatenate
# from keras.layers import concatenate, Concatenate

# merge = combine
merge1 = Concatenate()([dense1, dense2])  # pass in the last dense variable of each model
middle1 = Dense(30)(merge1)
middle1 = Dense(10)(middle1)
middle1 = Dense(10)(middle1)

# Model branch 1
output1 = Dense(30)(middle1)
output1 = Dense(7)(output1)
output1 = Dense(3)(output1)

# Model branch 2
output2 = Dense(15)(middle1)
output2 = Dense(7)(output2)
output2 = Dense(7)(output2)
output2 = Dense(3)(output2)
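# Hypothetical continuation sketch: the original snippet omits "Model 1" (input1/dense1)
# and the final model definition; a typical wiring for this two-input, two-branch
# network would be:
from tensorflow.keras.models import Model

model = Model(inputs=[input1, input2], outputs=[output1, output2])
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
model.summary()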
Example #19
    def bayes_unet_model_3d_hybrid(self,
                                   filter_root,
                                   depth,
                                   categorical_kccs,
                                   voxel_dim=64,
                                   deviation_channels=3,
                                   output_heads=2):
        """Build the 3D Model using the specified loss function, the inputs are parsed from the assemblyconfig_<case_study_name>.py file

			:param voxel_dim: The voxel dimension of the input, required to build input to the 3D CNN model
			:type voxel_dim: int (required)

			:param voxel_channels: The number of voxel channels in the input structure, required to build input to the 3D CNN model
			:type voxel_channels: int (required)
		"""

        import numpy as np
        import tensorflow as tf
        import tensorflow_probability as tfp
        import tensorflow.keras.backend as K
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import (Conv3D, MaxPooling3D, Add,
                                             BatchNormalization, Input,
                                             Activation, Lambda, Concatenate,
                                             Flatten, Dense, UpSampling3D,
                                             GlobalAveragePooling3D, add,
                                             multiply)
        from tensorflow.keras.utils import plot_model

        tfd = tfp.distributions

        #Testing
        #reg_kccs=self.output_dimension-categorical_kccs

        #Testing
        reg_kccs = 148 - categorical_kccs

        # Probabilistic Losses
        negloglik = lambda y, rv_y: -rv_y.log_prob(y)
        bin_crossentropy = tf.keras.losses.BinaryCrossentropy()
        mse_basic = tf.keras.losses.MeanSquaredError()

        #Can also Try
        #scale_factor=4000/64 #number of samples/batchsize
        #kl = sum(model.losses) / scale_factor
        #Annealing of KL divergence
        #bin_crossentropy=tf.keras.losses.BinaryCrossentropy()+kl* K.get_value(kl_alpha)

        overall_loss_dict = {
            "regression_outputs": negloglik,
            "classification_outputs": bin_crossentropy,
            "shape_error_outputs": mse_basic
        }

        overall_loss_weights = {
            "regression_outputs": 2.0,
            "classification_outputs": 2.0,
            "shape_error_outputs": 1.0
        }

        overall_metrics_dict = {
            "regression_outputs": [tf.keras.metrics.MeanAbsoluteError()],
            "classification_outputs": [tf.keras.metrics.CategoricalAccuracy()],
            "shape_error_outputs": [tf.keras.metrics.MeanAbsoluteError()]
        }

        #constant aleatoric uncertainty
        aleatoric_std = 0.001
        aleatoric_std_cop = 0.001
        aleatoric_tensor = [aleatoric_std] * reg_kccs

        tfd = tfp.distributions
        kl_divergence_function = (
            lambda q, p, _: tfd.kl_divergence(q, p) /
            (tf.cast(4000, dtype=tf.float32) / tf.cast(64, dtype=tf.float32)))

        c = np.log(np.expm1(1.))

        long_connection_store = {}

        Conv = Conv3D
        MaxPooling = MaxPooling3D
        UpSampling = UpSampling3D

        activation = 'relu'
        final_activation = 'linear'

        def attention_block(x, g, inter_channel):

            theta_x = Conv(inter_channel, [1, 1, 1], strides=[1, 1, 1])(x)
            phi_g = Conv(inter_channel, [1, 1, 1], strides=[1, 1, 1])(g)

            f = Activation('relu')(add([theta_x, phi_g]))
            psi_f = Conv(1, [1, 1, 1], strides=[1, 1, 1])(f)

            rate = Activation('sigmoid')(psi_f)

            att_x = multiply([x, rate])
            return att_x

        input_size = (voxel_dim, voxel_dim, voxel_dim, deviation_channels)
        inputs = Input(input_size)
        x = inputs

        # Down sampling
        for i in range(depth):
            out_channel = 2**i * filter_root

            # Residual/Skip connection
            res = tfp.layers.Convolution3DFlipout(
                out_channel,
                kernel_size=1,
                kernel_divergence_fn=kl_divergence_function,
                padding='same',
                name="Identity{}_1".format(i))(x)

            # First Conv Block with Conv, BN and activation
            conv1 = tfp.layers.Convolution3DFlipout(
                out_channel,
                kernel_size=3,
                kernel_divergence_fn=kl_divergence_function,
                padding='same',
                name="Conv{}_1".format(i))(x)
            #if batch_norm:
            #conv1 = BatchNormalization(name="BN{}_1".format(i))(conv1)
            act1 = Activation(activation, name="Act{}_1".format(i))(conv1)

            # Second Conv block with Conv and BN only
            conv2 = tfp.layers.Convolution3DFlipout(
                out_channel,
                kernel_size=3,
                padding='same',
                kernel_divergence_fn=kl_divergence_function,
                name="Conv{}_2".format(i))(act1)
            #if batch_norm:
            #conv2 = BatchNormalization(name="BN{}_2".format(i))(conv2)

            resconnection = Add(name="Add{}_1".format(i))([res, conv2])

            act2 = Activation(activation,
                              name="Act{}_2".format(i))(resconnection)

            # Max pooling
            if i < depth - 1:
                long_connection_store[str(i)] = act2
                x = MaxPooling(padding='same',
                               name="MaxPooling{}_1".format(i))(act2)
            else:
                x = act2

        feature_vector_reg = Conv(reg_kccs,
                                  1,
                                  padding='same',
                                  activation=final_activation,
                                  name='Process_Parameter_Reg_output')(x)
        process_parameter_reg = GlobalAveragePooling3D()(feature_vector_reg)
        #process_parameter_reg=tfp.layers.DenseFlipout(64,kernel_divergence_fn=kl_divergence_function)(process_parameter_reg)
        #process_parameter_reg=tfp.layers.DenseFlipout(123,kernel_divergence_fn=kl_divergence_function)(process_parameter_reg)

        feature_vector_cla = Conv(categorical_kccs,
                                  1,
                                  padding='same',
                                  activation=final_activation,
                                  name='Process_Parameter_Cla_output')(x)
        process_parameter_cla = GlobalAveragePooling3D()(feature_vector_cla)
        #process_parameter_cla=tfp.layers.DenseFlipout(64,kernel_divergence_fn=kl_divergence_function)(process_parameter_cla)
        #process_parameter_cla=tfp.layers.DenseFlipout(25,kernel_divergence_fn=kl_divergence_function)(process_parameter_cla)
        #feature_categorical=Flatten()(feature_vector)
        #reg_output=tfp.layers.DenseFlipout(output_dimension,kernel_divergence_fn=kl_divergence_function)(process_parameter)

        #Process Parameter Outputs
        reg_distrbution = tfp.layers.DistributionLambda(
            lambda t: tfd.MultivariateNormalDiag(loc=t[..., :reg_kccs],
                                                 scale_diag=aleatoric_tensor),
            name="regression_outputs")(process_parameter_reg)
        cla_distrbution = Activation(
            'sigmoid', name="classification_outputs")(process_parameter_cla)
        #cla_distrbution=tfp.layers.DenseFlipout(categorical_kccs, kernel_divergence_fn=kl_divergence_function,activation=tf.nn.sigmoid,name="classification_outputs")(process_parameter_cla)

        # Upsampling
        for i in range(depth - 2, -1, -1):
            out_channel = 2**(i) * filter_root

            # long connection from down sampling path.
            long_connection = long_connection_store[str(i)]

            up1 = UpSampling(name="UpSampling{}_1".format(i))(x)
            up_conv1 = Conv(out_channel,
                            2,
                            activation='relu',
                            padding='same',
                            name="upConvSam{}_1".format(i))(up1)

            attention_layer = attention_block(x=long_connection,
                                              g=up_conv1,
                                              inter_channel=out_channel // 4)
            #  Concatenate.

            #up_conc = Concatenate(axis=-1, name="upConcatenate{}_1".format(i))([up_conv1, long_connection])
            up_conc = Concatenate(axis=-1, name="upConcatenate{}_1".format(i))(
                [up_conv1, attention_layer])

            #  Convolutions
            up_conv2 = Conv(out_channel,
                            3,
                            padding='same',
                            name="upConv{}_1".format(i))(up_conc)

            up_act1 = Activation(activation,
                                 name="upAct{}_1".format(i))(up_conv2)

            up_conv2 = Conv(out_channel,
                            3,
                            padding='same',
                            name="upConv{}_2".format(i))(up_act1)

            # Residual/Skip connection
            res = Conv(out_channel,
                       kernel_size=1,
                       padding='same',
                       use_bias=False,
                       name="upIdentity{}_1".format(i))(up_conc)

            resconnection = Add(name="upAdd{}_1".format(i))([res, up_conv2])

            x = Activation(activation,
                           name="upAct{}_2".format(i))(resconnection)

        output_list = []
        output_list.append(reg_distrbution)
        output_list.append(cla_distrbution)

        output = Conv(deviation_channels * output_heads,
                      1,
                      padding='same',
                      activation=final_activation,
                      name='shape_error_outputs')(x)
        output_list.append(output)

        model = Model(inputs, outputs=output_list, name='Hybrid_Unet_Model')

        #Loss Dictionary Created
        #model_losses=[]
        #model_losses.append(negloglik)
        #model_losses.append(bin_crossentropy)

        model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001),
                      experimental_run_tf_function=False,
                      loss=overall_loss_dict,
                      metrics=overall_metrics_dict,
                      loss_weights=overall_loss_weights)
        print("3D CNN model successfully compiled")
        print(model.summary())

        #plot_model(model,to_file='Bayes_OSER2.png',show_shapes=True, show_layer_names=True)

        return model
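# Hypothetical usage sketch (not part of the original method): `dl_model` (an instance
# of the surrounding class), the voxel array and the three target arrays are assumptions.
model = dl_model.bayes_unet_model_3d_hybrid(filter_root=16,
                                             depth=4,
                                             categorical_kccs=25)
model.fit(x_voxels,
          {"regression_outputs": y_regression,
           "classification_outputs": y_classification,
           "shape_error_outputs": y_shape_error},
          batch_size=32,
          epochs=50)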
Example #20
def u_net_module_hp(hp):

    ############################## Special Fun ###################################################

    # embedding = tf.Variable(initial_value = np.load('../Embedding/embedd_w_5_ogt.npy').reshape(1,21,5))

    ############################## Transformation module #################################################

    p = {
        'n_fil_1': hp.Choice('number_filters_conv1', [8, 16, 32, 64]),
        's_fil_1': hp.Choice('size_filters_conv1', [3, 5, 7, 10]),
        'stride_1': hp.Choice('stride_length_sampling1', [2, 4]),
        'dOut_1': hp.Choice('Dropout_module1', [0.1, 0.2, 0.3, 0.4]),
        'n_fil_2': hp.Choice('number_filters_conv2', [32, 64, 128]),
        's_fil_2': hp.Choice('size_filters_conv2', [3, 5, 7, 10]),
        'stride_2': hp.Choice('stride_length_sampling2', [2, 4]),
        'dOut_2': hp.Choice('Dropout_module2', [0.1, 0.2, 0.3, 0.4]),
        'n_fil_3': hp.Choice('number_filters_conv3', [64, 128, 256]),
        's_fil_3': hp.Choice('size_filters_conv3', [3, 5, 7, 10]),
        'stride_3': hp.Choice('stride_length_sampling3', [2, 4]),
        'dOut_3': hp.Choice('Dropout_module3', [0.1, 0.2, 0.3, 0.4]),
        'n_fil_4': hp.Choice('number_filters_conv4', [128, 256, 512]),
        's_fil_4': hp.Choice('size_filters_conv4', [3, 5, 7, 10]),
        'dOut_4': hp.Choice('Dropout_module4', [0.1, 0.2, 0.3, 0.4]),
        'dOut_5': hp.Choice('Dropout_module5', [0.05, 0.1, 0.2, 0.3]),
        's_fil_5': hp.Choice('size_filters_conv5', [3, 5, 7, 10])
    }

    # Layers of stage 0 contraction

    inp = Input(shape=(1024, 21))

    #tct0_in =  Dot(axes=(2,1))([inp,embedding])
    tct0_bn1 = BatchNormalization()(inp)  #tct0_in)
    #tf.keras.regularizers.L1L2(l1=0.0, l2=0.0)
    tct0_conv1 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_0')(tct0_bn1)
    tct0_bn2 = BatchNormalization()(tct0_conv1)
    tct0_conv2 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_1')(tct0_bn2)
    tct0_bn3 = BatchNormalization()(tct0_conv2)
    tct0_conv3 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        strides=int(p['stride_1']),
                        padding='same',
                        name='Convolution_Ct0_2')(tct0_bn3)
    tct0_bn4 = BatchNormalization()(tct0_conv3)
    #tct0_max   = MaxPool1D(pool_size=2, strides=2)(tct0_bn2)
    tct0_dp = Dropout(p['dOut_1'])(tct0_bn4)

    # Layers of stage 1 contraction

    tct1_conv1 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct1_0')(tct0_dp)
    tct1_bn1 = BatchNormalization()(tct1_conv1)
    tct1_conv2 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        strides=1,
                        padding='same',
                        name='Convolution_Ct1_1')(tct1_bn1)
    tct1_bn2 = BatchNormalization()(tct1_conv2)
    tct1_conv3 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        strides=int(p['stride_2']),
                        padding='same',
                        name='Convolution_Ct1_2')(tct1_bn2)
    tct1_bn3 = BatchNormalization()(tct1_conv3)
    #tct1_max   = MaxPool1D(pool_size=2, strides=2)(tct1_bn2)
    tct1_dp = Dropout(p['dOut_2'])(tct1_bn3)

    # Layers of stage 2 contraction

    tct2_conv1 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct2_0')(tct1_dp)
    tct2_bn1 = BatchNormalization()(tct2_conv1)
    tct2_conv2 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        strides=1,
                        padding='same',
                        name='Convolution_Ct2_1')(tct2_bn1)
    tct2_bn2 = BatchNormalization()(tct2_conv2)
    tct2_conv3 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        strides=int(p['stride_3']),
                        padding='same',
                        name='Convolution_Ct2_2')(tct2_bn2)
    tct2_bn3 = BatchNormalization()(tct2_conv3)
    #tct2_max   = MaxPool1D(pool_size=2, strides=2)(tct2_bn2)
    tct2_dp = Dropout(p['dOut_3'])(tct2_bn3)

    # Layers of stage 3 contraction

    tct3_conv1 = Conv1D(int(p['n_fil_4']),
                        int(p['s_fil_4']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_0')(tct2_dp)
    tct3_bn1 = BatchNormalization()(tct3_conv1)
    tct3_conv2 = Conv1D(int(p['n_fil_4']),
                        int(p['s_fil_4']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_1')(tct3_bn1)
    tct3_bn2 = BatchNormalization()(tct3_conv2)
    tct3_dp = Dropout(p['dOut_4'])(tct3_bn2)

    # Layers of stage 1 expansion

    tet1_Tconv = Conv1DTranspose(int(p['n_fil_3']),
                                 int(p['s_fil_3']),
                                 strides=int(p['stride_3']),
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et1')(tct3_dp)
    tet1_Concat = Concatenate(axis=2)([tet1_Tconv, tct2_conv1])
    tet1_bn1 = BatchNormalization()(tet1_Concat)
    tet1_conv1 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_0')(tet1_bn1)
    tet1_bn2 = BatchNormalization()(tet1_conv1)
    tet1_conv2 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_1')(tet1_bn2)
    tet1_bn3 = BatchNormalization()(tet1_conv2)
    tet1_dp = Dropout(p['dOut_3'])(tet1_bn3)

    #Layers of stage 2 expansion

    tet2_Tconv = Conv1DTranspose(int(p['n_fil_2']),
                                 int(p['s_fil_2']),
                                 strides=int(p['stride_2']),
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et2')(tet1_dp)
    tet2_Concat = Concatenate(axis=2)([tet2_Tconv, tct1_conv1])
    tet2_bn1 = BatchNormalization()(tet2_Concat)
    tet2_conv1 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_0')(tet2_bn1)
    tet2_bn2 = BatchNormalization()(tet2_conv1)
    tet2_conv2 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_1')(tet2_bn2)
    tet2_bn3 = BatchNormalization()(tet2_conv2)
    tet2_dp = Dropout(p['dOut_2'])(tet2_bn3)

    #Layers of stage 3 expansion

    tet3_Tconv = Conv1DTranspose(int(p['n_fil_1']),
                                 int(p['s_fil_1']),
                                 strides=int(p['stride_1']),
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et3')(tet2_dp)
    tet3_Concat = Concatenate(axis=2)([tet3_Tconv, tct0_conv1])
    tet3_bn1 = BatchNormalization()(tet3_Concat)
    tet3_conv1 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_1')(tet3_bn1)
    tet3_bn2 = BatchNormalization()(tet3_conv1)
    tet3_conv2 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_2')(tet3_bn2)
    tet3_bn3 = BatchNormalization()(tet3_conv2)
    tet3_dp = Dropout(p['dOut_5'])(tet3_bn3)
    tet3_conv3 = Conv1D(3,
                        int(p['s_fil_5']),
                        activation='softmax',
                        padding='same',
                        name='Convolution_Et3_3')(tet3_dp)

    model = Model(inputs=inp, outputs=tet3_conv3)
    # Note: the final layer uses a softmax activation, so the loss must expect
    # probabilities rather than logits.
    cce = tf.keras.losses.CategoricalCrossentropy(
        from_logits=False)  #, sample_weight = )
    add = tf.keras.optimizers.Adam(learning_rate=hp.Choice(
        'learning_rate', [1e-2, 1e-3, 1e-4]),
                                   name='Adam')
    model.compile(optimizer=add, loss=cce, metrics=['accuracy'])

    return model
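# Hypothetical tuning sketch (not part of the original function): KerasTuner supplies
# the `hp` argument; the keras_tuner import, dataset arrays and tuner settings are
# assumptions.
import keras_tuner as kt

tuner = kt.RandomSearch(u_net_module_hp,
                        objective='val_accuracy',
                        max_trials=20,
                        directory='tuning',
                        project_name='u_net')
tuner.search(x_train, y_train, epochs=10, validation_data=(x_val, y_val))
best_model = tuner.get_best_models(num_models=1)[0]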
Example #21
def yolo4lite_mobilenet_body(inputs, num_anchors, num_classes, alpha=1.0):
    '''Create YOLO_v4 Lite MobileNet model CNN body in keras.'''
    mobilenet = MobileNet(input_tensor=inputs,
                          weights='imagenet',
                          include_top=False,
                          alpha=alpha)

    # input: 416 x 416 x 3
    # conv_pw_13_relu :13 x 13 x (1024*alpha)
    # conv_pw_11_relu :26 x 26 x (512*alpha)
    # conv_pw_5_relu : 52 x 52 x (256*alpha)

    # f1: 13 x 13 x (1024*alpha) for 416 input
    f1 = mobilenet.get_layer('conv_pw_13_relu').output
    # f2: 26 x 26 x (512*alpha) for 416 input
    f2 = mobilenet.get_layer('conv_pw_11_relu').output
    # f3: 52 x 52 x (256*alpha) for 416 input
    f3 = mobilenet.get_layer('conv_pw_5_relu').output

    f1_channel_num = int(1024 * alpha)
    f2_channel_num = int(512 * alpha)
    f3_channel_num = int(256 * alpha)

    #feature map 1 head (13 x 13 x (512*alpha) for 416 input)
    x1 = make_yolo_spp_depthwise_separable_head(f1,
                                                f1_channel_num // 2,
                                                block_id_str='14')

    #upsample fpn merge for feature map 1 & 2
    x1_upsample = compose(DarknetConv2D_BN_Leaky(f2_channel_num // 2, (1, 1)),
                          UpSampling2D(2))(x1)

    x2 = DarknetConv2D_BN_Leaky(f2_channel_num // 2, (1, 1))(f2)
    x2 = Concatenate()([x2, x1_upsample])

    #feature map 2 head (26 x 26 x (256*alpha) for 416 input)
    x2 = make_yolo_depthwise_separable_head(x2,
                                            f2_channel_num // 2,
                                            block_id_str='15')

    #upsample fpn merge for feature map 2 & 3
    x2_upsample = compose(DarknetConv2D_BN_Leaky(f3_channel_num // 2, (1, 1)),
                          UpSampling2D(2))(x2)

    x3 = DarknetConv2D_BN_Leaky(f3_channel_num // 2, (1, 1))(f3)
    x3 = Concatenate()([x3, x2_upsample])

    #feature map 3 head & output (52 x 52 x (256*alpha) for 416 input)
    #x3, y3 = make_depthwise_separable_last_layers(x3, f3_channel_num//2, num_anchors*(num_classes+5), block_id_str='16')
    x3 = make_yolo_depthwise_separable_head(x3,
                                            f3_channel_num // 2,
                                            block_id_str='16')
    y3 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(f3_channel_num, (3, 3),
                                            block_id_str='16_3'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x3)

    #downsample fpn merge for feature map 3 & 2
    x3_downsample = compose(
        ZeroPadding2D(((1, 0), (1, 0))),
        Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num // 2,
                                                    (3, 3),
                                                    strides=(2, 2),
                                                    block_id_str='16_4'))(x3)

    x2 = Concatenate()([x3_downsample, x2])

    #feature map 2 output (26 x 26 x (512*alpha) for 416 input)
    #x2, y2 = make_depthwise_separable_last_layers(x2, f2_channel_num//2, num_anchors*(num_classes+5), block_id_str='17')
    x2 = make_yolo_depthwise_separable_head(x2,
                                            f2_channel_num // 2,
                                            block_id_str='17')
    y2 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(f2_channel_num, (3, 3),
                                            block_id_str='17_3'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x2)

    #downsample fpn merge for feature map 2 & 1
    x2_downsample = compose(
        ZeroPadding2D(((1, 0), (1, 0))),
        Darknet_Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num // 2,
                                                    (3, 3),
                                                    strides=(2, 2),
                                                    block_id_str='17_4'))(x2)

    x1 = Concatenate()([x2_downsample, x1])

    #feature map 1 output (13 x 13 x (1024*alpha) for 416 input)
    #x1, y1 = make_depthwise_separable_last_layers(x1, f1_channel_num//2, num_anchors*(num_classes+5), block_id_str='18')
    x1 = make_yolo_depthwise_separable_head(x1,
                                            f1_channel_num // 2,
                                            block_id_str='18')
    y1 = compose(
        Depthwise_Separable_Conv2D_BN_Leaky(f1_channel_num, (3, 3),
                                            block_id_str='18_3'),
        DarknetConv2D(num_anchors * (num_classes + 5), (1, 1)))(x1)

    return Model(inputs, [y1, y2, y3])
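# Hypothetical usage sketch (not part of the original function): building the lite body
# for a 416x416 input; the anchor and class counts and the alpha value are assumptions,
# and Input is assumed to be tf.keras.layers.Input.
inputs = Input(shape=(416, 416, 3))
model = yolo4lite_mobilenet_body(inputs, num_anchors=3, num_classes=80, alpha=0.5)
model.summary()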
Example #22
def u_net_module(p):

    ############################## Special Fun ###################################################

    # embedding = tf.Variable(initial_value = np.load('../Embedding/embedd_w_5_ogt.npy').reshape(1,21,5))

    ############################## Transformation module #################################################

    # Layers of stage 0 contraction

    inp = Input(shape=(1024, ))
    #w = Input(shape=(1024,))

    #tct0_in =  Dot(axes=(2,1))([inp,embedding])
    #tct0_bn1   = BatchNormalization()(inp)#tct0_in)
    #tf.keras.regularizers.L1L2(l1=0.0, l2=0.0)
    x = Embedding(22, 5)(inp)
    x = BatchNormalization()(x)
    tct0_conv1 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_0')(x)
    tct0_bn2 = BatchNormalization()(tct0_conv1)
    tct0_conv2 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_1')(tct0_bn2)
    tct0_bn3 = BatchNormalization()(tct0_conv2)
    tct0_conv3 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        strides=int(p['stride_1']),
                        padding='same',
                        name='Convolution_Ct0_2')(tct0_bn3)
    tct0_bn4 = BatchNormalization()(tct0_conv3)
    #tct0_max   = MaxPool1D(pool_size=2, strides=2)(tct0_bn2)
    tct0_dp = Dropout(p['dOut_1'])(tct0_bn4)

    # Layers of stage 1 contraction

    tct1_conv1 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct1_0')(tct0_dp)
    tct1_bn1 = BatchNormalization()(tct1_conv1)
    tct1_conv2 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        strides=1,
                        padding='same',
                        name='Convolution_Ct1_1')(tct1_bn1)
    tct1_bn2 = BatchNormalization()(tct1_conv2)
    tct1_conv3 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        strides=int(p['stride_2']),
                        padding='same',
                        name='Convolution_Ct1_2')(tct1_bn2)
    tct1_bn3 = BatchNormalization()(tct1_conv3)
    #tct1_max   = MaxPool1D(pool_size=2, strides=2)(tct1_bn2)
    tct1_dp = Dropout(p['dOut_2'])(tct1_bn3)

    # Layers of stage 2 contraction

    tct2_conv1 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct2_0')(tct1_dp)
    tct2_bn1 = BatchNormalization()(tct2_conv1)
    tct2_conv2 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        strides=1,
                        padding='same',
                        name='Convolution_Ct2_1')(tct2_bn1)
    tct2_bn2 = BatchNormalization()(tct2_conv2)
    tct2_conv3 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        strides=int(p['stride_3']),
                        padding='same',
                        name='Convolution_Ct2_2')(tct2_bn2)
    tct2_bn3 = BatchNormalization()(tct2_conv3)
    #tct2_max   = MaxPool1D(pool_size=2, strides=2)(tct2_bn2)
    tct2_dp = Dropout(p['dOut_3'])(tct2_bn3)

    # Layers of stage 3 contraction

    tct3_conv1 = Conv1D(int(p['n_fil_4']),
                        int(p['s_fil_4']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_0')(tct2_dp)
    tct3_bn1 = BatchNormalization()(tct3_conv1)
    tct3_conv2 = Conv1D(int(p['n_fil_4']),
                        int(p['s_fil_4']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_1')(tct3_bn1)
    tct3_bn2 = BatchNormalization()(tct3_conv2)
    tct3_dp = Dropout(p['dOut_4'])(tct3_bn2)

    # Layers of stage 1 expansion

    # padding='same' keeps the upsampled length aligned with the skip connection
    tet1_Tconv = Conv1DTranspose(int(p['n_fil_3']),
                                 int(p['s_fil_3']),
                                 strides=int(p['stride_3']),
                                 padding='same',
                                 name='TransConv_Et1')(tct3_dp)
    tet1_Concat = Concatenate(axis=2)([tet1_Tconv, tct2_conv1])
    tet1_bn1 = BatchNormalization()(tet1_Concat)
    tet1_conv1 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_0')(tet1_bn1)
    tet1_bn2 = BatchNormalization()(tet1_conv1)
    tet1_conv2 = Conv1D(int(p['n_fil_3']),
                        int(p['s_fil_3']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_1')(tet1_bn2)
    tet1_bn3 = BatchNormalization()(tet1_conv2)
    tet1_dp = Dropout(p['dOut_3'])(tet1_bn3)

    #Layers of stage 2 expansion

    tet2_Tconv = Conv1DTranspose(int(p['n_fil_2']),
                                 int(p['s_fil_2']),
                                 strides=int(p['stride_2']),
                                 padding='same',
                                 name='TransConv_Et2')(tet1_dp)
    tet2_Concat = Concatenate(axis=2)([tet2_Tconv, tct1_conv1])
    tet2_bn1 = BatchNormalization()(tet2_Concat)
    tet2_conv1 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_0')(tet2_bn1)
    tet2_bn2 = BatchNormalization()(tet2_conv1)
    tet2_conv2 = Conv1D(int(p['n_fil_2']),
                        int(p['s_fil_2']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_1')(tet2_bn2)
    tet2_bn3 = BatchNormalization()(tet2_conv2)
    tet2_dp = Dropout(p['dOut_2'])(tet2_bn3)

    #Layers of stage 3 expansion

    tet3_Tconv = Conv1DTranspose(int(p['n_fil_1']),
                                 int(p['s_fil_1']),
                                 strides=int(p['stride_1']),
                                 padding='same',
                                 name='TransConv_Et3')(tet2_dp)
    tet3_Concat = Concatenate(axis=2)([tet3_Tconv, tct0_conv1])
    tet3_bn1 = BatchNormalization()(tet3_Concat)
    tet3_conv1 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_1')(tet3_bn1)
    tet3_bn2 = BatchNormalization()(tet3_conv1)
    tet3_conv2 = Conv1D(int(p['n_fil_1']),
                        int(p['s_fil_1']),
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_2')(tet3_bn2)
    tet3_bn3 = BatchNormalization()(tet3_conv2)
    tet3_dp = Dropout(p['dOut_5'])(tet3_bn3)
    tet3_conv3 = Conv1D(3,
                        int(p['s_fil_5']),
                        activation='softmax',
                        padding='same',
                        name='Convolution_Et3_3')(tet3_dp)
    # Note: the final layer uses a softmax activation, so the loss must expect
    # probabilities rather than logits.
    cce = tf.keras.losses.CategoricalCrossentropy(
        from_logits=False)  # , sample_weight = w)
    add = tf.keras.optimizers.Adam(learning_rate=1e-2, name='Adam')
    model = Model(inputs=inp, outputs=tet3_conv3)
    model.compile(optimizer=add,
                  loss=cce,
                  metrics=['accuracy'],
                  sample_weight_mode="temporal")

    return model
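# Hypothetical usage sketch (not part of the original function): a hand-picked
# hyperparameter dict with the keys the function reads; the values are assumptions.
p = {
    'n_fil_1': 32, 's_fil_1': 5, 'stride_1': 2, 'dOut_1': 0.2,
    'n_fil_2': 64, 's_fil_2': 5, 'stride_2': 2, 'dOut_2': 0.2,
    'n_fil_3': 128, 's_fil_3': 5, 'stride_3': 2, 'dOut_3': 0.2,
    'n_fil_4': 256, 's_fil_4': 5, 'dOut_4': 0.3,
    'dOut_5': 0.1, 's_fil_5': 5,
}
model = u_net_module(p)
model.summary()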
Example #23
def tiny_unit(telescope,
              image_mode,
              image_mask,
              input_img_shape,
              input_features_shape,
              targets,
              target_mode,
              target_shapes=None,
              latent_variables=64,
              conv_layers_blocks=2,
              dense_layer_blocks=3,
              ignore_telescopes=False,
              activity_regularizer_l2=None):
    """Build Deterministic CNN Unit Model
    Parameters
    ==========
        telescope
        image_mode
        image_mask
        input_img_shape
        input_features_shape
        output_mode
        output_shape
    Return
    ======
        keras.Model 
    """
    # Support 'lineal' target mode only
    if target_mode != 'lineal':
        raise ValueError(f"Invalid target_mode: '{target_mode}'")

    # Image Encoding Block
    ## HexConvLayer
    input_img = Input(name="image_input", shape=input_img_shape)
    if image_mode == "simple-shift":
        front = HexConvLayer(filters=32,
                             kernel_size=(3, 3),
                             name="encoder_hex_conv_layer")(input_img)
    elif image_mode == "simple":
        front = Conv2D(name="encoder_conv_layer_0",
                       filters=32,
                       kernel_size=(3, 3),
                       kernel_initializer="he_uniform",
                       padding="valid",
                       activation="relu")(input_img)
        front = MaxPooling2D(name=f"encoder_max_poolin_layer_0",
                             pool_size=(2, 2))(front)
    else:
        raise ValueError(f"Invalid image mode {image_mode}")

    ## convolutional layers
    conv_kernel_sizes = [3] * conv_layers_blocks
    filters = 32
    for i, kernel_size in enumerate(conv_kernel_sizes, start=1):
        front = Conv2D(name=f"encoder_conv_layer_{i}_a",
                       filters=filters,
                       kernel_size=kernel_size,
                       kernel_initializer="he_uniform",
                       padding="same")(front)
        front = Activation(name=f"encoder_ReLU_{i}_a",
                           activation="relu")(front)
        front = BatchNormalization(name=f"encoder_batchnorm_{i}_a")(front)
        front = Conv2D(name=f"encoder_conv_layer_{i}_b",
                       filters=filters,
                       kernel_size=kernel_size,
                       kernel_initializer="he_uniform",
                       padding="same")(front)
        front = Activation(name=f"encoder_ReLU_{i}_b",
                           activation="relu")(front)
        front = BatchNormalization(name=f"encoder_batchnorm_{i}_b")(front)
        front = MaxPooling2D(name=f"encoder_maxpool_layer_{i}",
                             pool_size=(2, 2))(front)
        filters *= 2

    front = Flatten(name="encoder_flatten_to_latent")(front)

    # Logic Block
    ## extra Telescope Features
    input_params = Input(name="feature_input", shape=input_features_shape)
    if ignore_telescopes:
        input_params = Lambda(lambda x: x * 0)(input_params)
    front = Concatenate()([input_params, front])

    ## dense blocks
    l2_ = lambda activity_regularizer_l2: None if activity_regularizer_l2 is None else l2(
        activity_regularizer_l2)
    for dense_i in range(dense_layer_blocks):
        front = Dense(name=f"logic_dense_{dense_i}",
                      units=latent_variables,
                      kernel_regularizer=l2_(activity_regularizer_l2))(front)
        front = Activation(name=f"logic_ReLU_{dense_i}",
                           activation="relu")(front)
        front = BatchNormalization(name=f"logic_batchnorm_{dense_i}")(front)

    # Output block
    output = Dense(len(targets), activation="linear")(front)

    model_name = f"Tiny_Unit_{telescope}"
    model = Model(name=model_name,
                  inputs=[input_img, input_params],
                  outputs=output)
    return model
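# Hypothetical usage sketch (not part of the original function): building and compiling
# the unit model; the telescope name, shapes and target list are assumptions.
model = tiny_unit(telescope='LST',
                  image_mode='simple',
                  image_mask=None,
                  input_img_shape=(55, 55, 2),
                  input_features_shape=(3,),
                  targets=['alt', 'az', 'log10_mc_energy'],
                  target_mode='lineal')
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()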
Example #24
def u_net_module_2(in_):

    ############################## Special Fun ###################################################

    embedding = tf.Variable(
        initial_value=np.load('../Embedding/embedd_w_5.npy').reshape(1, 21, 5),
        trainable=True)

    ############################## Transformation module #################################################

    # Layers of stage 0 contraction

    inp = Input(shape=(in_, 21))

    tct0_in = Dot(axes=(2, 1))([inp, embedding])
    tct0_bn1 = BatchNormalization()(tct0_in)
    tct0_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_0')(tct0_bn1)
    tct0_bn2 = BatchNormalization()(tct0_conv1)
    tct0_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_1')(tct0_bn2)
    tct0_bn3 = BatchNormalization()(tct0_conv2)
    tct0_conv3 = Conv1D(32,
                        5,
                        activation='relu',
                        strides=2,
                        padding='same',
                        name='Convolution_Ct0_2')(tct0_bn3)
    tct0_bn4 = BatchNormalization()(tct0_conv3)
    #tct0_max   = MaxPool1D(pool_size=2, strides=2)(tct0_bn2)
    tct0_dp = Dropout(0.2)(tct0_bn4)

    # Layers of stage 1 contraction

    tct1_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct1_0')(tct0_dp)
    tct1_bn1 = BatchNormalization()(tct1_conv1)
    tct1_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        strides=1,
                        padding='same',
                        name='Convolution_Ct1_1')(tct1_bn1)
    tct1_bn2 = BatchNormalization()(tct1_conv2)
    tct1_conv3 = Conv1D(32,
                        5,
                        activation='relu',
                        strides=2,
                        padding='same',
                        name='Convolution_Ct1_2')(tct1_bn2)
    tct1_bn3 = BatchNormalization()(tct1_conv3)
    #tct1_max   = MaxPool1D(pool_size=2, strides=2)(tct1_bn2)
    tct1_dp = Dropout(0.2)(tct1_bn3)

    # Layers of stage 2 contraction

    tct2_conv1 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct2_0')(tct1_dp)
    tct2_bn1 = BatchNormalization()(tct2_conv1)
    tct2_conv2 = Conv1D(64,
                        5,
                        activation='relu',
                        strides=1,
                        padding='same',
                        name='Convolution_Ct2_1')(tct2_bn1)
    tct2_bn2 = BatchNormalization()(tct2_conv2)
    tct2_conv3 = Conv1D(64,
                        5,
                        activation='relu',
                        strides=2,
                        padding='same',
                        name='Convolution_Ct2_2')(tct2_bn2)
    tct2_bn3 = BatchNormalization()(tct2_conv3)
    #tct2_max   = MaxPool1D(pool_size=2, strides=2)(tct2_bn2)
    tct2_dp = Dropout(0.2)(tct2_bn3)

    # Layers of stage 3 contraction

    tct3_conv1 = Conv1D(128,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_0')(tct2_dp)
    tct3_bn1 = BatchNormalization()(tct3_conv1)
    tct3_conv2 = Conv1D(128,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_1')(tct3_bn1)
    tct3_bn2 = BatchNormalization()(tct3_conv2)
    tct3_dp = Dropout(0.2)(tct3_bn2)

    # Layers of stage 1 expansion

    tet1_Tconv = Conv1DTranspose(64,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et1')(tct3_dp)
    tet1_Concat = Concatenate(axis=2)([tet1_Tconv, tct2_conv1])
    tet1_bn1 = BatchNormalization()(tet1_Concat)
    tet1_conv1 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_0')(tet1_bn1)
    tet1_bn2 = BatchNormalization()(tet1_conv1)
    tet1_conv2 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_1')(tet1_bn2)
    tet1_bn3 = BatchNormalization()(tet1_conv2)
    tet1_dp = Dropout(0.2)(tet1_bn3)

    #Layers of stage 2 expansion

    tet2_Tconv = Conv1DTranspose(32,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et2')(tet1_dp)
    tet2_Concat = Concatenate(axis=2)([tet2_Tconv, tct1_conv1])
    tet2_bn1 = BatchNormalization()(tet2_Concat)
    tet2_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_0')(tet2_bn1)
    tet2_bn2 = BatchNormalization()(tet2_conv1)
    tet2_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_1')(tet2_bn2)
    tet2_bn3 = BatchNormalization()(tet2_conv2)
    tet2_dp = Dropout(0.2)(tet2_bn3)

    #Layers of stage 3 expansion

    tet3_Tconv = Conv1DTranspose(32,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et3')(tet2_dp)
    tet3_Concat = Concatenate(axis=2)([tet3_Tconv, tct0_conv1])
    tet3_bn1 = BatchNormalization()(tet3_Concat)
    tet3_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_1')(tet3_bn1)
    tet3_bn2 = BatchNormalization()(tet3_conv1)
    tet3_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_2')(tet3_bn2)
    tet3_bn3 = BatchNormalization()(tet3_conv2)
    tet3_dp = Dropout(0.1)(tet3_bn3)
    tet3_conv3 = Conv1D(3,
                        5,
                        activation='softmax',
                        padding='same',
                        name='Convolution_Et3_3')(tet3_dp)

    ################################################ Stage 2 ######################################################

    tet3_Concat2 = Concatenate(axis=2)([tct0_in, tet3_conv3])
    tct0_bn12 = BatchNormalization()(tet3_Concat2)
    tct0_conv12 = Conv1D(32,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Ct0_02')(tct0_bn12)
    tct0_bn22 = BatchNormalization()(tct0_conv12)
    tct0_conv22 = Conv1D(32,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Ct0_12')(tct0_bn22)
    tct0_bn32 = BatchNormalization()(tct0_conv22)
    tct0_conv32 = Conv1D(32,
                         5,
                         activation='relu',
                         strides=2,
                         padding='same',
                         name='Convolution_Ct0_22')(tct0_bn32)
    tct0_bn42 = BatchNormalization()(tct0_conv32)
    #tct0_max   = MaxPool1D(pool_size=2, strides=2)(tct0_bn2)
    tct0_dp2 = Dropout(0.2)(tct0_bn42)

    # Layers of stage 1 contraction

    tct1_conv12 = Conv1D(32,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Ct1_02')(tct0_dp2)
    tct1_bn12 = BatchNormalization()(tct1_conv12)
    tct1_conv22 = Conv1D(32,
                         5,
                         activation='relu',
                         strides=1,
                         padding='same',
                         name='Convolution_Ct1_12')(tct1_bn12)
    tct1_bn22 = BatchNormalization()(tct1_conv22)
    tct1_conv32 = Conv1D(32,
                         5,
                         activation='relu',
                         strides=2,
                         padding='same',
                         name='Convolution_Ct1_22')(tct1_bn22)
    tct1_bn32 = BatchNormalization()(tct1_conv32)
    #tct1_max   = MaxPool1D(pool_size=2, strides=2)(tct1_bn2)
    tct1_dp2 = Dropout(0.2)(tct1_bn32)

    # Layers of stage 2 contraction

    tct2_conv12 = Conv1D(64,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Ct2_02')(tct1_dp2)
    tct2_bn12 = BatchNormalization()(tct2_conv12)
    tct2_conv22 = Conv1D(64,
                         5,
                         activation='relu',
                         strides=1,
                         padding='same',
                         name='Convolution_Ct2_12')(tct2_bn12)
    tct2_bn22 = BatchNormalization()(tct2_conv22)
    tct2_conv32 = Conv1D(64,
                         5,
                         activation='relu',
                         strides=2,
                         padding='same',
                         name='Convolution_Ct2_22')(tct2_bn22)
    tct2_bn32 = BatchNormalization()(tct2_conv32)
    #tct2_max   = MaxPool1D(pool_size=2, strides=2)(tct2_bn2)
    tct2_dp2 = Dropout(0.2)(tct2_bn32)

    # Layers of stage 3 contraction

    tct3_conv12 = Conv1D(128,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Ce3_02')(tct2_dp2)
    tct3_bn12 = BatchNormalization()(tct3_conv12)
    tct3_conv22 = Conv1D(128,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Ce3_12')(tct3_bn12)
    tct3_bn22 = BatchNormalization()(tct3_conv22)
    tct3_dp2 = Dropout(0.2)(tct3_bn22)

    # Layers of stage 1 expansion

    tet1_Tconv2 = Conv1DTranspose(64,
                                  5,
                                  strides=2,
                                  activation='relu',
                                  padding='same',
                                  name='TransConv_Et12')(tct3_dp2)
    tet1_Concat2 = Concatenate(axis=2)([tet1_Tconv2, tct2_conv12])
    tet1_bn12 = BatchNormalization()(tet1_Concat2)
    tet1_conv12 = Conv1D(64,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Et1_02')(tet1_bn12)
    tet1_bn22 = BatchNormalization()(tet1_conv12)
    tet1_conv22 = Conv1D(64,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Et1_12')(tet1_bn22)
    tet1_bn32 = BatchNormalization()(tet1_conv22)
    tet1_dp2 = Dropout(0.2)(tet1_bn32)

    #Layers of stage 2 expansion

    tet2_Tconv2 = Conv1DTranspose(32,
                                  5,
                                  strides=2,
                                  activation='relu',
                                  padding='same',
                                  name='TransConv_Et22')(tet1_dp2)
    tet2_Concat2 = Concatenate(axis=2)([tet2_Tconv2, tct1_conv12])
    tet2_bn12 = BatchNormalization()(tet2_Concat2)
    tet2_conv12 = Conv1D(32,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Et2_02')(tet2_bn12)
    tet2_bn22 = BatchNormalization()(tet2_conv12)
    tet2_conv22 = Conv1D(32,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Et2_12')(tet2_bn22)
    tet2_bn32 = BatchNormalization()(tet2_conv22)
    tet2_dp2 = Dropout(0.2)(tet2_bn32)

    #Layers of stage 3 expansion

    tet3_Tconv2 = Conv1DTranspose(32,
                                  5,
                                  strides=2,
                                  activation='relu',
                                  padding='same',
                                  name='TransConv_Et32')(tet2_dp2)
    tet3_Concat2 = Concatenate(axis=2)([tet3_Tconv2, tct0_conv12])
    tet3_bn12 = BatchNormalization()(tet3_Concat2)
    tet3_conv12 = Conv1D(32,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Et3_12')(tet3_bn12)
    tet3_bn22 = BatchNormalization()(tet3_conv12)
    tet3_conv22 = Conv1D(32,
                         5,
                         activation='relu',
                         padding='same',
                         name='Convolution_Et3_22')(tet3_bn22)
    tet3_bn32 = BatchNormalization()(tet3_conv22)
    tet3_dp2 = Dropout(0.1)(tet3_bn32)
    tet3_conv32 = Conv1D(3,
                         5,
                         activation='softmax',
                         padding='same',
                         name='Convolution_Et3_32')(tet3_dp2)

    model = Model(inputs=inp, outputs=[tet3_conv3, tet3_conv32])

    return model
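A hedged usage sketch for the two-output network returned above, assuming `model` is the Model built at the end of the function; the optimizer, losses and loss weights below are assumptions, not settings from the original source.

model.compile(optimizer='adam',
              # one categorical loss per softmax output (stage-1 and refined stage-2 prediction)
              loss=['categorical_crossentropy', 'categorical_crossentropy'],
              loss_weights=[0.5, 1.0],  # assumption: weight the refined stage-2 output higher
              metrics=['accuracy'])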
Пример #25
0
    x2, y2, y3, train_size=0.6, shuffle=False)

m1_input = Input(shape=(3, ), name='model1_input')
m1_dense1 = Dense(50, activation='relu', name='m1_d1')(m1_input)
m1_dense2 = Dense(45, activation='relu', name='m1_d2')(m1_dense1)
m1_dense3 = Dense(40, activation='relu', name='m1_d3')(m1_dense2)
m1_output = Dense(35, name='m1_out')(m1_dense3)

m2_input = Input(shape=(3, ))
m2_dense1 = Dense(50, activation='relu', name='m2_d1')(m2_input)
m2_dense2 = Dense(45, activation='relu', name='m2_d2')(m2_dense1)
m2_dense3 = Dense(40, activation='relu', name='m2_d3')(m2_dense2)
m2_output = Dense(35, name='m2_out')(m2_dense3)

# merge the two sub-models
merge1 = Concatenate()([m1_output, m2_output])
# merge1 =  Concatenate(axis=1)([m1_output, m2_output])
# merge1 =  Concatenate(axis=0)([m1_output, m2_output])

middle1 = Dense(33, name='middle_d1')(merge1)
middle2 = Dense(31, name='middle_d2')(middle1)
middle3 = Dense(29, name='middle_d3')(middle2)

# output models
output1 = Dense(25, name='out1_d1')(middle3)
output1 = Dense(10, name='out1_d2')(output1)
output1 = Dense(3, name='out1_d3')(output1)

output2 = Dense(25, name='out2_d1')(middle3)
output2 = Dense(10, name='out2_d2')(output2)
output2 = Dense(3, name='out2_d3')(output2)
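The snippet stops before the model object is built; a minimal sketch of assembling and compiling the two-input, two-output network above (the optimizer and loss choices are assumptions):

from tensorflow.keras.models import Model

model = Model(inputs=[m1_input, m2_input], outputs=[output1, output2])
model.compile(optimizer='adam', loss='mse', metrics=['mae'])  # assumed regression setup
model.summary()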
Пример #26
0
def cnn_cycle_gan():

    ############################## Transformation module #################################################

    # Layers of stage 0 contraction

    tct0_in = Input(shape=(in_, 21))  # functional-API input; `in_` (sequence length) is defined outside this snippet
    tct0_conv1 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_1')(tct0_in)
    tct0_bn1 = BatchNormalization()(tct0_conv1)
    tct0_conv2 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_2')(tct0_bn1)
    tct0_bn2 = BatchNormalization()(tct0_conv2)
    tct0_max = MaxPool1D(pool_size=2, strides=2)(tct0_bn2)
    tct0_dp = Dropout(0.2)(tct0_max)

    # Layers of stage 1 contraction

    tct1_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct1_1')(tct0_dp)
    tct1_bn1 = BatchNormalization()(tct1_conv1)
    tct1_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct1_2')(tct1_bn1)
    tct1_bn2 = BatchNormalization()(tct1_conv2)
    tct1_max = MaxPool1D(pool_size=2, strides=2)(tct1_bn2)
    tct1_dp = Dropout(0.2)(tct1_max)

    # Layers of stage 2 contraction

    tct2_conv1 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct2_1')(tct1_dp)
    tct2_bn1 = BatchNormalization()(tct2_conv1)
    tct2_conv2 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct2_2')(tct2_bn1)
    tct2_bn2 = BatchNormalization()(tct2_conv2)
    tct2_max = MaxPool1D(pool_size=2, strides=2)(tct2_bn2)
    tct2_dp = Dropout(0.2)(tct2_max)

    # Layers of stage 3 contraction

    tct3_conv1 = Conv1D(128,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_1')(tct2_dp)
    tct3_bn1 = BatchNormalization()(tct3_conv1)
    tct3_conv2 = Conv1D(128,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_2')(tct3_bn1)
    tct3_bn2 = BatchNormalization()(tct3_conv2)
    tct3_max = MaxPool1D(pool_size=2, strides=2)(tct3_bn2)
    tct3_dp = Dropout(0.2)(tct3_max)

    # Layers of stage 1 expansion

    tet1_Tconv = Conv1DTranspose(64,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et1')(tct3_dp)
    tet1_Concat = Concatenate(axis=1)([tet1_Tconv, tct2_conv2])  # 64-channel skip so the axis=1 concat has matching channel counts
    tet1_bn1 = BatchNormalization()(tet1_Concat)
    tet1_conv1 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_1')(tet1_bn1)
    tet1_bn2 = BatchNormalization()(tet1_conv1)
    tet1_conv2 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_2')(tet1_bn2)
    tet1_dp = Dropout(0.2)(tet1_conv2)

    #Layers of stage 2 expansion

    tet2_Tconv = Conv1DTranspose(32,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et2')(tet1_dp)
    tet2_Concat = Concatenate(axis=1)([tet2_Tconv, tct1_conv2])
    tet2_bn1 = BatchNormalization()(tet2_Concat)
    tet2_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_1')(tet2_bn1)
    tet2_bn2 = BatchNormalization()(tet2_conv1)
    tet2_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_2')(tet2_bn2)
    tet2_dp = Dropout(0.2)(tet2_conv2)

    #Layers of stage 3 expansion

    tet3_Tconv = Conv1DTranspose(16,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et3')(tet2_dp)
    tet3_Concat = Concatenate(axis=1)([tet3_Tconv, tct0_conv2])
    tet3_bn1 = BatchNormalization()(tet3_Concat)
    tet3_conv1 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_1')(tet3_bn1)
    tet3_bn2 = BatchNormalization()(tet3_conv1)
    tet3_conv2 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_2')(tet3_bn2)
    tet3_bn3 = BatchNormalization()(tet3_conv2)
    tet3_dp = Dropout(0.1)(tet3_bn3)
    tet3_conv3 = Conv1D(1,
                        5,
                        activation='sigmoid',
                        padding='same',
                        name='Convolution_Et3_3')(tet3_dp)

    ########################################## Reconstroction Module ######################################

    # Layers of stage 0 contraction

    rct0_conv1 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_1')(tet3_conv3)
    rct0_bn1 = BatchNormalization()(rct0_conv1)
    rct0_conv2 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct0_2')(rct0_bn1)
    rct0_bn2 = BatchNormalization()(rct0_conv2)
    rct0_max = MaxPool1D(pool_size=2, strides=2)(rct0_bn2)
    rct0_dp = Dropout(0.2)(rct0_max)

    # Layers of stage 1 contraction

    rct1_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct1_1')(rct0_dp)
    rct1_bn1 = BatchNormalization()(rct1_conv1)
    rct1_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct1_2')(rct1_bn1)
    rct1_bn2 = BatchNormalization()(rct1_conv2)
    rct1_max = MaxPool1D(pool_size=2, strides=2)(rct1_bn2)
    rct1_dp = Dropout(0.2)(rct1_max)

    # Layers of stage 2 contraction

    rct2_conv1 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct2_1')(rct1_dp)
    rct2_bn1 = BatchNormalization()(rct2_conv1)
    rct2_conv2 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ct2_2')(rct2_bn1)
    rct2_bn2 = BatchNormalization()(rct2_conv2)
    rct2_max = MaxPool1D(pool_size=2, strides=2)(rct2_bn2)
    rct2_dp = Dropout(0.2)(rct2_max)

    # Layers of stage 3 contraction

    rct3_conv1 = Conv1D(128,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_1')(rct2_dp)
    rct3_bn1 = BatchNormalization()(rct3_conv1)
    rct3_conv2 = Conv1D(128,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Ce3_2')(rct3_bn1)
    rct3_bn2 = BatchNormalization()(rct3_conv2)
    rct3_max = MaxPool1D(pool_size=2, strides=2)(rct3_bn2)
    rct3_dp = Dropout(0.2)(rct3_max)

    # Layers of stage 1 expansion

    ret1_Tconv = Conv1DTranspose(64,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et1')(rct3_dp)
    ret1_Concat = Concatenate(axis=1)([ret1_Tconv, rct2_conv2])  # 64-channel skip, mirroring the transformation module
    ret1_bn1 = BatchNormalization()(ret1_Concat)
    ret1_conv1 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_1')(ret1_bn1)
    ret1_bn2 = BatchNormalization()(ret1_conv1)
    ret1_conv2 = Conv1D(64,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et1_2')(ret1_bn2)
    ret1_dp = Dropout(0.2)(ret1_conv2)

    #Layers of stage 2 expansion

    ret2_Tconv = Conv1DTranspose(32,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et2')(ret1_dp)
    ret2_Concat = Concatenate(axis=1)([ret2_Tconv, rct1_conv2])
    ret2_bn1 = BatchNormalization()(ret2_Concat)
    ret2_conv1 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_1')(ret2_bn1)
    ret2_bn2 = BatchNormalization()(ret2_conv1)
    ret2_conv2 = Conv1D(32,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et2_2')(ret2_bn2)
    ret2_dp = Dropout(0.2)(ret2_conv2)

    #Layers of stage 3 expansion

    ret3_Tconv = Conv1DTranspose(16,
                                 5,
                                 strides=2,
                                 activation='relu',
                                 padding='same',
                                 name='TransConv_Et3')(ret2_dp)
    ret3_Concat = Concatenate(axis=1)([ret3_Tconv, rct0_conv2])
    ret3_bn1 = BatchNormalization()(ret3_Concat)
    ret3_conv1 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_1')(ret3_bn1)
    ret3_bn2 = BatchNormalization()(ret3_conv1)
    ret3_conv2 = Conv1D(16,
                        5,
                        activation='relu',
                        padding='same',
                        name='Convolution_Et3_2')(ret3_bn2)
    ret3_bn3 = BatchNormalization()(ret3_conv2)
    ret3_dp = Dropout(0.1)(ret3_bn3)
    ret3_conv3 = Conv1D(1,
                        5,
                        activation='sigmoid',
                        padding='same',
                        name='Convolution_Et3_3')(ret3_dp)

    ######################################## Discriminator Module ###############################################

    distrib_Y = Input(shape=(2000, 20))
Пример #27
0
def khop_model_share(): # input/output = num of sensors 
    sensor_matrix1 = Input(shape=(num_sensors, num_sensors))
    sensor_matrix2 = Input(shape=(num_sensors, num_sensors))
    #sensor_matrix3 = Input(shape=(num_sensors, num_sensors))
    s_input1 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input2 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input3 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input4 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input5 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input6 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input7 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input8 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    s_input9 = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))
    
    s_cnn = sensor_cnn(input_shape, repetitions = [2,2,2,2])
    extract_cnn1 = s_cnn(s_input1)
    extract_cnn2 = s_cnn(s_input2)
    extract_cnn3 = s_cnn(s_input3)
    extract_cnn4 = s_cnn(s_input4)
    extract_cnn5 = s_cnn(s_input5)
    extract_cnn6 = s_cnn(s_input6)
    extract_cnn7 = s_cnn(s_input7)
    extract_cnn8 = s_cnn(s_input8)
    extract_cnn9 = s_cnn(s_input9)
    
    extract_cnn = Concatenate(axis=1)([extract_cnn1, extract_cnn2, extract_cnn3, 
                                       extract_cnn4, extract_cnn5, extract_cnn6,
                                       extract_cnn7, extract_cnn8, extract_cnn9])
        
    #extract_cnn = np.reshape(extract_cnn, (-1,))
    G_h1 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix1])
    G_h2 = GraphConv(256, 'relu')([extract_cnn, sensor_matrix2])
    G_1 = Concatenate(axis=-1)([G_h1, G_h2])
  
    G_2h1 = GraphConv(256, 'relu')([G_1, sensor_matrix1])
    G_2h2 = GraphConv(256, 'relu')([G_1, sensor_matrix2])
    G_2 = Concatenate(axis=-1)([G_2h1, G_2h2])
    
    gnn_output = tf.split(G_2, num_sensors, 1)
    
    output1 = Dense(32, activation='relu')(Flatten()(gnn_output[0]))
    output1 = Dense(2, activation='linear', name='sensor_1')(output1)
    
    output2 = Dense(32, activation='relu')(Flatten()(gnn_output[1]))
    output2 = Dense(2, activation='linear', name='sensor_2')(output2)
    
    output3 = Dense(32, activation='relu')(Flatten()(gnn_output[2]))
    output3 = Dense(2, activation='linear', name='sensor_3')(output3)                                                         
                                                             
    output4 = Dense(32, activation='relu')(Flatten()(gnn_output[3]))
    output4 = Dense(2, activation='linear', name='sensor_4')(output4)
    
    output5 = Dense(32, activation='relu')(Flatten()(gnn_output[4]))
    output5 = Dense(2, activation='linear', name='sensor_5')(output5)

    output6 = Dense(32, activation='relu')(Flatten()(gnn_output[5]))
    output6 = Dense(2, activation='linear', name='sensor_6')(output6)
    
    output7 = Dense(32, activation='relu')(Flatten()(gnn_output[6]))
    output7 = Dense(2, activation='linear', name='sensor_7')(output7)

    output8 = Dense(32, activation='relu')(Flatten()(gnn_output[7]))
    output8 = Dense(2, activation='linear', name='sensor_8')(output8)
    
    output9 = Dense(32, activation='relu')(Flatten()(gnn_output[8]))
    output9 = Dense(2, activation='linear', name='sensor_9')(output9)
    
    model = Model(inputs=[s_input1, s_input2, s_input3, s_input4,
                          s_input5, s_input6, s_input7, s_input8, s_input9,
                          sensor_matrix1, sensor_matrix2], 
                  outputs= [output1,output2,output3,output4,
                            output5,output6,output7,output8,output9])
    return model
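A hedged usage sketch for the nine-head model above; `num_sensors`, `input_shape`, `sensor_cnn` and `GraphConv` must already be defined as in the original module, and the optimizer/loss choices below are assumptions.

model = khop_model_share()
model.compile(optimizer='adam',
              # one regression loss per sensor head, matching the Dense(2, 'linear') outputs
              loss={'sensor_%d' % i: 'mse' for i in range(1, 10)})
model.summary()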
Пример #28
0
    def __init__(self):
        super(Concatenate_, self).__init__()
        self.concat = Concatenate(axis=-1)
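The fragment above only shows the constructor; a minimal sketch of the complete subclassed layer it belongs to (the `call` method is an assumption, not taken from the original code):

from tensorflow.keras.layers import Layer, Concatenate

class Concatenate_(Layer):
    def __init__(self):
        super(Concatenate_, self).__init__()
        self.concat = Concatenate(axis=-1)

    def call(self, inputs):
        # joins a list of tensors along the last (channel) axis
        return self.concat(inputs)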
Пример #29
0
def define_ensemble_model(input_size, lr=1e-3):
    try:
        df = pd.read_pickle('train_data_df.pkl')
    except FileNotFoundError:
        FIRST_DAY = 350  # to load the whole data set, set this to 1 --> great memory-overflow risk!

        df = create_dt(is_train=True, first_day= FIRST_DAY)
        create_fea(df)
        df.dropna(inplace = True)
        df.to_pickle('train_data_df.pkl')
        # del df; gc.collect()

    inputs = Input(shape=(input_size, ), name='inputs')

    # Embedding input
    wday_input = Input(shape=(1,), name='wday')
    month_input = Input(shape=(1,), name='month')
    year_input = Input(shape=(1,), name='year')
    mday_input = Input(shape=(1,), name='mday')
    quarter_input = Input(shape=(1,), name='quarter')
    event_name_1_input = Input(shape=(1,), name='event_name_1')
    event_type_1_input = Input(shape=(1,), name='event_type_1')
    event_name_2_input = Input(shape=(1,), name='event_name_2')
    event_type_2_input = Input(shape=(1,), name='event_type_2')
    item_id_input = Input(shape=(1,), name='item_id')
    dept_id_input = Input(shape=(1,), name='dept_id')
    store_id_input = Input(shape=(1,), name='store_id')
    cat_id_input = Input(shape=(1,), name='cat_id')
    state_id_input = Input(shape=(1,), name='state_id')
    snap_CA_input = Input(shape=(1,), name='snap_CA')
    snap_TX_input = Input(shape=(1,), name='snap_TX')
    snap_WI_input = Input(shape=(1,), name='snap_WI')


    wday_emb = Flatten()(Embedding(7, 1)(wday_input))
    month_emb = Flatten()(Embedding(12, 2)(month_input))
    year_emb = Flatten()(Embedding(6, 1)(year_input))
    mday_emb = Flatten()(Embedding(31, 2)(mday_input))
    quarter_emb = Flatten()(Embedding(4, 1)(quarter_input))
    event_name_1_emb = Flatten()(Embedding(31, 2)(event_name_1_input))
    event_type_1_emb = Flatten()(Embedding(5, 1)(event_type_1_input))
    event_name_2_emb = Flatten()(Embedding(5, 1)(event_name_2_input))
    event_type_2_emb = Flatten()(Embedding(5, 1)(event_type_2_input))

    item_id_emb = Flatten()(Embedding(3049, 4)(item_id_input))
    dept_id_emb = Flatten()(Embedding(7, 1)(dept_id_input))
    store_id_emb = Flatten()(Embedding(10, 1)(store_id_input))
    cat_id_emb = Flatten()(Embedding(6, 1)(cat_id_input))
    state_id_emb = Flatten()(Embedding(3, 1)(state_id_input))

    x = Concatenate(-1)([inputs, wday_emb, month_emb, year_emb, mday_emb,
                         quarter_emb, event_name_1_emb, event_type_1_emb, event_name_2_emb,
                         event_type_2_emb, item_id_emb, dept_id_emb, store_id_emb, cat_id_emb,
                         state_id_emb])

    input_dic = {
        'inputs': inputs, 'wday': wday_input, 'month': month_input, 'year': year_input,
        'mday': mday_input, 'quarter': quarter_input, 'event_name_1': event_name_1_input,
        'event_type_1': event_type_1_input, 'event_name_2': event_name_2_input,
        'event_type_2': event_type_2_input, 'item_id': item_id_input, 'dept_id': dept_id_input,
        'store_id': store_id_input, 'cat_id': cat_id_input, 'state_id': state_id_input,

    }

    models = [predict_model(len(input_dense)), predict_model(len(input_dense)), predict_model(len(input_dense))]
    models[0].load_weights('./m5_predict5_mse.h5')
    models[1].load_weights('./m5_predict5_rmsse.h5')
    models[2].load_weights('./m5_predict5_wrmsse.h5')

    i = 0
    for model in models:
        for layer in model.layers:
            layer.trainable = False
            # layer.name = 'ensemble_' + str(i) + layer.name
        i += 1
    
    ensemble_outputs = [model(input_dic) for model in models]
    merge = Concatenate(-1)(ensemble_outputs)
    merge = Concatenate(-1)([x, merge])
    x = Dense(64, activation='relu')(merge)
    x = BatchNormalization()(x)
    x = Dense(32, activation='relu')(x)
    outputs = Dense(1, activation='linear')(x)

    model = Model(inputs=input_dic, outputs=outputs)
    model.compile(optimizer=Adam(lr=lr), loss=rmse)
    return model
Пример #30
0
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Adds a Inception-ResNet block.
    This function builds 3 types of Inception-ResNet blocks mentioned
    in the paper, controlled by the `block_type` argument (which is the
    block name used in the official TF-slim implementation):
        - Inception-ResNet-A: `block_type='block35'`
        - Inception-ResNet-B: `block_type='block17'`
        - Inception-ResNet-C: `block_type='block8'`
    # Arguments
        x: input tensor.
        scale: scaling factor to scale the residuals (i.e., the output of
            passing `x` through an inception module) before adding them
            to the shortcut branch. Let `r` be the output from the residual branch,
            the output of this block will be `x + scale * r`.
        block_type: `'block35'`, `'block17'` or `'block8'`, determines
            the network structure in the residual branch.
        block_idx: an `int` used for generating layer names. The Inception-ResNet blocks
            are repeated many times in this network. We use `block_idx` to identify
            each of the repetitions. For example, the first Inception-ResNet-A block
            will have `block_type='block35', block_idx=0`, and the layer names will have
            a common prefix `'block35_0'`.
        activation: activation function to use at the end of the block
            (see [activations](../activations.md)).
            When `activation=None`, no activation is applied
            (i.e., "linear" activation: `a(x) = x`).
    # Returns
        Output tensor for the block.
    # Raises
        ValueError: if `block_type` is not one of `'block35'`,
            `'block17'` or `'block8'`.
    """
    if block_type == 'block35':
        branch_0 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(x, 32, 1)
        branch_1 = conv2d_bn(branch_1, 32, 3)
        branch_2 = conv2d_bn(x, 32, 1)
        branch_2 = conv2d_bn(branch_2, 48, 3)
        branch_2 = conv2d_bn(branch_2, 64, 3)
        branches = [branch_0, branch_1, branch_2]
    elif block_type == 'block17':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 128, 1)
        branch_1 = conv2d_bn(branch_1, 160, [1, 7])
        branch_1 = conv2d_bn(branch_1, 192, [7, 1])
        branches = [branch_0, branch_1]
    elif block_type == 'block8':
        branch_0 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(x, 192, 1)
        branch_1 = conv2d_bn(branch_1, 224, [1, 3])
        branch_1 = conv2d_bn(branch_1, 256, [3, 1])
        branches = [branch_0, branch_1]
    else:
        raise ValueError('Unknown Inception-ResNet block type. '
                         'Expects "block35", "block17" or "block8", '
                         'but got: ' + str(block_type))

    block_name = block_type + '_' + str(block_idx)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    mixed = Concatenate(axis=channel_axis,
                        name=block_name + '_mixed')(branches)
    up = conv2d_bn(mixed,
                   K.int_shape(x)[channel_axis],
                   1,
                   activation=None,
                   use_bias=True,
                   name=block_name + '_conv')

    x = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
               output_shape=K.int_shape(x)[1:],
               arguments={'scale': scale},
               name=block_name)([x, up])

    if activation is not None:
        x = Activation(activation, name=block_name + '_ac')(x)
    return x
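A minimal sketch of the scaled-residual merge `x + scale * r` that the docstring above describes, using a toy residual branch instead of the real Inception branches; the input shape and the 0.17 scale are illustrative assumptions.

from tensorflow.keras import layers, Model

inp = layers.Input(shape=(35, 35, 320))
r = layers.Conv2D(320, 1, padding='same', activation=None)(inp)  # stand-in for the block's branches + 1x1 conv
out = layers.Lambda(lambda t, scale: t[0] + scale * t[1],
                    arguments={'scale': 0.17})([inp, r])       # output = x + scale * r
out = layers.Activation('relu')(out)
toy = Model(inp, out)
toy.summary()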