def build_discriminator(self):
    """Build the GAN discriminator as an input placeholder plus a layer stack.

    Returns:
        (d_input, layers): the Input tensor of shape (latent_dim,) and the
        ordered list of layers (Dense/LeakyReLU pairs ending in a sigmoid
        unit) to be applied to it by the caller.
    """
    d_input = Input(shape=(self.latent_dim,))
    layers = [
        Dense(units=self.latent_dim, input_dim=self.latent_dim),
        LeakyReLU(alpha=0.2),
        Dense(self.latent_dim // 2),
        LeakyReLU(alpha=0.2),
        # Single sigmoid unit: real-vs-fake probability.
        Dense(1, activation='sigmoid'),
    ]
    return d_input, layers
def basic_cnn(word_size=300, sentence_size=100, num_filters=100, filter_sizes=(2, 4, 6)):
    """Build a simple 1-D CNN sentence classifier.

    Args:
        word_size: dimensionality of each word embedding.
        sentence_size: number of words (time steps) per sentence.
        num_filters: number of convolution filters per conv layer.
        filter_sizes: kernel size of each successive Conv1D layer.
            NOTE: default changed from a list to a tuple — a mutable default
            argument is shared across calls; a tuple is behaviorally
            equivalent here and safe.

    Returns:
        An (uncompiled) Sequential model ending in a 2-way softmax.
    """
    model = Sequential()
    for i, f_size in enumerate(filter_sizes):
        conv_kwargs = dict(kernel_size=f_size, padding='valid',
                           activation='relu', kernel_initializer='he_uniform')
        # Only the first layer declares the input shape.
        if i == 0:
            conv_kwargs['input_shape'] = (sentence_size, word_size)
        model.add(Conv1D(num_filters, **conv_kwargs))
        model.add(MaxPooling1D(pool_size=2))
        model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    return model
def build_generator(self):
    """Build the GAN generator as a noise placeholder plus a layer stack.

    Returns:
        (noise, layers): the Input tensor of shape (noise_dims,) and the
        ordered list of layers (three Dense/LeakyReLU/BatchNorm groups
        followed by a tanh Dense projection to latent_dim).
    """
    noise = Input(shape=(self.noise_dims,))
    output_dim = self.latent_dim
    layers = []
    # FIX: the original appended ONE shared LeakyReLU instance at three
    # positions; Keras treats a reused layer object as a shared layer, which
    # breaks Sequential assembly and muddles summaries/serialization.
    # Create a fresh activation instance per position instead.
    for units in (self.noise_dims, output_dim, output_dim):
        layers.append(Dense(units))
        layers.append(LeakyReLU(alpha=0.2))
        layers.append(BatchNormalization(momentum=0.8))
    # tanh keeps generated samples in [-1, 1].
    layers.append(Dense(output_dim, activation='tanh'))
    return noise, layers
def create_shared_inceptionV4_network_2():
    """Build a two-input classifier whose Inception-V4-style trunk is shared
    (via assemble_network) between both wire-channel inputs.

    Returns:
        A Model taking ['Wire_1', 'Wire_2'] images of shape (350, 38, 1) and
        producing a 2-way softmax.
    """
    conv_kwargs = {'padding': 'same',
                   'dropout': 0.0,
                   'BN': True,
                   'kernel_initializer': 'glorot_uniform'}

    inputs = [Input(shape=(350, 38, 1), name='Wire_1'),
              Input(shape=(350, 38, 1), name='Wire_2')]

    layers = []
    layers.append(Conv_block(32, filter_size=(3, 3), name='1', **conv_kwargs))
    layers.append(Conv_block(32, filter_size=(3, 3), name='2', **conv_kwargs))
    layers.append([MaxPooling2D((4, 1), name='3')])  # (4,2)
    layers.append(Conv_block(64, filter_size=(3, 3), name='4', **conv_kwargs))

    num_filters = (96, (64, 96), (64, 96), 96)  # TODO Real values from IncV1 Paper?
    # Pairs of Inception blocks separated by pooling stages.
    for name in ('5', '6'):
        layers.append(InceptionV4_block(num_filters=num_filters, name=name))
    layers.append([MaxPooling2D((2, 2), name='7')])
    for name in ('8', '9'):
        layers.append(InceptionV4_block(num_filters=num_filters, name=name))
    layers.append([MaxPooling2D((2, 1), name='10')])
    for name in ('11', '12'):
        layers.append(InceptionV4_block(num_filters=num_filters, name=name))
    layers.append([GlobalAveragePooling2D(name='18')])

    paths = assemble_network(inputs, layers)
    merged = Concatenate(name='Conc_Top_1_and_2')(paths)
    output = Dense(2, name='Output_Top', activation='softmax',
                   kernel_initializer="glorot_uniform")(merged)
    return Model(inputs=inputs, outputs=output)
def create_shared_inception_network_4():
    """Build a four-input classifier: U/V wire images for two TPCs.

    U and V channels each get their own (internally shared) Inception-V1-style
    trunk; per-TPC features are concatenated, passed through a small shared
    dense head, and classified with a 2-way softmax.

    Returns:
        A Model with inputs ordered [U-Wire_1, V-Wire_1, U-Wire_2, V-Wire_2],
        each of shape (350, 38, 1).
    """
    kwargs = {'padding': 'same',
              'dropout': 0.0,
              'BN': True,
              'kernel_initializer': 'glorot_uniform'}

    inputU = [Input(shape=(350, 38, 1), name='U-Wire_1'),
              Input(shape=(350, 38, 1), name='U-Wire_2')]
    inputV = [Input(shape=(350, 38, 1), name='V-Wire_1'),
              Input(shape=(350, 38, 1), name='V-Wire_2')]

    num_filters = (64, (96, 128), (16, 32), 32)  # TODO Real values from IncV1 Paper?
    # NOTE: removed unused `num_filters_deep` and the dead commented-out
    # deeper-stack experiment that referenced it.

    layersU = []
    layersV = []
    for layers in [layersU, layersV]:  # Use same architecture for U and V wires. Can be changed
        layers.append(Conv_block(32, filter_size=(3, 3), **kwargs))
        layers.append(Conv_block(32, filter_size=(3, 3), **kwargs))
        layers.append([MaxPooling2D((4, 2))])
        layers.append(Conv_block(64, filter_size=(3, 3), **kwargs))
        layers.append(InceptionV1_block(num_filters=num_filters))
        layers.append(InceptionV1_block(num_filters=num_filters))
        layers.append([MaxPooling2D((2, 2))])  # TODO Test Inception module with stride=2 instead of max pooling
        layers.append(InceptionV1_block(num_filters=num_filters))
        layers.append(InceptionV1_block(num_filters=num_filters))
        layers.append([MaxPooling2D((2, 1))])
        layers.append(InceptionV1_block(num_filters=num_filters))
        layers.append(InceptionV1_block(num_filters=num_filters))
        layers.append([GlobalAveragePooling2D()])

    pathsU = assemble_network(inputU, layersU)
    pathsV = assemble_network(inputV, layersV)

    # Merge U and V features per TPC, then run a small shared dense head.
    inputUV = [Concatenate(name='TPC_1')([pathsU[0], pathsV[0]]),
               Concatenate(name='TPC_2')([pathsU[1], pathsV[1]])]
    layersUV = [[Dense(64, activation='relu')],
                [Dense(16, activation='relu')]]
    pathsUV = assemble_network(inputUV, layersUV)

    merge = Concatenate(name='Flat_1_and_2')(pathsUV)
    output = Dense(2, name='Output', activation='softmax',
                   kernel_initializer="glorot_uniform")(merge)

    # Keep the original caller-facing input order: U1, V1, U2, V2.
    model_inputs = [inputU[0], inputV[0], inputU[1], inputV[1]]
    return Model(inputs=model_inputs, outputs=output)
def create_shared_inception_network_2():
    """Build the baseline two-input shared Inception-V1-style classifier.

    NOTE: removed a full dead commented-out copy of this function (an earlier
    dropout experiment) that preceded the live code; the live logic below is
    unchanged.

    Returns:
        A Model taking ['Wire_1', 'Wire_2'] images of shape (350, 38, 1) and
        producing a 2-way softmax.
    """
    kwargs = {'padding': 'same',
              'dropout': 0.0,
              'BN': True,
              'kernel_initializer': 'glorot_uniform'}

    inputs = [Input(shape=(350, 38, 1), name='Wire_1'),
              Input(shape=(350, 38, 1), name='Wire_2')]

    layers = []
    layers.append(Conv_block(32, filter_size=(3, 3), name='1', **kwargs))
    layers.append(Conv_block(32, filter_size=(3, 3), name='2', **kwargs))
    layers.append([MaxPooling2D((4, 2), name='3')])
    layers.append(Conv_block(64, filter_size=(3, 3), name='4', **kwargs))

    num_filters = (64, (96, 128), (16, 32), 32)  # TODO Real values from IncV1 Paper?
    layers.append(InceptionV1_block(num_filters=num_filters, name='5'))
    layers.append(InceptionV1_block(num_filters=num_filters, name='6'))
    layers.append([MaxPooling2D((2, 2), name='7')])  # TODO Test Inception module with stride=2 instead of max pooling
    layers.append(InceptionV1_block(num_filters=num_filters, name='8'))
    layers.append(InceptionV1_block(num_filters=num_filters, name='9'))
    layers.append([MaxPooling2D((2, 1), name='10')])
    layers.append(InceptionV1_block(num_filters=num_filters, name='11'))
    layers.append(InceptionV1_block(num_filters=num_filters, name='12'))
    layers.append([MaxPooling2D((2, 1), name='13')])
    layers.append(InceptionV1_block(num_filters=num_filters, name='14'))
    layers.append(InceptionV1_block(num_filters=num_filters, name='15'))
    layers.append(InceptionV1_block(num_filters=num_filters, name='16'))
    layers.append(InceptionV1_block(num_filters=num_filters, name='17'))
    layers.append([GlobalAveragePooling2D(name='18')])

    paths = assemble_network(inputs, layers)
    merge = Concatenate(name='Conc_Top_1_and_2')(paths)
    output = Dense(2, name='Output_Top', activation='softmax',
                   kernel_initializer="glorot_uniform")(merge)
    return Model(inputs=inputs, outputs=output)
def create_shared_inception_network_2_extra_input(kwargs_inc=None):
    """Build the two-wire shared Inception network with an auxiliary input.

    The Inception trunk produces `Output_Top` (image features only); those
    features are then concatenated with a flattened (10, 4) auxiliary input
    and passed through a dense head to produce the main `Output`.

    Args:
        kwargs_inc: optional keyword overrides forwarded to the conv/Inception
            blocks. FIX: default changed from the mutable `{}` (shared across
            calls) to None, normalized to a fresh empty dict below —
            backward compatible.

    Returns:
        A Model with inputs [Wire_1, Wire_2, Aux_Input] and outputs
        [Output, Output_Top].

    NOTE: removed the dead commented-out LocallyConnected1D / Conv2D
    auxiliary-head experiments; the live logic is unchanged.
    """
    kwargs_inc = {} if kwargs_inc is None else kwargs_inc

    # The softmax output layers must not receive regularization/dropout
    # overrides intended for the hidden blocks.
    kwargs_out = dict(kwargs_inc)
    for key in ['dropout', 'activity_regularizer', 'kernel_regularizer']:
        kwargs_out.pop(key, None)

    kwargs = {'padding': 'same',
              'dropout': 0.0,
              'BN': True,
              'kernel_initializer': 'glorot_uniform'}
    kwargs = merge_two_dicts(kwargs, kwargs_inc)

    inputs = [Input(shape=(350, 38, 1), name='Wire_1'),
              Input(shape=(350, 38, 1), name='Wire_2')]

    layers = []
    layers.append(Conv_block(32, filter_size=(3, 3), name='ConvBl_1', **kwargs))
    layers.append(Conv_block(32, filter_size=(3, 3), name='ConvBl_2', **kwargs))
    layers.append([MaxPooling2D((4, 2), name='maxp_1')])
    layers.append(Conv_block(64, filter_size=(3, 3), name='ConvBl_3', **kwargs))

    num_filters = (64, (96, 128), (16, 32), 32)  # TODO Real values from IncV1 Paper?
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_1', kwargs_inc=kwargs_inc))
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_2', kwargs_inc=kwargs_inc))
    layers.append([MaxPooling2D((2, 2), name='maxp_2')])  # TODO Test Inception module with stride=2 instead of max pooling
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_3', kwargs_inc=kwargs_inc))
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_4', kwargs_inc=kwargs_inc))
    layers.append([MaxPooling2D((2, 1), name='maxp_3')])
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_5', kwargs_inc=kwargs_inc))
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_6', kwargs_inc=kwargs_inc))
    layers.append([MaxPooling2D((2, 1), name='maxp_4')])
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_7', kwargs_inc=kwargs_inc))
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_8', kwargs_inc=kwargs_inc))
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_9', kwargs_inc=kwargs_inc))
    layers.append(InceptionV1_block(num_filters=num_filters, name='IncBl_10', kwargs_inc=kwargs_inc))
    layers.append([GlobalAveragePooling2D(name='GAverPool_Top')])

    paths = assemble_network(inputs, layers)
    merge_top = Concatenate(name='Concat_Top_1_and_2')(paths)
    # Auxiliary classification head on the image features alone.
    output_top = Dense(2, name='Output_Top', activation='softmax',
                       kernel_initializer="glorot_uniform", **kwargs_out)(merge_top)

    # Main head: image features merged with the flattened auxiliary input.
    auxiliary_input = Input(shape=(10, 4), name='Aux_Input')
    auxiliary_flat = Flatten(name='Flatten_Aux_Input')(auxiliary_input)
    x = Concatenate(name='Merge_Pos_and_Top')([merge_top, auxiliary_flat])
    x = Dense(256, activation='relu', kernel_initializer="glorot_uniform", name='31')(x)
    x = Dropout(0.3, name='drop_1')(x)
    x = Dense(64, activation='relu', kernel_initializer="glorot_uniform", name='32')(x)
    x = Dropout(0.3, name='drop_2')(x)
    x = Dense(16, activation='relu', kernel_initializer="glorot_uniform", name='33')(x)
    output = Dense(2, name='Output', activation='softmax',
                   kernel_initializer="glorot_uniform")(x)

    return Model(inputs=[inputs[0], inputs[1], auxiliary_input],
                 outputs=[output, output_top])
def create_shared_dcnn_network_2():
    """Build a two-input classifier with a shared VGG-style deep-CNN trunk.

    NOTE: removed the dead commented-out Flatten/Dense-head experiment;
    the live logic is unchanged.

    Returns:
        A Model taking ['Wire_1', 'Wire_2'] images of shape (350, 38, 1) and
        producing a 2-way softmax.
    """
    kwargs = {'padding': 'same',
              'dropout': 0.0,
              'BN': True,
              'kernel_initializer': 'glorot_uniform'}

    inputs = [Input(shape=(350, 38, 1), name='Wire_1'),
              Input(shape=(350, 38, 1), name='Wire_2')]

    # (filters, filter_size, max_pooling) per Conv_block — VGG-style filter
    # doubling; pooling only after every second conv block.
    conv_specs = [
        (16, (5, 3), None), (16, (5, 3), (4, 2)),
        (32, (5, 3), None), (32, (5, 3), (4, 2)),
        (64, (3, 3), None), (64, (3, 3), (2, 2)),
        (128, (3, 3), None), (128, (3, 3), (2, 2)),
        (256, (3, 3), None), (256, (3, 3), None),
    ]
    layers = [Conv_block(filters, filter_size=size, name='ConvBl_%d' % (i + 1),
                         max_pooling=pool, **kwargs)
              for i, (filters, size, pool) in enumerate(conv_specs)]
    layers.append([GlobalAveragePooling2D()])

    paths = assemble_network(inputs, layers)
    merge = Concatenate(name='Flat_1_and_2')(paths)
    output = Dense(2, name='Output', activation='softmax',
                   kernel_initializer=kwargs['kernel_initializer'])(merge)
    return Model(inputs=inputs, outputs=output)
def create_shared_dcnn_network_4():
    """Build a four-input classifier: U/V wire images for two TPCs.

    Each of the U and V channels gets its own (internally shared) VGG-style
    deep-CNN trunk; per-TPC features are concatenated, passed through a small
    shared regularized dense head, and classified with a 2-way softmax.

    Returns:
        A Model with inputs ordered [U-Wire_1, V-Wire_1, U-Wire_2, V-Wire_2],
        each of shape (350, 38, 1).
    """
    kwargs = {'padding': 'same',
              'dropout': 0.0,
              'BN': False,
              'kernel_initializer': 'glorot_uniform',
              'kernel_regularizer': regularizers.l2(1.e-2)}

    inputU = [Input(shape=(350, 38, 1), name='U-Wire_1'),
              Input(shape=(350, 38, 1), name='U-Wire_2')]
    inputV = [Input(shape=(350, 38, 1), name='V-Wire_1'),
              Input(shape=(350, 38, 1), name='V-Wire_2')]

    # (filters, filter_size, max_pooling) for each successive Conv_block.
    conv_specs = [
        (16, (5, 3), None), (16, (5, 3), (4, 2)),
        (32, (5, 3), None), (32, (5, 3), (4, 2)),
        (64, (3, 3), None), (64, (3, 3), (2, 2)),
        (128, (3, 3), None), (128, (3, 3), (2, 2)),
        (256, (3, 3), None), (256, (3, 3), None),
    ]

    layersU = []
    layersV = []
    for layers in [layersU, layersV]:  # Use same architecture for U and V wires. Can be changed
        for filters, size, pool in conv_specs:
            layers.append(Conv_block(filters, filter_size=size,
                                     max_pooling=pool, **kwargs))
        layers.append([GlobalAveragePooling2D()])

    pathsU = assemble_network(inputU, layersU)
    pathsV = assemble_network(inputV, layersV)

    # Merge U and V features per TPC, then run a small shared dense head.
    inputUV = [Concatenate(name='TPC_1')([pathsU[0], pathsV[0]]),
               Concatenate(name='TPC_2')([pathsU[1], pathsV[1]])]
    layersUV = [
        [Dense(32, activation='relu',
               kernel_regularizer=kwargs['kernel_regularizer'])],
        [Dense(16, activation='relu',
               kernel_regularizer=kwargs['kernel_regularizer'])],
    ]
    pathsUV = assemble_network(inputUV, layersUV)

    merge = Concatenate(name='Flat_1_and_2')(pathsUV)
    output = Dense(2, name='Output', activation='softmax',
                   kernel_initializer=kwargs['kernel_initializer'])(merge)

    # Caller-facing input order: U1, V1, U2, V2.
    model_inputs = [inputU[0], inputV[0], inputU[1], inputV[1]]
    return Model(inputs=model_inputs, outputs=output)
def build_generator(self):
    """Build the GAN generator as a noise placeholder plus a layer stack.

    Returns:
        (noise, layers): the Input tensor of shape (noise_dims,) and the
        ordered list of layers (three Dense/LeakyReLU/BatchNorm groups
        followed by a tanh Dense projection to latent_dim).
    """
    noise = Input(shape=(self.noise_dims,))
    output_dim = self.latent_dim
    layers = []
    # FIX: the original appended ONE shared LeakyReLU instance at three
    # positions; Keras treats a reused layer object as a shared layer, which
    # breaks Sequential assembly and muddles summaries/serialization.
    # Create a fresh activation instance per position instead.
    for units in (self.noise_dims, output_dim, output_dim):
        layers.append(Dense(units))
        layers.append(LeakyReLU(alpha=0.2))
        layers.append(BatchNormalization(momentum=0.8))
    # tanh keeps generated samples in [-1, 1].
    layers.append(Dense(output_dim, activation='tanh'))
    return noise, layers