from keras.layers import Input, Dense
from keras.models import Model
from keras import regularizers
import flipGradientTF

def build_floor_discriminator(self):
    initialization_method = 'he_normal'
    regularization_penalty = 0.08
    y = Input(shape=(self.discriminator_num,))

    # Reverse the gradient so that training this discriminator pushes the
    # upstream encoder to unlearn floor-specific features.
    Flip = flipGradientTF.GradientReversal(1)
    dann_in = Flip(y)
    dann_out = Dense(2)(dann_in)
    # Alternative: from flip_gradient import flip_gradient; dann_out = flip_gradient(y)

    l1 = Dense(800, activation=self.act_fun, kernel_initializer=initialization_method,
               kernel_regularizer=regularizers.l2(regularization_penalty))(dann_out)
    # l1 = Dropout(self.dropout)(l1)
    l3 = Dense(800, activation=self.act_fun, kernel_initializer=initialization_method,
               kernel_regularizer=regularizers.l2(regularization_penalty))(l1)
    # l3 = Dropout(self.dropout)(l3)
    l3 = Dense(800, activation=self.act_fun, kernel_initializer=initialization_method,
               kernel_regularizer=regularizers.l2(regularization_penalty))(l3)
    l3 = Dense(800, activation=self.act_fun, kernel_initializer=initialization_method,
               kernel_regularizer=regularizers.l2(regularization_penalty))(l3)
    l4 = Dense(20, activation=self.act_fun, kernel_initializer=initialization_method,
               kernel_regularizer=regularizers.l2(regularization_penalty))(l3)

    output = Dense(1, activation='sigmoid', kernel_initializer=initialization_method)(l4)
    return Model(y, output)
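
All four examples hinge on flipGradientTF.GradientReversal, which acts as the identity on the forward pass and multiplies incoming gradients by -hp_lambda on the backward pass. For reference, here is a minimal sketch of such a layer in modern TensorFlow/Keras; the exact hp_lambda semantics of flipGradientTF are assumed to match the DANN convention, not verified against that library:

import tensorflow as tf

@tf.custom_gradient
def reverse_gradient(x, hp_lambda):
    # Identity on the forward pass; negate and scale the gradient on the
    # backward pass (assumed to mirror flipGradientTF's behavior).
    def grad(dy):
        return -hp_lambda * dy, None
    return tf.identity(x), grad

class GradientReversal(tf.keras.layers.Layer):
    """A hypothetical stand-in for flipGradientTF.GradientReversal."""
    def __init__(self, hp_lambda=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hp_lambda = hp_lambda

    def call(self, inputs):
        return reverse_gradient(inputs, tf.constant(self.hp_lambda, inputs.dtype))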
import keras
from keras import backend as K
from keras.layers import Input, Dense, Dropout, Lambda, Concatenate, TimeDistributed, CuDNNLSTM
from keras.models import Model
import flipGradientTF

def load_CMU_MOSEI_model(location=None):

	if location is not None:
		model = keras.models.load_model(location)
		print("Loaded the model.")
		return model

	X = Input(shape = (15, 512,), name = 'ER_input_layer')		# input shape: (m, Tx, nx)

	Y1 = CuDNNLSTM(130, name = 'private_ER_lstm_layer', return_sequences = True)(X)
	Y2 = CuDNNLSTM(130, name = 'shared_lstm_layer', return_sequences = True)(X)

	H = Concatenate(axis = -1)([Y1, Y2])

	H = TimeDistributed(Dense(50, activation = 'tanh'), name = 'ER_attention_hidden_layer_1')(H)
	H = TimeDistributed(Dropout(rate = 0.25))(H)

	alpha = TimeDistributed(Dense(2, activation = 'softmax'), name = 'ER_attention_output_layer')(H)

	# Soft attention: the alpha weights blend the private and shared streams.
	# All three tensors are passed to the Lambda explicitly so Keras can trace them.
	F = Lambda(lambda t: t[0][:, :, 0:1] * t[1] + t[0][:, :, 1:2] * t[2], name = 'ER_attention_fusion_layer')([alpha, Y1, Y2])

	Y = TimeDistributed(Dense(45, activation = 'relu'), name = 'ER_hidden_layer_1')(F)
	Y = TimeDistributed(Dropout(rate = 0.25))(Y)
	
	Y = TimeDistributed(Dense(7, activation = None), name = 'ER_output_layer')(Y)

	# Pool each stream over time, then penalize overlap between the private
	# and shared representations (the "difference" loss).
	Y1 = Lambda(lambda x: K.sum(x, axis = 1))(Y1)
	Y2 = Lambda(lambda x: K.sum(x, axis = 1))(Y2)

	Y_diff = Lambda(lambda t: K.mean(K.abs(K.dot(K.transpose(t[0]), t[1]))), name = 'ER_L_diff_layer')([Y1, Y2])

	# Gradient reversal: the discriminator below learns to separate domains
	# while pushing the shared LSTM toward domain-invariant features.
	Y_discriminator_input = flipGradientTF.GradientReversal(0.3)(Y2)

	Y_discriminator_output = Dense(40, activation = 'relu', name = 'shared_discriminator_hidden_layer_1')(Y_discriminator_input)
	Y_discriminator_output = Dropout(0.25)(Y_discriminator_output)

	Y_discriminator_output = Dense(2, activation = 'softmax', name = 'shared_discriminator_output_layer')(Y_discriminator_output)

	model = Model(inputs = X, outputs = [Y, Y_discriminator_output, Y_diff])

	print("Created a new CMU MOSEI model.")

	return model
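
One possible way to train the three-headed model above; the optimizer, losses, and weights are illustrative assumptions, not the original training setup. Note that the diff head emits the penalty value itself, so its "loss" just returns the prediction and zeros can be fed as its dummy target (the scalar output may need reshaping in practice):

model = load_CMU_MOSEI_model()
model.compile(
    optimizer='adam',
    loss={
        'ER_output_layer': 'mse',                          # emotion targets (assumed)
        'shared_discriminator_output_layer': 'categorical_crossentropy',
        'ER_L_diff_layer': lambda y_true, y_pred: y_pred,  # minimize the diff penalty directly
    },
    loss_weights={'ER_output_layer': 1.0,
                  'shared_discriminator_output_layer': 0.5,
                  'ER_L_diff_layer': 0.1})                  # illustrative weights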
Example #3
from keras.layers import Input, Dense
from keras.models import Model
import flipGradientTF

def create_domain_autoencoder(encoding_dim, input_dim, num_data_sets):

    ################################
    # set up the models
    ################################

    # encoding_dim is the size of our encoded representations;
    # input_dim is the number of k-mers (columns) in our input data
    input_img = Input(shape=(input_dim,))


    ### Step 1: create an autoencoder:
    
    # "encoded" is the encoded representation of the input
    encoded = Dense(encoding_dim, activation='relu')(input_img)

    # "decoded" is the lossy reconstruction of the input
    decoded = Dense(input_dim, activation='softmax')(encoded)

    # this model maps an input to its reconstruction
    #autoencoder = Model(inputs=input_img, outputs=decoded)


    ### Step 2: create a categorical classifier for datasets (domains), without the decoding step.
    domain_classifier = Dense(num_data_sets, activation='softmax')(encoded)
    #domain_classifier_model = Model(inputs=input_img, outputs=domain_classifier)

    
    ### Step 3: next, create a model with the flipped gradient to unlearn the domains
    hp_lambda = 1
    Flip = flipGradientTF.GradientReversal(hp_lambda)
    dann_in = Flip(encoded)
    dann_out = Dense(num_data_sets, activation='softmax')(dann_in)
    dann_model = Model(inputs=input_img, outputs=dann_out)

    ### Step 4: create a healthy vs. diseased classifier on the shared encoding
    healthy_disease_classifier = Dense(1, activation='sigmoid')(encoded)
    healthy_disease_classifier_model = Model(inputs=input_img, outputs=healthy_disease_classifier)

    # multitask learning:
    autoencoder_domain_classifier_model = Model(inputs=input_img, outputs=[decoded, domain_classifier])

    
    ######################
    # compile the models:
    ######################

    #autoencoder.compile(optimizer='adadelta', loss='kullback_leibler_divergence')
    #domain_classifier_model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])
    autoencoder_domain_classifier_model.compile(
        optimizer='adadelta',
        loss=['kullback_leibler_divergence', 'categorical_crossentropy'],
        metrics=['accuracy'])

    dann_model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])

    healthy_disease_classifier_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy']) 

    
    #return autoencoder, domain_classifier_model, dann_model, healthy_disease_classifier_model
    return autoencoder_domain_classifier_model, dann_model, healthy_disease_classifier_model
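
A minimal sketch of the alternating training this setup implies, under assumed inputs: X is a (num_samples, input_dim) k-mer count matrix, domains is a one-hot matrix over num_data_sets, and labels is a binary health-status vector (all names and sizes illustrative):

autoencoder_dc, dann, hd_clf = create_domain_autoencoder(
    encoding_dim=64, input_dim=1024, num_data_sets=3)

for epoch in range(10):
    # Reconstruct the input and recognize the domain through the shared encoder...
    autoencoder_dc.fit(X, [X, domains], epochs=1, batch_size=32, verbose=0)
    # ...push the encoder to unlearn the domain via the reversed gradient...
    dann.fit(X, domains, epochs=1, batch_size=32, verbose=0)
    # ...and keep the encoding predictive of health status.
    hd_clf.fit(X, labels, epochs=1, batch_size=32, verbose=0)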
Example #4
from keras.layers import Input, Dense
from keras.models import Model
import flipGradientTF

def create_domain_classifier(encoding_dim, input_dim, num_data_sets, lambda_value):

    ################################
    # set up the models
    ################################

    # encoding_dim is the size of our encoded representations;
    # input_dim is the number of k-mers (columns) in our input data
    input_img = Input(shape=(input_dim,))

    ### Step 1: create a categorical classifier for datasets (domains). 
    
    # "encoded" is the encoded representation of the input
    encoded = Dense(encoding_dim, activation='relu')(input_img)
    domain_classifier = Dense(num_data_sets, activation='softmax')(encoded)
    domain_classifier_model = Model(inputs=input_img, outputs=domain_classifier)

    
    ### Step 2: next, create a model with the flipped gradient to unlearn the domains
    hp_lambda = lambda_value
    Flip = flipGradientTF.GradientReversal(hp_lambda)
    dann_in = Flip(encoded)
    dann_out = Dense(num_data_sets, activation='softmax')(dann_in)
    dann_model = Model(inputs=input_img, outputs=dann_out)

    # multitask learning:
    model = Model(inputs=input_img, outputs=[domain_classifier, dann_out])

    
    ######################
    # compile the models:
    ######################

    domain_classifier_model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])
    dann_model.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])
    model.compile(optimizer='adadelta',
                  loss=['categorical_crossentropy', 'categorical_crossentropy'],
                  metrics=['accuracy'])
    
    # return the domain classifier, the DANN model, and the multi-task model
    return domain_classifier_model, dann_model, model
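
Illustrative usage of the multi-task model: both heads receive the same one-hot domain targets, so the plain head learns the domain while the reversed-gradient head penalizes domain-predictive encodings, with lambda_value setting the trade-off (X and domains are assumed data, as in the previous example):

domain_clf, dann, multitask = create_domain_classifier(
    encoding_dim=32, input_dim=512, num_data_sets=4, lambda_value=0.5)
multitask.fit(X, [domains, domains], epochs=5, batch_size=32)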