def __init__(self, Name, img_shape, latent_dim, LearningRate=0.0001):
    # Non-default arguments must come before the default LearningRate
    self.Name = Name
    self.LearningRate = LearningRate
    self.img_shape = img_shape
    self.latent_dim = latent_dim
    self.trained = 0
    #self.source_img = tf.Variable()
    #self.target_img = tf.Variable()
    # Kept for custom training steps; each compiled model below gets its
    # own Adam instance, since sharing one optimizer object across
    # several compiled models mixes up their optimizer state
    self.g_optimizer = Adam(learning_rate=self.LearningRate)
    self.d_optimizer = Adam(learning_rate=self.LearningRate)

    # compile the IR Discriminator model - Target
    self.IR_discriminator = self.discriminator_network()
    self.IR_discriminator.compile(optimizer=Adam(learning_rate=self.LearningRate),
                                  loss='binary_crossentropy',
                                  metrics=['accuracy'])

    # compile the IR Generator model - Target: chain the feature
    # extractor and the generator head behind an explicit Input
    in_image_IR = Input(shape=self.img_shape)
    self.IR_f_net = self.feature_extractor_network()
    self.IR_g_net = self.generator_network()
    self.IR_generator = Model(inputs=in_image_IR,
                              outputs=self.IR_g_net(self.IR_f_net(in_image_IR)))
    self.IR_generator.compile(optimizer=Adam(learning_rate=self.LearningRate),
                              loss='binary_crossentropy',
                              metrics=['accuracy'])

    # compile the Visual Discriminator model - Source
    self.Visual_discriminator = self.discriminator_network()
    self.Visual_discriminator.compile(optimizer=Adam(learning_rate=self.LearningRate),
                                      loss='binary_crossentropy',
                                      metrics=['accuracy'])

    # compile the Visual Generator model - Visual
    in_image_Vis = Input(shape=self.img_shape)
    self.Visual_f_net = self.feature_extractor_network()
    self.Visual_g_net = self.generator_network()
    self.Visual_generator = Model(inputs=in_image_Vis,
                                  outputs=self.Visual_g_net(self.Visual_f_net(in_image_Vis)))
    self.Visual_generator.compile(optimizer=Adam(learning_rate=self.LearningRate),
                                  loss='binary_crossentropy',
                                  metrics=['accuracy'])
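# A hedged sketch of a custom generator update using the retained
# g_optimizer; intended as a method on the same class. The adversarial
# loss wiring (ones as the "real" target) is an assumption, not taken
# from the original.
import tensorflow as tf

def train_generator_step(self, source_imgs):
    with tf.GradientTape() as tape:
        fake = self.Visual_generator(source_imgs, training=True)
        pred = self.Visual_discriminator(fake, training=False)
        # Generator tries to make the discriminator output "real" (1)
        loss = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(tf.ones_like(pred), pred))
    grads = tape.gradient(loss, self.Visual_generator.trainable_variables)
    self.g_optimizer.apply_gradients(
        zip(grads, self.Visual_generator.trainable_variables))
    return loss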
def __init__(self, args):
    self.input_shape = 28
    self.num_classes = 2
    self.latent_dim = 100
    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator (two heads: validity + class)
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(
        loss=['binary_crossentropy', 'categorical_crossentropy'],
        loss_weights=[0.5, 0.5],
        optimizer=optimizer,
        metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise as input and generates imgs; the noise
    # width must match self.latent_dim (it was hard-coded to 64 before)
    noise = Input(shape=(self.latent_dim,))
    img = self.generator(noise)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    valid, _ = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # trains the generator to fool the discriminator
    self.combined = Model(noise, valid)
    self.combined.compile(loss=['binary_crossentropy'], optimizer=optimizer)
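# A minimal sketch of the alternating training step this setup implies;
# intended as a method on the same class. X_train, labels, the all-zero
# class target for fakes, and the batch assembly are assumptions.
import numpy as np

def train_step(self, X_train, labels, batch_size=32):
    # Sample a batch of real images with matching one-hot class labels
    idx = np.random.randint(0, X_train.shape[0], batch_size)
    real_imgs, real_labels = X_train[idx], labels[idx]

    # Generate a batch of fake images from noise
    noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
    fake_imgs = self.generator.predict(noise, verbose=0)

    valid = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    # All-zero class target zeroes out the class loss for fakes
    fake_class = np.zeros((batch_size, self.num_classes))

    # 1) Train the discriminator on real and fake batches
    d_loss_real = self.discriminator.train_on_batch(real_imgs, [valid, real_labels])
    d_loss_fake = self.discriminator.train_on_batch(fake_imgs, [fake, fake_class])

    # 2) Train the generator (through the combined model) to pass as real
    g_loss = self.combined.train_on_batch(noise, valid)
    return d_loss_real, d_loss_fake, g_loss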
from tensorflow.keras import metrics
from tensorflow.keras.layers import Activation, BatchNormalization, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam


def main():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation()
    num_features = train_data.shape[1]
    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))

    # Compile model; the input width must match the data, so use
    # num_features instead of the hard-coded (1,)
    model = Sequential()
    model.add(BatchNormalization(input_shape=(num_features,)))
    model.add(Dense(10, use_bias=True))
    model.add(Activation('relu'))
    model.add(Dense(1, use_bias=True))
    learning_rates = [1e-4, 1e-3, 1e-2]
    adam_optimizer = Adam(learning_rate=learning_rates[0])
    model.compile(loss='mean_absolute_error',
                  optimizer=adam_optimizer,
                  metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100
              #, callbacks=[kp.plot_losses]
              )
    return model
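# The test split is loaded but never used above; a hedged sketch of
# evaluating the returned model on it (assumes the same external
# data_preparation() helper):
model = main()
_, _, _, _, test_data, test_label = data_preparation()
test_loss, test_mae = model.evaluate(test_data, test_label, verbose=0)
print('Test MAE = {:.4f}'.format(test_mae))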
def create_model(self):
    model = Sequential()
    state_shape = self.env.observation_space.shape
    # Conv1D takes input_shape, not input_dim, and expects observations
    # shaped (timesteps, channels); Flatten before the Q-value head so
    # the output is one Q-value per action
    model.add(Conv1D(72, 3, input_shape=state_shape, activation="relu"))
    model.add(Dense(48, activation="relu"))
    model.add(Dense(24, activation="relu"))
    model.add(Flatten())
    model.add(Dense(self.env.action_space.n))
    model.compile(loss="mean_squared_error",
                  optimizer=Adam(learning_rate=self.learning_rate))
    return model
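# A hedged sketch of epsilon-greedy action selection over this
# Q-network; intended as a method on the same agent class, and assumes
# the network from create_model is stored on self.model.
import numpy as np

def act(self, state, epsilon=0.1):
    # Explore with probability epsilon, otherwise act greedily
    if np.random.rand() < epsilon:
        return self.env.action_space.sample()
    q_values = self.model.predict(state[np.newaxis, ...], verbose=0)
    return int(np.argmax(q_values[0]))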
from tensorflow.keras import metrics
from tensorflow.keras.initializers import VarianceScaling
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


def main1():
    # Load the data
    train_data, train_label, validation_data, validation_label, test_data, test_label = data_preparation_moe()
    num_features = train_data.shape[1]
    print('Training data shape = {}'.format(train_data.shape))
    print('Validation data shape = {}'.format(validation_data.shape))
    print('Test data shape = {}'.format(test_data.shape))
    #print('Training label shape = {}'.format(len(train_label)))

    # Set up the input layer
    input_layer = Input(shape=(num_features,))

    # Set up MMoE layer
    mmoe_layers = MMoE(units=16, num_experts=8, num_tasks=2)(input_layer)

    output_layers = []
    output_info = ['y0', 'y1']

    # Build a tower layer on top of each MMoE task output
    for index, task_layer in enumerate(mmoe_layers):
        tower_layer = Dense(units=8,
                            activation='relu',
                            kernel_initializer=VarianceScaling())(task_layer)
        output_layer = Dense(units=1,
                             name=output_info[index],
                             activation='linear',
                             kernel_initializer=VarianceScaling())(tower_layer)
        output_layers.append(output_layer)

    # Compile model
    model = Model(inputs=[input_layer], outputs=output_layers)
    learning_rates = [1e-4, 1e-3, 1e-2]
    adam_optimizer = Adam(learning_rate=learning_rates[0])
    model.compile(loss={'y0': 'mean_squared_error',
                        'y1': 'mean_squared_error'},
                  optimizer=adam_optimizer,
                  metrics=[metrics.mae])

    # Print out model architecture summary
    model.summary()

    # Train the model
    model.fit(x=train_data,
              y=train_label,
              validation_data=(validation_data, validation_label),
              epochs=100)
    return model
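# A hedged sketch of running the two task heads on the test split; the
# model returns one prediction array per named output ('y0', 'y1'), and
# the external data_preparation_moe() helper is assumed:
model = main1()
_, _, _, _, test_data, test_label = data_preparation_moe()
pred_y0, pred_y1 = model.predict(test_data, verbose=0)
print('y0 predictions shape = {}'.format(pred_y0.shape))
print('y1 predictions shape = {}'.format(pred_y1.shape))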
from tensorflow.keras.losses import mean_absolute_error, mean_squared_error
from tensorflow.keras.optimizers import Adam


def get_myUNet(img_rows, img_cols, in_ch=1, out_ch=1):
    # in_ch/out_ch were undefined in the original; they are exposed here
    # as parameters with assumed single-channel defaults, and the input
    # shape now uses the img_rows/img_cols arguments
    model = Unet.UNetContinuous([img_rows, img_cols, in_ch],
                                out_ch=out_ch,
                                start_ch=16,
                                depth=3,
                                inc_rate=2.,
                                activation='relu',
                                dropout=0.5,
                                batchnorm=True,
                                maxpool=True,
                                upconv=True,
                                residual=False)
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss=mean_squared_error,
                  metrics=[mean_squared_error, mean_absolute_error])
    return model
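# A hedged usage sketch on synthetic single-channel 128x128 image pairs
# (shapes and data are placeholders; the external Unet module must be
# available for this to run):
import numpy as np

model = get_myUNet(128, 128, in_ch=1, out_ch=1)
x = np.random.rand(8, 128, 128, 1).astype('float32')
y = np.random.rand(8, 128, 128, 1).astype('float32')
model.fit(x, y, batch_size=2, epochs=1)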
def _set_optimizer(self, lr):
    # tf.keras is not importable as a package name; import from
    # tensorflow.keras, and pass learning_rate (the lr alias is deprecated)
    from tensorflow.keras.optimizers import Adam
    self.optimizer = Adam(learning_rate=lr)