Example #1
def compute_temperature_scaling(logit_model, x: ndarray, y: ndarray) -> float:
    temp = Variable(initial_value=1.0, trainable=True, dtype=float32)
    logits = logit_model.predict(x)

    def compute_loss():
        divided_prediction = divide(logits, temp)
        loss = reduce_mean(
            softmax_cross_entropy_with_logits_v2(labels=convert_to_tensor(y),
                                                 logits=divided_prediction))
        return loss

    optimizer = Adam(learning_rate=0.01)
    for i in range(1000):
        optimizer.minimize(compute_loss, var_list=[temp])
    return temp.numpy()
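A minimal usage sketch for the helper above, assuming `logit_model` is a trained Keras classifier that outputs raw (pre-softmax) logits and that `x_val`, `y_val`, `x_test` are NumPy arrays introduced here purely for illustration:

import tensorflow as tf

# Fit the temperature on a held-out calibration split (hypothetical arrays).
temperature = compute_temperature_scaling(logit_model, x_val, y_val)

# At inference time, divide the raw logits by the learned temperature before
# the softmax to obtain calibrated class probabilities.
calibrated_probs = tf.nn.softmax(logit_model.predict(x_test) / temperature).numpy()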
Example #2
    def model(self):
        # Don't train the discriminator model weights
        self.discriminator.trainable = False

        # Send the image to the generator model
        generator_output = self.generator(self.input_image)

        # Send the actual input and generator output to discriminator model
        discriminator_out = self.discriminator(
            [self.input_image, generator_output])

        #  Final Model
        model = Model(self.input_image, [discriminator_out, generator_output])
        optimizer = Adam(lr=0.0002, beta_1=0.5)
        model.compile(loss=['binary_crossentropy', 'mae'],
                      optimizer=optimizer,
                      loss_weights=[1, 100])
        print(
            "\n******************************************* GAN Model ********************************************"
        )
        print(model.summary())
        plot_model(model,
                   "modelplots/pix2pix/gan.png",
                   show_shapes=True,
                   show_layer_names=True)
        return model
Example #3
 def create_model(self):
     self.model = tf.keras.Sequential([
         tf.keras.layers.Dense(24, "relu", input_dim=self.observation_space_dim),
         tf.keras.layers.Dense(48, "relu"),
         tf.keras.layers.Dense(2, "relu")
     ])
     self.model.compile(loss="mse", optimizer=Adam(lr=0.01, decay=0.01))
Example #4
    def build_model(self, input_shape, nb_classes):

        input_layer = keras.layers.Input(input_shape)
        #num_feat=30
        nb_filters = 8
        kernel_size = 12
        dilations = [2**i for i in range(2)]
        padding = 'causal'
        nb_stacks = 1
        #max_len=X_train[0:1].shape[1]
        use_skip_connections = True
        use_batch_norm = True
        dropout_rate = 0.05
        kernel_initializer = 'he_normal'
        #lr=0.00
        activation = 'relu'
        use_layer_norm = True

        return_sequences = True
        #name='tcn_1'
        enc = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
                  use_skip_connections, dropout_rate, return_sequences,
                  activation, kernel_initializer, use_batch_norm,
                  use_layer_norm)(input_layer)
        #output_layer = keras.layers.Dense(nb_classes,
        emb = keras.layers.Dense(16, activation='relu')(enc)
        #emb_rep = keras.layers.RepeatVector(input_shape[0])(emb)
        return_sequences = True
        nb_filters = 8
        decod = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
                    use_skip_connections, dropout_rate, return_sequences,
                    activation, kernel_initializer, use_batch_norm,
                    use_layer_norm)(emb)

        # Reconstruction layer; applied to the decoder output so the embedding
        # and decoder TCN are part of the trained graph (the original line
        # reassigned `emb` and decoded directly from `enc`).
        output = keras.layers.Dense(input_shape[1],
                                    activation='sigmoid')(decod)

        model = keras.models.Model(inputs=input_layer, outputs=output)

        model.compile(loss='mse', optimizer=Adam())

        self.encoder = keras.models.Model(inputs=input_layer, outputs=enc)
        self.decoder = keras.models.Model(inputs=input_layer, outputs=decod)
        self.emb = keras.models.Model(inputs=input_layer, outputs=emb)
        print(model.summary())

        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                      factor=0.5,
                                                      patience=10,
                                                      min_lr=0.0001)

        file_path = self.output_directory + 'best_model'

        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path,
                                                           monitor='loss',
                                                           save_best_only=True)

        self.callbacks = [reduce_lr, model_checkpoint]

        return model
Example #5
    def value_network(self):
        # inputA = Input(shape=self.state_size)
        # inputA = Flatten()(inputA)
        '''model = tf.keras.Sequential([
            Dense(32, activation='relu', input_shape=(self.state_size)),
            Dense(16, activation='relu', input_shape=(self.state_size)),
            Dense(16, activation='relu', input_shape=(self.state_size)),
            Dense(1, activation='linear', input_shape=(self.state_size))
    ])
    model.compile(loss='mse', optimizer=Adam(lr = self.value_lr))'''
        from tensorflow.python.keras.optimizer_v2.adam import Adam
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Dense, Input, Flatten, multiply

        inputA = Input(shape=self.state_size)
        inputB = Input(shape=(self.action_size, ))
        x = Flatten()(inputA)
        x = Dense(24, input_dim=self.state_size,
                  activation='relu')(x)  # fully connected
        x = Dense(24, activation='relu')(x)
        x = Dense(self.action_size, activation='linear')(x)
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=Adam(lr=self.value_lr))
        return model
Example #6
def neural_network_model(input_size):
    model = Sequential()
    model.add(Dense(128, input_shape=input_size))
    # network = input_data(shape=[None, input_size, 1], name='input')
    model.add(Dropout(0.8))
    # network = fully_connected(network, 128, activation='relu')
    # network = dropout(network, 0.8)
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.8))
    # network = fully_connected(network, 256, activation='relu')
    # network = dropout(network, 0.8)
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.8))
    # network = fully_connected(network, 512, activation='relu')
    # network = dropout(network, 0.8)
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.8))
    # network = fully_connected(network, 256, activation='relu')
    # network = dropout(network, 0.8)
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.8))
    # network = fully_connected(network, 128, activation='relu')
    # network = dropout(network, 0.8)
    model.add(Dense(2, activation='softmax'))
    model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=LR))

    # network = fully_connected(network, 2, activation='softmax')
    # network = regression(network, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
    # model = tflearn.DNN(network, tensorboard_dir='log')

    return model
Example #7
def train(train_ds, test_ds, model, steps_per_epoch, learning_rate, model_path,
          tensorboard_callback, weights, class_weights, val_steps, checkpoint):
    # Compile the model
    model.compile(
        optimizer=Adam(learning_rate=learning_rate),
        # optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate),
        loss='categorical_crossentropy',
        metrics=get_metrics())

    print("Training...")
    model.summary()
    history = model.fit(
        train_ds.repeat(),
        epochs=EPOCHS,
        steps_per_epoch=steps_per_epoch,
        validation_data=test_ds.repeat(),
        validation_steps=val_steps,
        callbacks=[tensorboard_callback, checkpoint],
        # class_weight=class_weights,
        # sample_weight=weights,
        # verbose=2,
        verbose=1,
        workers=1,
        use_multiprocessing=False
        # class_weight=class_weights
    )

    tf.contrib.saved_model.save_keras_model(model, model_path)
    # model.save(model_path + ".full_model.h5")
    return history
Example #8
def create_model2():
    """

    创建 左右 模型

    :return: 模型
    """
    model = Sequential()
    model.add(
        LSTM(64,
             input_shape=(30, 9),
             return_sequences=True,
             kernel_regularizer=tf.keras.regularizers.l2(0.0001)))
    model.add(LSTM(64, kernel_regularizer=tf.keras.regularizers.l2(0.0001)))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation="softmax"))

    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
        0.0003, decay_steps=3000, decay_rate=0.8)

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(learning_rate),
                  metrics=['acc'])
    model.summary()
    return model
Example #9
def create_dummy_classifier(window_size: int,
                            num_rows_df: int,
                            num_output_fields: int,
                            neurons_rnn: int = 10,
                            dropout: float = 0.0,
                            learning_rate: float = 0.01,
                            bidirection: bool = True,
                            return_sequences: bool = False):
                            
    lr_schedule = keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=learning_rate,
        decay_steps=10000,
        decay_rate=0.9)

    model = keras.Sequential(name='dummy_classifier')

    model.add(Input(shape=(window_size, num_rows_df), name='input'))

    if bidirection:
        model.add(Bidirectional(
            LSTM(neurons_rnn, return_sequences=return_sequences),
            name='bidirection'))
    else:
        model.add(LSTM(neurons_rnn, name="rnn",
                       return_sequences=return_sequences))
    if return_sequences:
        model.add(Flatten())
    model.add(Dropout(dropout, name='dropout'))
    model.add(Dense(num_output_fields, activation='sigmoid', name='dense_output'))

    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=lr_schedule), metrics=['accuracy', 'binary_accuracy'])
    return model
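A short usage sketch for the factory above; the window length, feature count, number of output fields, and the randomly generated arrays are illustrative assumptions:

import numpy as np

# Hypothetical data: 1000 windows of 50 timesteps with 8 features each,
# and 3 independent binary target fields per window.
x_train = np.random.rand(1000, 50, 8).astype("float32")
y_train = np.random.randint(0, 2, size=(1000, 3)).astype("float32")

model = create_dummy_classifier(window_size=50, num_rows_df=8, num_output_fields=3)
model.fit(x_train, y_train, epochs=5, batch_size=32, validation_split=0.1)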
Example #10
def read_cmd_line_args(hyperparameters, dataset):
    parser = ArgumentParser()
    parser.add_argument('--meta_heuristic', type=str)
    parser.add_argument('--meta_heuristic_order', type=str)
    parser.add_argument('--optimizer', type=str)
    parser.add_argument('--id', type=str)
    parser.add_argument('--dataset', type=str)
    args = parser.parse_args()

    if args.id is not None:
        hyperparameters.experiment_id = args.id

    if args.meta_heuristic is not None:
        hyperparameters.meta_heuristic = args.meta_heuristic

    if args.meta_heuristic_order is not None and args.meta_heuristic_order == 'first':
        hyperparameters.meta_heuristic_order = 'first'
    elif args.meta_heuristic_order is not None and args.meta_heuristic_order == 'last':
        hyperparameters.meta_heuristic_order = 'last'

    if args.optimizer is not None and args.optimizer == 'adam':
        hyperparameters.learning_optimization = 'Adam'
        opt = Adam(learning_rate=hyperparameters.init_lr, decay=True)
    else:
        hyperparameters.learning_optimization = 'Stochastic Gradient Descent'
        opt = SGD(lr=hyperparameters.init_lr, momentum=0.9)

    if args.dataset is not None:
        if 'cbis' in args.dataset:
            dataset = cbis_ddsm_data_set
        if 'bcs' in args.dataset:
            dataset = bcs_data_set

    return hyperparameters, opt, dataset
Example #11
 def _build_keras_network(self):
     """
     Constructs and returns a sequential model using Keras. The model
     architecture is an Artificial Neural Network that will flatten the
     input. Each hidden layer uses ReLU activation.
     :return: sequential model to use as the function approximator
     :rtype: tensorflow.keras.Sequential
     """        
     # Import the necessary packages for keras.
     from tensorflow.python.keras.optimizer_v2.adam import Adam
     from tensorflow.keras.models import Sequential
     from tensorflow.keras.layers import Dense, Input, Flatten
     
     # Sequential model to build the architecture for.
     model = Sequential()
     
     '''
     Construct Model Architecture.
     '''  
     # Input and flatten layers to accept and flatten the state as
     # input.
     model.add(Input(shape = self.state_size))
     model.add(Flatten())
     # Create and add n hidden layers sequentially to the architecture,
     # where n is the length of hidden_sizes.
     for size in self.hidden_sizes:
         model.add(Dense(size, activation= 'relu'))
     # Create the output layer.
     model.add(Dense(self.action_size, activation= 'linear'))
     
     # Compile and return the model.
     model.compile(loss='mse', optimizer=Adam(lr=0.001))
     return model
Example #12
    def __init__(self,
                 n_episodes=100,
                 batch_size=20,
                 epsilon=1.0,
                 epsilon_min=0.01,
                 epsilon_log_decay=0.995):
        self.epsilon_min = epsilon_min
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_log_decay
        self.n_episodes = n_episodes
        self.batch_size = batch_size
        self.config_tensorflow()
        self.env = gym.make("CartPole-v1")
        self.observation_space = self.env.observation_space.shape[0]
        self.action_space = self.env.action_space.n

        self.memory = deque(maxlen=1000000)

        self.model = tf.keras.Sequential([
            tf.keras.layers.Dense(48, "relu",
                                  input_dim=self.observation_space),
            tf.keras.layers.Dense(16, "relu"),
            tf.keras.layers.Dense(2, "linear")
        ])
        self.model.compile(loss="mse", optimizer=Adam(lr=0.01, decay=0.01))
Example #13
    def __init__(self, optimizer=Adam(), loss=categorical_crossentropy, metrics=[categorical_accuracy, 'top_k_categorical_accuracy']):
        super().__init__(optimizer=optimizer, loss=loss, metrics=metrics, MODEL_NAME=CIFAR_100_CONV_NAME)
        self.sequential_layers = [
            Conv2D(64, [3, 3], padding='same', activation='elu', kernel_regularizer=l2(), input_shape=(32,32,3)),
            BatchNormalization(),
            Conv2D(64, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            MaxPool2D(),
            Dropout(rate=0.3),

            Conv2D(128, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            Conv2D(128, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            MaxPool2D(),
            Dropout(rate=0.4),

            Conv2D(256, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            Conv2D(256, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            MaxPool2D(),
            Dropout(rate=0.5),

            Flatten(),
            Dense(100, activation='softmax')
        ]
Example #14
def init_dqn(env, nb_actions):
    """ Initialize the DQN agent using the keras-rl package.

    :param env: the environment to be played, required to determine the input size
    :param nb_actions: number of actions
    :return: DQN Agent
    """
    # Next, we build a very simple model.
    model = Sequential()
    model.add(Flatten(input_shape=(1, ) + env.observation_space.shape))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dense(16))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))
    print(model.summary())

    # compile agent
    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()
    dqn = DQNAgent(model=model,
                   nb_actions=nb_actions,
                   memory=memory,
                   nb_steps_warmup=10,
                   target_model_update=1e-2,
                   policy=policy)
    dqn.model_name = f"DQN"
    dqn.compile(Adam(lr=1e-3), metrics=['mae'])
    return dqn
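A hedged usage sketch for the agent returned above, following the usual keras-rl fit/test loop; the gym environment and step counts are illustrative assumptions:

import gym

# Any discrete-action gym environment works the same way (CartPole is just an example).
env = gym.make("CartPole-v1")
dqn = init_dqn(env, nb_actions=env.action_space.n)

# Train the agent, then evaluate it for a few episodes.
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)
dqn.test(env, nb_episodes=5, visualize=False)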
Example #15
def init_sarsa(env, nb_actions, lr=1e-3):
    """ Initialize the Sarsa agent using the keras-rl package.

    :param env: the environment to be played, required to determine the input size
    :param nb_actions: number of actions
    :param lr: learning rate
    :return: Sarsa Agent
    """
    # Next, we build a very simple model.
    model = Sequential()
    model.add(Flatten(input_shape=(1, ) + env.observation_space.shape))
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    # SARSA does not require a memory.
    policy = BoltzmannQPolicy()
    sarsa = SARSAAgent(model=model,
                       nb_actions=nb_actions,
                       nb_steps_warmup=10,
                       policy=policy)
    sarsa.model_name = f"SARSA"
    sarsa.compile(Adam(lr=lr), metrics=['mae'])
    return sarsa
Example #16
 def policy_network(self):
     '''try:
   # inputA = Input(shape=self.state_size)
   # inputA = Flatten()(inputA)
   model = tf.keras.Sequential([
           Dense(32, activation='relu', input_shape=(self.state_size)),
           Dense(16, activation='relu', input_shape=(self.state_size)),
           Dense(self.action_size, activation='softmax', input_shape=(self.state_size))
   ])
   kl = tf.keras.losses.KLDivergence()
   model.compile(loss=kl, optimizer=Adam(lr = self.policy_lr))
   return model
 except:
   print("\n\n\n")
   print(sys.exc_info())
   sys.exit()'''
     from tensorflow.python.keras.optimizer_v2.adam import Adam
     from tensorflow.keras.models import Model
     from tensorflow.keras.layers import Dense, Input, Flatten, multiply
     inputA = Input(shape=self.state_size)
     inputB = Input(shape=(self.action_size, ))
     x = Flatten()(inputA)
     x = Dense(24, input_dim=self.state_size,
               activation='relu')(x)  # fully connected
     x = Dense(24, activation='relu')(x)
     outputs = Dense(self.action_size, activation='softmax')(x)
     model = Model(inputs=[inputA, inputB], outputs=outputs)
     kl = tf.keras.losses.KLDivergence()
     model.compile(loss=kl, optimizer=Adam(lr=self.policy_lr))
     return model
Example #17
    def buildQNetwork(self):
        from tensorflow.python.keras.optimizer_v2.adam import Adam
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Input, Dense, Conv2D
        from tensorflow.keras.layers import Flatten, TimeDistributed, LSTM, multiply

        input_shape = (self.historylength, ) + self.state_size
        inputA = Input(shape=input_shape)
        inputB = Input(shape=(self.action_size, ))

        if len(self.state_size) == 1:
            x = TimeDistributed(
                Dense(10, input_shape=input_shape, activation='relu'))(inputA)
        else:
            x = TimeDistributed(Conv2D(16, 8, strides=4,
                                       activation='relu'))(inputA)
            x = TimeDistributed(Conv2D(32, 4, strides=2, activation='relu'))(x)
        x = TimeDistributed(Flatten())(x)
        x = LSTM(256)(x)
        x = Dense(10, activation='relu')(x)  # fully connected
        x = Dense(10, activation='relu')(x)
        x = Dense(self.action_size)(x)
        outputs = multiply([x, inputB])
        model = Model(inputs=[inputA, inputB], outputs=outputs)
        model.compile(loss='mse', optimizer=Adam(lr=0.0001, clipvalue=1))
        return model
Example #18
    def compile_model(self):
        log('Compiling LSTM-RNN model...')

        optimizer = Adam(lr=self._learning_rate)
        self._model.compile(loss='categorical_crossentropy',
                            optimizer=optimizer,
                            metrics=['accuracy'])
Example #19
def iterative_train(theories: List[Theory],
                    X: np.ndarray, Y: np.ndarray,
                    loss_func: Callable[..., tf.Tensor] = generalized_mean_loss,
                    optimizer_pred: OptimizerV2 = Adam(),
                    optimizer_domain: OptimizerV2 = Adam(),
                    K: int = 10000,
                    eps: float = 10.,
                    loss_kwargs: Optional[Dict] = None):

    if loss_kwargs is None:
        loss_kwargs = {"gamma": -1, "eps": eps}

    trainable_pred_variables = sum(map(lambda x: x.trainable_pred_variables(), theories), [])

    trainable_domain_variables = sum(map(lambda x: x.trainable_domain_variables(), theories), [])
    # flag = False

    for k in range(K):  # Main training loop
        """ Can be optimized by removing the double evaluation """

        # Predictor optimization
        with tf.GradientTape() as tape:

            loss = loss_func(theories, X, Y, **loss_kwargs)
            if not k % 100:
                print("Step %d loss %.5f" % (k, loss.numpy()))

        gradients = tape.gradient(loss, trainable_pred_variables)
        optimizer_pred.apply_gradients(zip(gradients, trainable_pred_variables))

        best_idx = assign_theories(theories, X, Y, ) # (batch, ) labels for the domain classification

        with tf.GradientTape() as tape:

            domain_probs = []
            for theory in theories:
                preds = theory.domain(X)  # (batch, 1)
                domain_probs.append(preds)

            domain_probs = tf.concat(domain_probs, axis=1)  # (batch, theories)
            domain_probs = softmax(domain_probs, axis=1)  # (batch, theories)

            cce = SparseCategoricalCrossentropy()
            loss = cce(y_true=best_idx, y_pred=domain_probs)

        gradients = tape.gradient(loss, trainable_domain_variables)
        optimizer_domain.apply_gradients(zip(gradients, trainable_domain_variables))
Example #20
 def compile(self,
             loss_function,
             metric_functions=None,
             optimizer=Adam(1e-3, epsilon=1e-6)):
     self.require_model_loaded()
     return self.model.compile(loss=loss_function,
                               optimizer=optimizer,
                               metrics=metric_functions)
Example #21
 def _build_net(self):
     inputs = Input(shape=(self.n_features,))
     x = Dense(32, activation='relu', kernel_regularizer=l2(self.l2))(inputs)
     x = Dense(16, activation='relu', kernel_regularizer=l2(self.l2))(x)
     output = Dense(self.n_actions, activation='softmax', kernel_regularizer=l2(self.l2))(x)
     self.model = Model(inputs=inputs, outputs=output)
     self.model.compile(optimizer=Adam(learning_rate=self.lr), loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                        metrics=['accuracy'])
Example #22
def make_optimizer(optimizer, lr, momentum, decay, nesterov, epsilon):
    if optimizer == 'sgd':
        optim = SGD(learning_rate=lr, momentum=momentum, decay=decay, nesterov=nesterov)
    elif optimizer == 'adam':
        optim = Adam(lr=lr, decay=decay, epsilon=epsilon)
    else:
        raise ValueError('Invalid config for optimizer.optimizer: ' + optimizer)
    return optim
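A brief usage sketch for the dispatcher above; all numeric hyperparameter values are illustrative:

# SGD branch with Nesterov momentum; epsilon is ignored on this path.
sgd_opt = make_optimizer('sgd', lr=0.01, momentum=0.9, decay=1e-6, nesterov=True, epsilon=None)

# Adam branch; momentum and nesterov are ignored on this path.
adam_opt = make_optimizer('adam', lr=1e-3, momentum=0.0, decay=1e-6, nesterov=False, epsilon=1e-7)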
Example #23
    def build_model(self, input_shape, nb_classes):
        input_layer = keras.layers.Input(input_shape)

        num_feat = 30
        num_classes = 2
        nb_filters = 32
        kernel_size = 5
        dilations = [2**i for i in range(4)]
        padding = 'causal'
        nb_stacks = 1
        #max_len=X_train[0:1].shape[1]
        use_skip_connections = True
        use_batch_norm = True
        dropout_rate = 0.05
        kernel_initializer = 'he_normal'
        #lr=0.00
        activation = 'linear'
        use_layer_norm = True

        return_sequences = True
        #name='tcn_1'
        x = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
                use_skip_connections, dropout_rate, return_sequences,
                activation, kernel_initializer, use_batch_norm,
                use_layer_norm)(input_layer)
        """
		return_sequences=False
		#name='tcn_1'
		x = TCN(nb_filters, kernel_size, nb_stacks, dilations, padding,
		    use_skip_connections, dropout_rate, return_sequences,
		    activation, kernel_initializer, use_batch_norm, use_layer_norm)(input_layer)
        
		"""
        output_layer = keras.layers.Dense(nb_classes, activation='sigmoid')(x)

        model = keras.models.Model(inputs=input_layer, outputs=output_layer)

        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(),
                      metrics=['accuracy'])

        print(model.summary())

        reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                      factor=0.5,
                                                      patience=50,
                                                      min_lr=0.0001)

        file_path = self.output_directory + 'best_model'

        model_checkpoint = keras.callbacks.ModelCheckpoint(filepath=file_path,
                                                           monitor='loss',
                                                           save_best_only=True)

        self.callbacks = [reduce_lr, model_checkpoint]

        return model
Example #24
 def _build_critic(self):
     inputs = Input(shape=(self.n_features, ))
     x = Dense(32, activation='relu',
               kernel_regularizer=l2(self.l2))(inputs)
     x = Dense(16, activation='relu', kernel_regularizer=l2(self.l2))(x)
     output = Dense(1, kernel_regularizer=l2(self.l2))(x)
     self.critic = Model(inputs=inputs, outputs=output)
     self.critic.compile(optimizer=Adam(lr=self.critic_lr),
                         loss='mean_squared_error',
                         metrics=['accuracy'])
Example #25
    def close_binary_network(self, model):
        """ Last layer to predict binary outputs """
        model.add(Dense(2, activation='softmax'))
        optimizer = Adam(lr=self.params.learning_rate)
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

        if self.params.summary is True:
            model.summary()
Example #26
 def __init__(self,
              optimizer=Adam(),
              loss=categorical_crossentropy,
              metrics=[categorical_accuracy, top_k_categorical_accuracy]):
     super().__init__(optimizer=optimizer,
                      loss=loss,
                      metrics=metrics,
                      MODEL_NAME=MOBILENETV2_NAME)
     self.mobilenetv2 = MobileNetV2()
     self.mobilenetv2.layers[-1].activation = tf.keras.activations.linear
Example #27
 def __init__(self,
              optimizer=Adam(),
              loss=categorical_crossentropy,
              metrics=[top_k_categorical_accuracy, categorical_accuracy]):
     super().__init__(optimizer=optimizer,
                      loss=loss,
                      metrics=metrics,
                      MODEL_NAME=INCEPTION_V3_NAME)
     self.imagenet = InceptionV3()
     self.imagenet.layers[-1].activation = tf.keras.activations.linear
Example #28
    def construct_q_network(self):
        """ Construct both the actual Q-network and the target network with three hidden layers and ReLu activation
        functions in between. The network uses an Adam optimizer with MSE loss."""

        self.model = Sequential()
        input_layer = Input(shape=(self.observation_size * NUM_FRAMES, ))
        layer1 = Dense(self.observation_size * NUM_FRAMES)(input_layer)
        layer1 = Activation('relu')(layer1)
        layer3 = Dense(self.observation_size)(layer1)
        layer3 = Activation('relu')(layer3)
        layer4 = Dense(2 * self.action_size)(layer3)
        layer4 = Activation('relu')(layer4)
        output = Dense(self.action_size)(layer4)

        self.model = Model(inputs=[input_layer], outputs=[output])
        self.model.compile(loss='mse', optimizer=Adam(lr=self.lr))
        self.target_model = Model(inputs=[input_layer], outputs=[output])
        self.target_model.compile(loss='mse', optimizer=Adam(lr=self.lr))
        self.target_model.set_weights(self.model.get_weights())
Example #29
 def __init__(self,
              optimizer=Adam(),
              loss=categorical_crossentropy,
              metrics=[top_k_categorical_accuracy]):
     super().__init__(optimizer=optimizer,
                      loss=loss,
                      metrics=metrics,
                      MODEL_NAME=RESNET_NAME)
     self.resnet = ResNet50V2()
     self.resnet.layers[-1].activation = tf.keras.activations.linear
Example #30
def compile_model(model):
    learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
        0.0003,
        decay_steps=3000,
        decay_rate=0.8)

    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate), metrics=['acc'])
    # model.compile(loss='mse', optimizer=RMSprop(learning_rate), metrics=['mse', 'mae'])
    # model.compile(loss=my_loss_fn, optimizer=RMSprop(learning_rate), metrics=my_loss_fn)
    model.summary()
    return model