Example #1
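All of the snippets below compile Keras models with the Huber loss. They are shown without their import blocks; a minimal set of TensorFlow Keras imports covering everything used here would look roughly like this (the originals may import from standalone Keras instead):

from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import (Input, Dense, Conv2D, Flatten, Dropout,
                                     LSTM, PReLU, Lambda, Add, Activation,
                                     Reshape)
from tensorflow.keras.losses import (Huber, SparseCategoricalCrossentropy,
                                     mean_absolute_error)
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.optimizers import RMSprop, Adam
from tensorflow.keras.initializers import Constant, he_uniform, glorot_uniform
from tensorflow.keras import backend as K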
    def init_model(num_actions,
                   learning_rate=0.00025,
                   momentum=0.95,
                   added_constant=0.01):  # env.action_space.n

        rmsprop = RMSprop(learning_rate=learning_rate,
                          rho=momentum,
                          epsilon=added_constant)
        model = Sequential()

        model.add(
            Conv2D(filters=32,
                   kernel_size=(8, 8),
                   strides=4,
                   activation="relu",
                   input_shape=(4, 84, 84),
                   data_format="channels_first"))
        model.add(
            Conv2D(filters=64,
                   kernel_size=(4, 4),
                   strides=2,
                   activation="relu",
                   data_format="channels_first"))
        model.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   strides=1,
                   activation="relu",
                   data_format="channels_first"))
        model.add(Flatten())
        model.add(Dense(512, activation="relu"))
        model.add(Dense(num_actions))

        model.compile(optimizer=rmsprop, loss=Huber(
            delta=1.0))  # or loss=mean_squared_error, or optimizer=adam?
        return model
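A hypothetical call, treating init_model as a standalone function and assuming the classic Atari DQN input of 4 stacked 84x84 grayscale frames (channels-first):

import numpy as np

model = init_model(num_actions=4)  # e.g. Breakout's four actions
state = np.zeros((1, 4, 84, 84), dtype=np.float32)  # one stacked observation
q_values = model.predict(state)  # shape (1, 4): one Q-value per action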
Example #2
 def _create_model(self):
     model = Sequential()
     state_shape = self.env.observation_space.shape[0]  # size of a flat observation
     model.add(Dense(24, input_dim=state_shape, activation="relu"))
     model.add(Dense(24, activation="relu"))
     model.add(Dense(self.env.action_space.n))
     model.compile(loss=Huber(), optimizer=Adam(learning_rate=self.learning_rate))
     return model
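_create_model is a method, so it assumes an enclosing agent class; a hypothetical minimal host (names assumed) might look like:

class DQNAgent:
    def __init__(self, env, learning_rate=0.001):
        self.env = env  # a Gym-style environment with Box observations
        self.learning_rate = learning_rate
        self.model = self._create_model()

    # _create_model as defined above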
Example #3
    def critic_maker(self):
        if self.fcn is None:
            print("Initialise the fcn first")
            return

        critic = Dense(1, kernel_initializer='he_uniform')(self.fcn)
        self.critic = Model(inputs=self.input, outputs=critic)
        self.critic.compile(loss=Huber(),
                            optimizer=Adam(learning_rate=self.learning_rate))
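critic_maker assumes self.input (the state Input tensor) and self.fcn (the output of a shared trunk) already exist; a hypothetical companion method could build them like this:

    def fcn_maker(self, state_size):
        # Hypothetical shared trunk that the critic head attaches to
        self.input = Input(shape=(state_size,))
        self.fcn = Dense(64, activation='relu',
                         kernel_initializer='he_uniform')(self.input)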
Example #4
def get_model(input_shape, num_actions, learning_rate):
    X_inp = Input(shape=(input_shape, ))
    X = Dense(32, activation='relu', kernel_initializer='he_uniform')(X_inp)
    X = Dense(32, activation='relu', kernel_initializer='he_uniform')(X)
    X = Dense(num_actions,
              activation='linear',
              kernel_initializer='he_uniform')(X)
    model = Model(inputs=X_inp, outputs=X, name='DDQN-model')
    model.summary()  # summary() prints itself and returns None
    opt = RMSprop(learning_rate=learning_rate)
    model.compile(optimizer=opt, loss=Huber(delta=1.5))
    return model
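A hypothetical call for a CartPole-sized problem (4-dimensional state, 2 actions); DDQN additionally keeps a periodically synced target copy:

model = get_model(input_shape=4, num_actions=2, learning_rate=0.00025)
target_model = get_model(input_shape=4, num_actions=2, learning_rate=0.00025)
target_model.set_weights(model.get_weights())  # sync the target network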
Example #5
def get_critic_model(input_shape, learning_rate):
    X_inp = Input(shape=input_shape)
    # X = Conv2D(32, 8, strides=(4,4), data_format = 'channels_first',
    #            activation = 'relu')(X_inp)
    # X = Conv2D(16, 4, strides=(2,2), data_format = 'channels_first',
    #            activation = 'relu')(X)
    X = Flatten()(X_inp)  # input_shape is redundant in the functional API
    X = Dense(512, activation="relu", kernel_initializer='he_uniform')(X)
    X = Dense(1, activation='linear')(X)

    model = Model(inputs=X_inp, outputs=X)
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss=Huber(delta=1.5))

    return model
Example #6
 def fit_model(self):
     drp = 0.25  # dropout rate
     n = 200  # LSTM units per layer
     # design network
     model = Sequential()
     # model.add(Flatten())
     # model.add(Dense(70, input_shape=(self.train_X.shape[1], self.train_X.shape[2]), activation="relu"))
     model.add(
         LSTM(n,
              input_shape=(self.train_X.shape[1], self.train_X.shape[2]),
              return_sequences=True,
              dropout=drp))
     model.add(LSTM(n, return_sequences=True, dropout=drp))
     model.add(LSTM(n, return_sequences=True, dropout=drp))
     model.add(LSTM(n, return_sequences=True, dropout=drp))
     model.add(LSTM(n, return_sequences=True, dropout=drp))
     model.add(LSTM(n, dropout=drp))
     model.add(Dense(n))
     model.add(PReLU(alpha_initializer=Constant(value=0.25)))
     model.add(Dropout(drp))
     # model.add(Dense(n, activation="relu"))
     # model.add(Dropout(drp))
     # model.add(Dense(n, activation="tanh"))
     # model.add(Dropout(drp))
     # model.add(Dense(44))
     # model.add(Dense(15, activation="tanh"))
     # model.add(Dropout(drp))
     model.add(Dense(100))
     model.add(Dense(1))
     model.compile(loss=Huber(delta=10.0), optimizer='adamax')
     # fit network
     # reshape(self.train_X, (self.n_batch, self.train_X.shape[0], self.train_X.shape[1], self.train_X.shape[2]))
     history = model.fit(self.train_X,
                         self.train_y,
                         epochs=self.epochs,
                         batch_size=self.n_batch,
                         validation_data=(self.test_X, self.test_y),
                         verbose=2,
                         shuffle=True)
     model.save("C:/Users/markk/Desktop/Brandywine/Model.h5")
     return model, history
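The stacked LSTMs expect self.train_X shaped (samples, timesteps, features); a hypothetical preparation step for tabular rows (raw_X is an assumed 2-D NumPy array):

import numpy as np

train_X = raw_X.reshape((raw_X.shape[0], 1, raw_X.shape[1]))  # 1 timestep per sample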
Example #7
def get_model(input_shape, num_actions, learning_rate):
    X_inp = Input(shape=(input_shape,))
    X = Dense(32, activation='relu', kernel_initializer='he_uniform')(X_inp)
    X = Dense(32, activation='relu', kernel_initializer='he_uniform')(X)

    # Dueling head: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))
    state_value = Dense(1, kernel_initializer='he_uniform',
                        activation='linear')(X)
    state_value = Lambda(lambda s: K.expand_dims(s[:, 0], -1),
                         output_shape=(num_actions,))(state_value)

    action_advantage = Dense(num_actions, kernel_initializer='he_uniform',
                             activation='linear')(X)
    # axis=1: subtract the per-sample mean over actions, not over the batch
    action_advantage = Lambda(lambda a: a - K.mean(a, axis=1, keepdims=True),
                              output_shape=(num_actions,))(action_advantage)

    X = Add()([state_value, action_advantage])
    model = Model(inputs=X_inp, outputs=X, name='DDDQN-model')
    model.summary()
    opt = RMSprop(learning_rate=learning_rate)
    model.compile(optimizer=opt, loss=Huber(delta=1.5))
    return model
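The two Lambda branches implement the dueling decomposition Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); the Add layer broadcasts the scalar state value across the per-action advantages. A quick hypothetical shape check:

model = get_model(input_shape=4, num_actions=2, learning_rate=0.00025)
print(model.output_shape)  # (None, 2): one Q-value per action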
Example #8
    def init_model_no_convolutions(num_actions,
                                   learning_rate=0.00025,
                                   momentum=0.95,
                                   added_constant=0.01):
        rmsprop = RMSprop(learning_rate=learning_rate,
                          rho=momentum,
                          epsilon=added_constant)  # defined but not used below
        adam = Adam(learning_rate=learning_rate, clipvalue=1.)
        model = Sequential()
        model.add(Dense(8, input_shape=(4, 4), activation="relu"))
        model.add(Dropout(0.05))
        model.add(Flatten())
        model.add(Dense(num_actions))

        model.compile(optimizer=adam, loss=Huber(
            delta=1.0))  # or loss=mean_squared_error, or optimizer=rmsprop?
        return model
Example #9
def create_detector_model(class_n, roip_fm_shape, class_indices_dict, seed=42):
    he_init = he_uniform(seed)
    gl_init = glorot_uniform(seed)
    inp = Input(roip_fm_shape)  # avoid shadowing the built-in `input`
    output = Flatten(name='DetectorFlatten')(inp)
    output = Dense(units=500,
                   kernel_initializer=he_init,
                   activation='relu',
                   name='DetectorDense1')(output)
    output = Dense(units=500,
                   kernel_initializer=he_init,
                   activation='relu',
                   name='DetectorDense2')(output)
    cls_logits = Dense(
        units=class_n + 1,  # Add background class
        kernel_initializer=gl_init,
        activation='linear',
        name='DetectorClsLogits')(output)
    cls = Activation('softmax', name='DetectorCls')(cls_logits)
    reg = Dense(units=4 * class_n,
                kernel_initializer=he_init,
                activation='linear',
                name='DetectorReg')(output)
    reg = Reshape(target_shape=(-1, 4))(reg)
    model = Model(inputs=[inp], outputs=[cls_logits, cls, reg])
    # Only the deltas for the respective class contribute to the loss
    reg_loss = on_index_wrapper(Huber(), class_indices_dict['not_sign'])
    # Computes the metric per batch - Keras can't reset a metric on batch end if it is wrapped
    reg_metric = on_index_wrapper(mean_absolute_error,
                                  class_indices_dict['not_sign'])
    model.compile(optimizer='adadelta',
                  loss={
                      'DetectorClsLogits': SparseCategoricalCrossentropy(),
                      'DetectorCls': None,
                      'DetectorReg': reg_loss
                  },
                  metrics={
                      'DetectorCls': SparseCategoricalAccuracy(),
                      'DetectorReg': reg_metric
                  })
    return model
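on_index_wrapper is project-specific and not shown here. A heavily hedged sketch of what it might do, assuming regression targets are packed as [class_index, dx, dy, dw, dh] per box (this layout is assumed, not confirmed by the source):

import tensorflow as tf

def on_index_wrapper(fn, background_index):
    # Hypothetical reconstruction: mask out boxes whose class is the
    # background index so they contribute nothing to fn.
    def wrapped(y_true, y_pred):
        mask = tf.cast(tf.not_equal(y_true[..., :1], float(background_index)),
                       y_pred.dtype)
        return fn(y_true[..., 1:] * mask, y_pred * mask)
    return wrapped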