Example #1
    def test_ok_tau_rand(self):
        dim_x = 10
        n_elem = 100
        dim_tau = 20

        X_train = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_train = np.random.normal(size=(n_elem, dim_tau)).astype(np.float32)

        # the keras model
        x = Input(shape=(dim_x, ), name="x")
        tau = Input(shape=(dim_tau, ), name="tau")
        res_model = Ltau(initializer='ones', use_bias=False)((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        # make predictions
        res = model.predict([X_train, TAU_train])

        # reference LEAP Net computation in numpy for the case where tau is not 0
        res_th = np.matmul(X_train, np.ones((dim_x, dim_tau),
                                            dtype=np.float32))
        res_th = np.multiply(res_th, TAU_train)
        res_th = np.matmul(res_th, np.ones((dim_tau, dim_x), dtype=np.float32))
        res_th += X_train
        assert np.mean(np.abs(res - res_th)) <= self.tol, "problem with l1"
        assert np.max(np.abs(res - res_th)) <= self.tol, "problem with linf"
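For reference, the computation checked above can be written as a small standalone numpy helper. This is a non-authoritative sketch of what the Ltau block computes with all-ones kernels and no bias; the helper name ltau_reference is ours, not part of leap_net.

import numpy as np

def ltau_reference(x, tau, e, d):
    # encode x with e, modulate element-wise by tau, decode with d,
    # then add the skip connection back to x
    return x + np.matmul(np.multiply(np.matmul(x, e), tau), d)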
Example #2
    def test_ok_tau0(self):
        dim_x = 10
        n_elem = 5
        dim_tau = 1

        x = Input(shape=(dim_x,), name="x")
        tau = Input(shape=(dim_tau,), name="tau")

        res_model = Ltau()((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        X_train = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_train = np.zeros(shape=(n_elem, dim_tau), dtype=np.float32)
        res = model.predict([X_train, TAU_train])
        assert np.all(res == X_train)
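The exact identity checked above follows from the same formula: with tau all zeros the modulation term vanishes and only the skip connection remains. A minimal sketch, reusing the hypothetical ltau_reference helper introduced after Example #1:

e = np.ones((dim_x, dim_tau), dtype=np.float32)
d = np.ones((dim_tau, dim_x), dtype=np.float32)
# ((X @ e) * 0) @ d == 0, so the output equals the input exactly
assert np.all(ltau_reference(X_train, TAU_train, e, d) == X_train)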
Example #3
    def test_can_learn(self):
        dim_x = 30
        n_elem = 32 * 32
        dim_tau = 5

        X_train = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_train = np.random.normal(size=(n_elem, dim_tau)).astype(np.float32)

        e = np.random.normal(size=(dim_x, dim_tau)).astype(np.float32)
        d = np.random.normal(size=(dim_tau, dim_x)).astype(np.float32)

        Y_train = np.matmul(X_train, e)
        Y_train = np.multiply(Y_train, TAU_train)
        Y_train = np.matmul(Y_train, d)
        Y_train += X_train

        # the keras model
        x = Input(shape=(dim_x, ), name="x")
        tau = Input(shape=(dim_tau, ), name="tau")
        res_model = Ltau()((x, tau))
        model = Model(inputs=[x, tau], outputs=[res_model])

        adam_ = tf.optimizers.Adam(learning_rate=1e-3)
        model.compile(optimizer=adam_, loss='mse')
        # train it
        model.fit(x=[X_train, TAU_train],
                  y=[Y_train],
                  epochs=200,
                  batch_size=32,
                  verbose=False)

        # test that it has learned something relevant
        X_test = np.random.normal(size=(n_elem, dim_x)).astype(np.float32)
        TAU_test = np.random.normal(size=(n_elem, dim_tau)).astype(np.float32)
        Y_test = np.matmul(X_test, e)
        Y_test = np.multiply(Y_test, TAU_test)
        Y_test = np.matmul(Y_test, d)
        Y_test += X_test
        res = model.predict([X_test, TAU_test])
        assert np.mean(
            np.abs(res - Y_test)) <= self.tol_learn, "problem with l1"
        assert np.max(
            np.abs(res - Y_test)) <= self.tol_learn, "problem with linf"
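Because the model is compiled with an 'mse' loss, the same sanity check can also be expressed with model.evaluate, which returns the mean squared error on the held-out pairs. This is a hedged complement to the element-wise asserts above, not part of the original test:

mse_test = model.evaluate([X_test, TAU_test], Y_test, verbose=0)
print("test mse: {:.3e}".format(mse_test))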
Example #4
    def construct_q_network(self):
        # Uses the network architecture found in the DeepMind paper.
        # The input and output sizes have changed, and the convolutions have been replaced by dense layers.
        input_x = Input(shape=(self.observation_size -
                               (self.tau_dim_end - self.tau_dim_start), ),
                        name="x")
        input_tau = Input(shape=(self.tau_dim_end - self.tau_dim_start, ),
                          name="tau")

        lay1 = Dense(self.observation_size)(input_x)
        lay1 = Activation('relu')(lay1)

        lay2 = Dense(self.observation_size)(lay1)
        lay2 = Activation('relu')(lay2)

        lay3 = Dense(2 * self.action_size)(lay2)  # put at self.action_size
        lay3 = Activation('relu')(lay3)

        l_tau = Ltau()((lay3, input_tau))

        fc1 = Dense(self.action_size)(l_tau)
        advantage = Dense(self.action_size)(fc1)
        fc2 = Dense(self.action_size)(lay3)
        value = Dense(1)(fc2)

        meaner = Lambda(lambda x: K.mean(x, axis=1))
        mn_ = meaner(advantage)
        tmp = subtract([advantage, mn_])
        policy = add([tmp, value], name="policy")

        self.model = Model(inputs=[input_x, input_tau], outputs=[policy])
        self.schedule_model, self.optimizer_model = self.make_optimiser()
        self.model.compile(loss='mse', optimizer=self.optimizer_model)

        self.target_model = Model(inputs=[input_x, input_tau],
                                  outputs=[policy])
        print("Successfully constructed networks.")
Example #5
File: train.py  Project: thuang/leap_net
def get_model(n_gen,
              n_load,
              n_line,
              n_sub,
              dim_topo,
              dim_tau,
              lr=1e-3,
              leap=True,
              act="relu",
              builder=Dense):
    """
    Build a model from the parameters given as input.

    This is a work in progress, but it is flexible enough to build every type of neural network used in the papers
    mentioned in the readme.

    Parameters
    ----------
    n_gen: ``int``
        Number of generators in the grid

    n_load: ``int``
        Number of loads in the grid

    n_line: ``int``
        Number of powerlines in the grid

    n_sub: ``int``
        Number of substations in the grid

    dim_topo: ``int``
        Total number of objects (each end of a powerline, each load and each generator) in the grid

    dim_tau: ``int``
        Dimension of the tau vector

    lr: ``float``
        Which learning rate to use

    leap: ``bool``
        Whether to use a LEAP Net (``True``) or a ResNet (``False``)

    act: ``str``
        Name of the activation function to use

    builder: ``keras builder``
        Type of layer to build with, typically ``keras.layers.Dense``

    Returns
    -------
    model: ``keras model``
        The compiled keras model. This might change in the future.

    """
    facto = 3

    # encoding part
    nb_layer_enc = 0
    size_layer_enc_p = facto * 2 * n_gen
    size_layer_enc_c = facto * 2 * n_load
    size_layer_enc_t = facto * 2 * dim_tau

    # now E
    nb_layer_E = 3
    size_layer_E = facto * 3 * n_line

    # number of leap layers
    nb_leap = 3

    # now D
    nb_layer_D = 0
    size_layer_D = 25

    # regular input
    pp_ = Input(shape=(n_gen, ), name="prod_p")
    pv_ = Input(shape=(n_gen, ), name="prod_v")
    cp_ = Input(shape=(n_load, ), name="load_p")
    cq_ = Input(shape=(n_load, ), name="load_q")

    # modulator input tau
    tau_ = Input(shape=(dim_tau, ), name="tau")

    # encode regular inputs
    pp_e = encode(pp_,
                  lss=[size_layer_enc_p for _ in range(nb_layer_enc)],
                  builder=builder)
    pv_e = encode(pv_,
                  lss=[size_layer_enc_p for _ in range(nb_layer_enc)],
                  builder=builder)
    cp_e = encode(cp_,
                  lss=[size_layer_enc_c for _ in range(nb_layer_enc)],
                  builder=builder)
    cq_e = encode(cq_,
                  lss=[size_layer_enc_c for _ in range(nb_layer_enc)],
                  builder=builder)

    if not leap:
        tau_e = encode(tau_,
                       lss=[size_layer_enc_t for _ in range(nb_layer_enc)],
                       builder=builder)

    # now concatenate everything
    li = [pp_e, pv_e, cp_e, cq_e]
    if not leap:
        li.append(tau_e)
    input_E_raw = k_concatenate(li)
    input_E_raw = Activation(act)(input_E_raw)

    # scale up so that the input of the E part has the same size for the ResNet and the LEAP Net variants
    if nb_layer_enc > 0:
        size_resnet = 2 * (size_layer_enc_p +
                           size_layer_enc_c) + size_layer_enc_t
    else:
        size_resnet = 2 * (n_gen + n_load) + dim_tau
    input_E = Dense(size_resnet, name="rescale")(input_E_raw)
    input_E = Activation(act)(input_E)

    # and compute E
    E = encode(input_E,
               lss=[size_layer_E for _ in range(nb_layer_E)],
               builder=builder)

    # now apply Ltau
    tmp = E
    for i in range(nb_leap):
        if leap:
            tmp = Ltau(name="Ltau_{}".format(i))((tmp, tau_))
        else:
            tmp = ResNetLayer(dim_tau, name="ResBlock_{}".format(i))(tmp)
    E_modulated = tmp

    # decode it
    D = encode(E_modulated, lss=[size_layer_D for _ in range(nb_layer_D)])

    # linear output
    flow_a_hat = Dense(n_line, name="flow_a_hat")(D)
    flow_p_hat = Dense(n_line, name="flow_p_hat")(D)
    line_v_hat = Dense(n_line, name="line_v_hat")(D)

    model = Model(inputs=[pp_, pv_, cp_, cq_, tau_],
                  outputs=[flow_a_hat, flow_p_hat, line_v_hat])

    adam_ = tf.optimizers.Adam(learning_rate=lr)
    model.compile(optimizer=adam_, loss='mse')
    return model
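A hedged usage sketch of get_model on a small made-up grid; the dimensions below are illustrative only and the random inputs merely exercise the forward pass:

import numpy as np

model = get_model(n_gen=5, n_load=11, n_line=20, n_sub=14,
                  dim_topo=57, dim_tau=20, lr=1e-3, leap=True)

# one random batch with the five named inputs, in the order declared above
batch_size = 32
inputs = [np.random.normal(size=(batch_size, dim)).astype(np.float32)
          for dim in (5, 5, 11, 11, 20)]
flow_a, flow_p, line_v = model.predict(inputs)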