Example #1
0
def da_rnn(train_data: TrainData, n_targs: int,
           encoder_hidden_size=64, decoder_hidden_size=64,
           T=10, learning_rate=0.01, batch_size=128):
    """Build the DA-RNN training configuration and network.

    Args:
        train_data: project ``TrainData`` container; ``train_data.feats``
            holds the driving series (rows = time steps, cols = features).
        n_targs: number of target series the decoder outputs.
        encoder_hidden_size: hidden size of the encoder RNN.
        decoder_hidden_size: hidden size of the decoder RNN.
        T: attention window length (time steps per training sample).
        learning_rate: Adam learning rate used for both optimizers.
        batch_size: mini-batch size recorded in the ``TrainConfig``.

    Returns:
        Tuple of ``(TrainConfig, DaRnnNet)``.
    """
    # First 70% of the rows form the training split.
    train_cfg = TrainConfig(T, int(train_data.feats.shape[0] * 0.7), batch_size, nn.MSELoss())
    logger.info(f"Training size: {train_cfg.train_size:d}.")

    # Fix: the original raised FileNotFoundError when ./data did not exist.
    os.makedirs("data", exist_ok=True)

    enc_kwargs = {"input_size": train_data.feats.shape[1], "hidden_size": encoder_hidden_size, "T": T}
    encoder = Encoder(**enc_kwargs).to(device)
    # Persist constructor kwargs so the encoder can be re-instantiated later.
    with open(os.path.join("data", "enc_kwargs.json"), "w") as f:
        json.dump(enc_kwargs, f, indent=4)

    dec_kwargs = {"encoder_hidden_size": encoder_hidden_size,
                  "decoder_hidden_size": decoder_hidden_size, "T": T, "out_feats": n_targs}
    decoder = Decoder(**dec_kwargs).to(device)
    with open(os.path.join("data", "dec_kwargs.json"), "w") as f:
        json.dump(dec_kwargs, f, indent=4)

    # Optimize only parameters that require gradients (skips frozen weights).
    encoder_optimizer = optim.Adam(
                        params=[p for p in encoder.parameters() if p.requires_grad],
                        lr=learning_rate)
    decoder_optimizer = optim.Adam(
                        params=[p for p in decoder.parameters() if p.requires_grad],
                        lr=learning_rate)
    da_rnn_net = DaRnnNet(encoder, decoder, encoder_optimizer, decoder_optimizer)

    return train_cfg, da_rnn_net
Example #2
0
        def set_params(train_data, device, **da_rnn_kwargs):
            """Create a TrainConfig and a DaRnnNet from keyword settings.

            NOTE(review): `self` is not a parameter here — it must be
            captured from an enclosing scope (closure over a method's
            `self`); confirm against the surrounding definition.
            """
            # 95% of the rows go to the training split.
            split = int(train_data.shape[0] * 0.95)
            train_configs = TrainConfig(da_rnn_kwargs["time_step"],
                                        split,
                                        da_rnn_kwargs["batch_size"],
                                        nn.MSELoss())

            # Encoder and decoder share the same scaled attention window.
            window = int(da_rnn_kwargs["time_step"] / self.predict_size)

            encoder = Encoder(input_size=train_data.shape[1],
                              hidden_size=da_rnn_kwargs["en_hidden_size"],
                              time_step=window).to(device)
            decoder = Decoder(encoder_hidden_size=da_rnn_kwargs["en_hidden_size"],
                              decoder_hidden_size=da_rnn_kwargs["de_hidden_size"],
                              time_step=window,
                              out_feats=da_rnn_kwargs["target_cols"]).to(device)

            # One Adam optimizer per sub-net, over trainable parameters only.
            lr = da_rnn_kwargs["learning_rate"]
            optimizers = []
            for net in (encoder, decoder):
                trainable = [p for p in net.parameters() if p.requires_grad]
                optimizers.append(optim.Adam(params=trainable,
                                             lr=lr,
                                             betas=(0.9, 0.999),
                                             eps=1e-08))

            da_rnn_net = DaRnnNet(encoder, decoder, optimizers[0],
                                  optimizers[1])

            return train_configs, da_rnn_net
def da_rnn(train_data: TrainData,
           n_targs: int,
           learning_rate=0.01,
           encoder_hidden_size=64,
           decoder_hidden_size=64,
           T=10,
           batch_size=128):
    """Assemble a TrainConfig and a DaRnnNet for the DA-RNN model.

    The first 70% of the rows in ``train_data.feats`` form the training
    split. The encoder/decoder constructor kwargs are serialized to
    ``data/enc_kwargs.json`` / ``data/dec_kwargs.json`` so the nets can
    be rebuilt from disk later.

    Args:
        train_data: project ``TrainData`` container of driving series.
        n_targs: number of target columns the decoder predicts.
        learning_rate: Adam learning rate shared by both optimizers.
        encoder_hidden_size: encoder RNN hidden size.
        decoder_hidden_size: decoder RNN hidden size.
        T: attention window length in time steps.
        batch_size: mini-batch size stored in the ``TrainConfig``.

    Returns:
        Tuple of ``(TrainConfig, DaRnnNet)``.
    """
    # TrainConfig is a NamedTuple: (T, train_size, batch_size, loss_func).
    n_train = int(train_data.feats.shape[0] * 0.7)
    training_configuration = TrainConfig(T, n_train, batch_size, nn.MSELoss())
    logger.info(f"Training size: {training_configuration.train_size:d}.")

    encoder_kwargs = {
        "input_size": train_data.feats.shape[1],
        "hidden_size": encoder_hidden_size,
        "T": T,
    }
    encoder = Encoder(**encoder_kwargs).to(device)
    with open(os.path.join("data", "enc_kwargs.json"), "w") as fi:
        json.dump(encoder_kwargs, fi, indent=4)

    decoder_kwargs = {
        "encoder_hidden_size": encoder_hidden_size,
        "decoder_hidden_size": decoder_hidden_size,
        "T": T,
        "out_feats": n_targs,
    }
    decoder = Decoder(**decoder_kwargs).to(device)
    with open(os.path.join("data", "dec_kwargs.json"), "w") as fi:
        json.dump(decoder_kwargs, fi, indent=4)

    # Optimize only the trainable parameters of each sub-network.
    encoder_optimizer = optim.Adam(
        params=[p for p in encoder.parameters() if p.requires_grad],
        lr=learning_rate)
    decoder_optimizer = optim.Adam(
        params=[p for p in decoder.parameters() if p.requires_grad],
        lr=learning_rate)

    da_rnn_net = DaRnnNet(encoder, decoder, encoder_optimizer,
                          decoder_optimizer)
    return training_configuration, da_rnn_net