コード例 #1
0
ファイル: train.py プロジェクト: sonamghosh/whack_2018
def predict(t_net: DaRnnNet, t_dat: TrainData, train_size: int, batch_size: int, T: int, on_train=False):
    """Run the DA-RNN over a data span and collect its predictions.

    With on_train=True the windows are drawn from the start of the series
    (train_size - T + 1 rows); otherwise windows cover the hold-out tail
    (everything after train_size). Returns a numpy array of shape
    (n_rows, n_targets).
    """
    n_targets = t_dat.targs.shape[1]
    n_rows = (train_size - T + 1) if on_train else (t_dat.feats.shape[0] - train_size)
    y_pred = np.zeros((n_rows, n_targets))

    for start in range(0, n_rows, batch_size):
        stop = min(start + batch_size, n_rows)
        rows = range(start, stop)
        feats_batch = np.zeros((len(rows), T - 1, t_dat.feats.shape[1]))
        hist_batch = np.zeros((len(rows), T - 1, t_dat.targs.shape[1]))

        for pos, row in enumerate(rows):
            # Each prediction consumes T-1 steps of features and target history.
            if on_train:
                window = range(row, row + T - 1)
            else:
                window = range(row + train_size - T, row + train_size - 1)
            feats_batch[pos, :, :] = t_dat.feats[window, :]
            hist_batch[pos, :] = t_dat.targs[window]

        hist_var = numpy_to_tvar(hist_batch)
        _, encoded = t_net.encoder(numpy_to_tvar(feats_batch))
        y_pred[start:stop] = t_net.decoder(encoded, hist_var).cpu().data.numpy()

    return y_pred
コード例 #2
0
ファイル: train.py プロジェクト: sonamghosh/whack_2018
def train_iteration(t_net: DaRnnNet, loss_func: typing.Callable, X, y_history, y_target):
    """Perform a single optimization step on one minibatch.

    Zeroes both optimizers, runs encoder + decoder on the numpy inputs,
    backpropagates the loss, steps both optimizers, and returns the
    scalar loss value.
    """
    for opt in (t_net.enc_opt, t_net.dec_opt):
        opt.zero_grad()

    _, input_encoded = t_net.encoder(numpy_to_tvar(X))
    prediction = t_net.decoder(input_encoded, numpy_to_tvar(y_history))

    loss = loss_func(prediction, numpy_to_tvar(y_target))
    loss.backward()

    t_net.enc_opt.step()
    t_net.dec_opt.step()

    return loss.item()
コード例 #3
0
ファイル: train.py プロジェクト: sonamghosh/whack_2018
def da_rnn(train_data: TrainData, n_targs: int,
           encoder_hidden_size=64, decoder_hidden_size=64,
           T=10, learning_rate=0.01, batch_size=128):
    """Build the DA-RNN training configuration and network.

    Args:
        train_data: TrainData holding `feats` (driving series) and `targs`.
        n_targs: number of target columns the decoder emits.
        encoder_hidden_size / decoder_hidden_size: LSTM hidden sizes.
        T: attention window length (time steps per example).
        learning_rate: Adam learning rate shared by encoder and decoder.
        batch_size: minibatch size recorded in the TrainConfig.

    Returns:
        (TrainConfig, DaRnnNet) tuple.
    """
    # First 70% of the series is used for training.
    train_cfg = TrainConfig(T, int(train_data.feats.shape[0] * 0.7), batch_size, nn.MSELoss())
    logger.info(f"Training size: {train_cfg.train_size:d}.")

    # Fix: the json.dump calls below raised FileNotFoundError when the
    # "data" directory did not already exist.
    os.makedirs("data", exist_ok=True)

    enc_kwargs = {"input_size": train_data.feats.shape[1], "hidden_size": encoder_hidden_size, "T": T}
    encoder = Encoder(**enc_kwargs).to(device)
    with open(os.path.join("data", "enc_kwargs.json"), "w") as f:
        json.dump(enc_kwargs, f, indent=4)

    dec_kwargs = {"encoder_hidden_size": encoder_hidden_size,
                  "decoder_hidden_size": decoder_hidden_size, "T": T, "out_feats": n_targs}
    decoder = Decoder(**dec_kwargs).to(device)
    with open(os.path.join("data", "dec_kwargs.json"), "w") as f:
        json.dump(dec_kwargs, f, indent=4)

    # Only parameters with requires_grad participate in optimization.
    encoder_optimizer = optim.Adam(
                        params=[p for p in encoder.parameters() if p.requires_grad],
                        lr=learning_rate)
    decoder_optimizer = optim.Adam(
                        params=[p for p in decoder.parameters() if p.requires_grad],
                        lr=learning_rate)
    da_rnn_net = DaRnnNet(encoder, decoder, encoder_optimizer, decoder_optimizer)

    return train_cfg, da_rnn_net
コード例 #4
0
ファイル: body.py プロジェクト: tonylibing/da-rnn-3
def predict(t_net: DaRnnNet,
            t_dat: TrainData,
            train_size: int,
            batch_size: int,
            T: int,
            on_train=False,
            eval=False):
    """Run the DA-RNN over a data span and collect its predictions.

    eval=True sizes the output by the module-level VALI_SIZE (it takes
    precedence over on_train); otherwise on_train selects the training
    span versus the hold-out tail. In this variant the decoder always
    receives an all-zero target history.

    NOTE(review): the `eval` parameter shadows the builtin of the same
    name, but it is kept as-is for caller compatibility.
    """
    n_out = t_dat.targs.shape[1]
    if eval:
        n_rows = VALI_SIZE
    elif on_train:
        n_rows = train_size - T + 1
    else:
        n_rows = t_dat.feats.shape[0] - train_size
    y_pred = np.zeros((n_rows, n_out))

    for start in range(0, n_rows, batch_size):
        stop = min(start + batch_size, n_rows)
        rows = range(start, stop)
        feats_batch = np.zeros((len(rows), T - 1, t_dat.feats.shape[1]))
        # Deliberately left all-zero: no target history is fed to the
        # decoder at prediction time in this variant.
        hist_batch = np.zeros((len(rows), T - 1, t_dat.targs.shape[1]))

        for pos, row in enumerate(rows):
            if on_train:
                window = range(row, row + T - 1)
            else:
                window = range(row + train_size - T, row + train_size - 1)
            feats_batch[pos, :, :] = t_dat.feats[window, :]

        hist_var = numpy_to_tvar(hist_batch)
        _, encoded = t_net.encoder(numpy_to_tvar(feats_batch))
        y_pred[start:stop] = t_net.decoder(encoded, hist_var).cpu().data.numpy()

    return y_pred
コード例 #5
0
def train_iteration(t_net: DaRnnNet, loss_func: typing.Callable, X, y_history,
                    y_target):
    """Perform a single optimization step on one minibatch.

    Converts the numpy inputs to device tensors, runs encoder + decoder,
    backpropagates the loss, steps both optimizers, and returns the
    scalar loss value.
    """
    t_net.enc_opt.zero_grad()
    t_net.dec_opt.zero_grad()

    # torch.autograd.Variable has been a deprecated no-op wrapper since
    # PyTorch 0.4 — plain tensors carry autograd state themselves.
    X = _np_to_device(X)
    y_history = _np_to_device(y_history)
    y_true = _np_to_device(y_target)

    _, input_encoded = t_net.encoder(X)
    y_pred = t_net.decoder(input_encoded, y_history)

    loss = loss_func(y_pred, y_true)
    loss.backward()

    t_net.enc_opt.step()
    t_net.dec_opt.step()

    return loss.item()


def _np_to_device(arr):
    """Convert a numpy array to a float32 tensor on the target device."""
    return torch.from_numpy(arr).float().to(device)
コード例 #6
0
        def set_params(train_data, device, **da_rnn_kwargs):
            """Build a TrainConfig and DaRnnNet from a kwargs dict.

            Uses the first 95% of the data as the training span; the
            encoder/decoder time step is the configured time_step scaled
            down by the enclosing object's predict_size.
            """
            steps = int(da_rnn_kwargs["time_step"] / self.predict_size)
            train_configs = TrainConfig(da_rnn_kwargs["time_step"],
                                        int(train_data.shape[0] * 0.95),
                                        da_rnn_kwargs["batch_size"],
                                        nn.MSELoss())

            encoder = Encoder(input_size=train_data.shape[1],
                              hidden_size=da_rnn_kwargs["en_hidden_size"],
                              time_step=steps).to(device)
            decoder = Decoder(encoder_hidden_size=da_rnn_kwargs["en_hidden_size"],
                              decoder_hidden_size=da_rnn_kwargs["de_hidden_size"],
                              time_step=steps,
                              out_feats=da_rnn_kwargs["target_cols"]).to(device)

            def _adam(module):
                # Identical optimizer settings for both halves of the net.
                trainable = [p for p in module.parameters() if p.requires_grad]
                return optim.Adam(params=trainable,
                                  lr=da_rnn_kwargs["learning_rate"],
                                  betas=(0.9, 0.999),
                                  eps=1e-08)

            da_rnn_net = DaRnnNet(encoder, decoder, _adam(encoder),
                                  _adam(decoder))

            return train_configs, da_rnn_net
def da_rnn(train_data: TrainData,
           n_targs: int,
           learning_rate=0.01,
           encoder_hidden_size=64,
           decoder_hidden_size=64,
           T=10,
           batch_size=128):
    """Construct the DA-RNN training configuration and network.

    Args:
        train_data: TrainData holding `feats` (driving series) and `targs`.
        n_targs: number of target columns the decoder emits.
        learning_rate: Adam learning rate shared by encoder and decoder.
        encoder_hidden_size / decoder_hidden_size: LSTM hidden sizes.
        T: attention window length (time steps per example).
        batch_size: minibatch size recorded in the TrainConfig.

    Returns:
        (TrainConfig, DaRnnNet) tuple.
    """
    # First 70% of the series is used for training.
    training_configuration = TrainConfig(T,
                                         int(train_data.feats.shape[0] * 0.7),
                                         batch_size, nn.MSELoss())

    logger.info(f"Training size: {training_configuration.train_size:d}.")

    # Fix: the json.dump calls below raised FileNotFoundError when the
    # "data" directory did not already exist.
    os.makedirs("data", exist_ok=True)

    encoder_kwargs = {
        "input_size": train_data.feats.shape[1],
        "hidden_size": encoder_hidden_size,
        "T": T
    }
    encoder = Encoder(**encoder_kwargs).to(device)
    with open(os.path.join("data", "enc_kwargs.json"), "w") as fi:
        json.dump(encoder_kwargs, fi, indent=4)

    decoder_kwargs = {
        "encoder_hidden_size": encoder_hidden_size,
        "decoder_hidden_size": decoder_hidden_size,
        "T": T,
        "out_feats": n_targs
    }
    decoder = Decoder(**decoder_kwargs).to(device)
    with open(os.path.join("data", "dec_kwargs.json"), "w") as fi:
        json.dump(decoder_kwargs, fi, indent=4)

    # Only parameters with requires_grad participate in optimization.
    encoder_optimizer = optim.Adam(
        params=[p for p in encoder.parameters() if p.requires_grad],
        lr=learning_rate)
    decoder_optimizer = optim.Adam(
        params=[p for p in decoder.parameters() if p.requires_grad],
        lr=learning_rate)

    da_rnn_net = DaRnnNet(encoder, decoder, encoder_optimizer,
                          decoder_optimizer)

    return training_configuration, da_rnn_net