Example #1
    # __init__ of CustomSequenceDataset (used with DataLoader in Example #2);
    # requires: torch and the project-local dataframe module
    def __init__(self,
                 chunk_size,
                 df,
                 Y='Y18',
                 step_size=2,
                 noise=False,
                 times=1):

        self.chunk_size = chunk_size
        self.step_size = step_size
        self.noise = noise
        self.times = times
        self.y = Y

        # Metadata-only helper: just n_cols and feature_cols are read from it
        self.DF = dataframe.Dataframe('')

        self.n_cols = self.DF.n_cols
        self.df = df

        self.X = self.df[self.DF.feature_cols].values
        self.Y = self.df[self.y].values

        self.num_x = self.X.shape[0]

        # Number of windows of length chunk_size at the given stride
        # (for step_size == 1 this reduces to num_x - chunk_size + 1)
        self.num_seq = (self.num_x - self.chunk_size + 1) // self.step_size

        self.X = torch.from_numpy(self.X).float()
        self.Y = torch.from_numpy(self.Y).float()
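
The matching __len__/__getitem__ are not shown above; the following is a
minimal sketch consistent with the num_seq/step_size bookkeeping in __init__.
The end-of-window target convention and the noise jitter are assumptions, not
the original implementation.

    def __len__(self):
        # times > 1 oversamples the same windows (used for fine-tuning)
        return self.num_seq * self.times

    def __getitem__(self, idx):
        # Map a (possibly oversampled) index back onto a stride-aligned window
        start = (idx % self.num_seq) * self.step_size
        x = self.X[start:start + self.chunk_size]
        y = self.Y[start + self.chunk_size - 1]  # assumed: target at window end
        if self.noise:
            x = x + torch.randn_like(x) * 0.01  # hypothetical jitter scale
        return x, y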
Example #2
# requires: numpy as np, pandas as pd, torch.utils.data.DataLoader, plus the
# project-local dataframe module, CustomSequenceDataset and torch_device
def test_predict(model,
                 chunk_size,
                 save_option=True,
                 data_dir='./data',
                 filename='submission',
                 attention=False):

    device = torch_device(model)

    model.to(device)
    model.eval()

    DF = dataframe.Dataframe(data_dir)

    pre_df = DF.get_y18_df().iloc[-chunk_size + 1:]
    test_df = DF.get_test_df()

    df = pd.concat([pre_df, test_df], axis=0)

    dataset = CustomSequenceDataset(chunk_size=chunk_size,
                                    df=df,
                                    Y='Y18',
                                    step_size=1)

    loader = DataLoader(dataset, batch_size=256, shuffle=False)

    if attention:

        y_pred = np.zeros((len(dataset), 1))
        idx = 0

        for inputs, _ in loader:

            batch_size = inputs.size(0)
            inputs = inputs.to(device)
            pred, _ = model(inputs)
            y_pred[idx:idx + batch_size] = pred.detach().cpu().numpy()
            idx += batch_size

    else:

        # Single forward pass over the whole test window (no batching needed)
        inputs = dataset.X[chunk_size - 1:, :].unsqueeze(0)
        inputs = inputs.to(device)
        pred, _ = model(inputs)
        y_pred = pred.detach().cpu().numpy().squeeze()

    y_pred = y_pred.reshape(-1, 1)
    submission = pd.DataFrame(y_pred, index=test_df.index, columns=['Y18'])

    if save_option:
        submission.to_csv(f'{data_dir}/{filename}.csv')

    else:
        return submission
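
A hedged usage sketch; the checkpoint path is hypothetical, and the
Attention_Regressor construction mirrors Example #4 rather than a documented
entry point.

import torch

DF = dataframe.Dataframe('./data')
model = regressor.Attention_Regressor(len(DF.feature_cols))
model.load_state_dict(torch.load('checkpoint.pth'))  # hypothetical path
test_predict(model, chunk_size=36, save_option=True,
             data_dir='./data', filename='submission', attention=True)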
Example #3
# requires: numpy as np, pandas as pd, torch.utils.data.DataLoader, plus the
# project-local dataframe module, CustomSequenceDataset and torch_device
def trainset_predict(model,
                     data_dir,
                     Y,
                     chunk_size,
                     attention=False,
                     window_size=1):

    device = torch_device(model)

    model.to(device)
    model.eval()

    DF = dataframe.Dataframe(data_dir)

    if Y == 'Y18':

        df = DF.get_y18_df()

        if attention:
            pre_df = (DF.get_pretrain_df()
                      .iloc[-chunk_size + 1:][DF.feature_cols])
            df = pd.concat([pre_df, df], axis=0)

        df_valid = df.dropna()
        y_idx = df_valid.index
        y_true = df_valid[Y].values
        y_pred = np.zeros(df.shape[0] - chunk_size + 1)
        idx = 0

    else:

        df = DF.get_pretrain_df()
        df[Y] = df[Y].rolling(window=window_size, min_periods=1).mean()

        y_idx = df.index
        y_true = df[Y].values
        y_pred = np.zeros(y_true.shape[0])

        idx = chunk_size - 1
        y_pred[:idx] = y_true[:idx]

    dataset = CustomSequenceDataset(chunk_size=chunk_size,
                                    df=df,
                                    Y=Y,
                                    step_size=1)
    loader = DataLoader(dataset, batch_size=chunk_size, shuffle=False)

    if attention:

        for inputs, _ in loader:

            batch_size = inputs.size(0)
            inputs = inputs.to(device)
            pred, _ = model(inputs)
            y_pred[idx:idx + batch_size] = \
                pred.detach().cpu().numpy().squeeze()
            idx += batch_size

    else:

        inputs = dataset.X.unsqueeze(0)
        inputs = inputs.to(device)
        pred, _ = model(inputs)
        y_pred = pred.detach().cpu().numpy().squeeze()

    return y_true, y_pred, y_idx
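
A hedged sketch of scoring the returned arrays with plain MSE (the project's
own metric is custom_loss.mse_AIFrenz_torch in Example #4); equal lengths of
y_true and y_pred are assumed here.

import numpy as np

y_true, y_pred, y_idx = trainset_predict(model, './data', 'Y18',
                                         chunk_size=36, attention=True)
mse = np.mean((y_true - y_pred) ** 2)
print(f'train-set MSE: {mse:.4f}')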
Example #4
# requires: argparse, time, pandas as pd, torch, torch.nn as nn,
# torch.optim.Adam and torch.utils.data.ConcatDataset, plus the project-local
# argtype, dataframe, datasets, regressor, custom_loss and predict modules and
# the Checkpoint, Early_stopping, Custom_Visdom, train and valid helpers
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument('--device',
                        type=str,
                        default='gpu',
                        help="'cpu' for CPU, 'gpu' for GPU")
    parser.add_argument('--chunk_size',
                        type=int,
                        default=36,
                        help='chunk size (sequence length)')
    parser.add_argument('--step_size',
                        type=int,
                        default=1,
                        help='stride between consecutive sequences')
    parser.add_argument('--lr', type=float, default=5e-4, help='learning rate')
    parser.add_argument('--weight_decay',
                        type=argtype.check_float,
                        default='1e-2',
                        help='weight_decay')
    parser.add_argument('--epoch',
                        type=argtype.epoch,
                        default='inf',
                        help='number of epochs for training')
    parser.add_argument('--batch_size',
                        type=int,
                        default=256,
                        help='size of batches for training')
    parser.add_argument('--val_ratio',
                        type=float,
                        default=.3,
                        help='validation set ratio')
    parser.add_argument('--model_name',
                        type=str,
                        default='main_model',
                        help='model name to save')
    parser.add_argument('--transfer',
                        type=argtype.boolean,
                        default=False,
                        help='whether to fine-tune (transfer learning)')
    parser.add_argument('--oversample_times',
                        type=int,
                        default=30,
                        help='number of oversampling repetitions for fine-tuning')
    parser.add_argument('--patience',
                        type=int,
                        default=20,
                        help='patience for early stopping')
    parser.add_argument('--c_loss',
                        type=argtype.boolean,
                        default=True,
                        help='whether to use the custom loss')
    parser.add_argument('--predict',
                        type=argtype.boolean,
                        default=False,
                        help='whether to predict and save a csv file')
    parser.add_argument('--filename',
                        type=str,
                        default='submission',
                        help='csv file name for the saved prediction result')
    parser.add_argument('--Y_list',
                        type=argtype.str_to_list,
                        default='Y12,Y15',
                        help='target Y for pre-training')
    parser.add_argument('--window_size',
                        type=int,
                        default=1,
                        help='window size for moving average')
    parser.add_argument('--attention',
                        type=argtype.boolean,
                        default=True,
                        help='use the attention-based model')

    args = parser.parse_args()

    data_dir = './data'

    if args.device == 'gpu':
        args.device = 'cuda'
    device = torch.device(args.device)

    chunk_size = args.chunk_size
    step_size = args.step_size
    lr = args.lr
    weight_decay = args.weight_decay
    EPOCH = args.epoch
    batch_size = args.batch_size
    val_ratio = args.val_ratio
    model_name = args.model_name
    transfer_learning = args.transfer
    times = args.oversample_times
    patience = args.patience
    c_loss = args.c_loss
    pred = args.predict
    filename = args.filename
    Y_list = args.Y_list
    window_size = args.window_size
    attention = args.attention

    params = {
        'chunk_size': chunk_size,
        'step_size': step_size,
        'learning_rate': lr,
        'weight_decay': weight_decay,
        'epoch size': EPOCH,
        'batch_size': batch_size,
        'valid_ratio': val_ratio,
        'model_name': model_name,
        'transfer_learning': transfer_learning,
        'oversample_times': times,
        'early_stopping_patience': patience,
        'c_loss': c_loss,
        'pred': pred,
        'filename': filename,
        'Y_list': Y_list,
        'window_size': window_size,
        'attention': attention
    }

    Y = ''.join(Y_list)

    model_name = f'{model_name}/{Y}'

    Dataframe = dataframe.Dataframe(data_dir=data_dir)
    input_size = len(Dataframe.feature_cols)

    if attention:
        model = regressor.Attention_Regressor(input_size).to(device)
    else:
        model = regressor.BiLSTM_Regressor().to(device)

    checkpoint = Checkpoint(model_name=model_name,
                            transfer_learning=transfer_learning)
    early_stopping = Early_stopping(patience=patience)
    vis = Custom_Visdom(model_name, transfer_learning)
    vis.print_params(params)

    if transfer_learning:

        dataset_list = []

        if attention:

            pre_df = (Dataframe.get_pretrain_df()
                      .iloc[-chunk_size + 1:][Dataframe.feature_cols])
            df = Dataframe.get_y18_df()

            df = pd.concat([pre_df, df], axis=0)

        else:
            df = Dataframe.get_y18_df()

        train_dataset = datasets.CustomSequenceDataset(chunk_size=chunk_size,
                                                       df=df,
                                                       Y='Y18',
                                                       step_size=step_size,
                                                       noise=True,
                                                       times=times)

        dataset_list.append(train_dataset)

        dataset = ConcatDataset(dataset_list)

        train_loader, valid_loader = datasets.split_dataset(
            dataset=dataset,
            batch_size=batch_size,
            val_ratio=val_ratio,
            shuffle=True)

        checkpoint.load_model(model)

    else:

        dataset_list = []

        for y in Y_list:

            df = Dataframe.get_pretrain_df()
            df[y] = df[y].rolling(window=window_size, min_periods=1).mean()

            dataset = datasets.CustomSequenceDataset(chunk_size=chunk_size,
                                                     df=df,
                                                     Y=y,
                                                     step_size=step_size,
                                                     noise=False,
                                                     times=1)

            dataset_list.append(dataset)

        dataset = ConcatDataset(dataset_list)

        train_loader, valid_loader = datasets.split_dataset(
            dataset=dataset,
            batch_size=batch_size,
            val_ratio=val_ratio,
            shuffle=True)

    optimizer = Adam(model.parameters(),
                     lr=lr,
                     weight_decay=float(weight_decay))

    if c_loss:
        criterion = custom_loss.mse_AIFrenz_torch
    else:
        criterion = nn.MSELoss()

    training_time = time.time()
    epoch = 0
    y_df = Dataframe.get_pretrain_df()[Y_list]
    y18_df = Dataframe.get_y18_df()[['Y18']]

    while epoch < EPOCH:

        print(f'\r Y: {Y} chunk size: {chunk_size} '
              f'transfer: {transfer_learning}')

        epoch += 1
        train_loss_per_epoch, train_loss_list_per_batch, batch_list = train(
            model=model,
            train_loader=train_loader,
            criterion=criterion,
            optimizer=optimizer,
            epoch=epoch,
            transfer_learning=transfer_learning,
            attention=attention,
            freeze_name='transfer_layer')

        valid_loss = valid(model=model,
                           valid_loader=valid_loader,
                           criterion=criterion,
                           attention=attention)

        iter_time = time.time() - training_time

        print(
            f'\r Epoch: {epoch:3d}/{str(EPOCH):3s}\t'
            f'train time: {int(iter_time//60):2d}m {iter_time%60:5.2f}s\t'
            f'avg train loss: {train_loss_per_epoch:7.3f}\t'
            f'valid loss: {valid_loss:7.3f}')

        checkpoint.save_log(batch_list, epoch, train_loss_list_per_batch,
                            train_loss_per_epoch, valid_loss)

        early_stop, is_best = early_stopping(valid_loss)
        checkpoint.save_checkpoint(model, optimizer, is_best)

        vis.print_training(EPOCH, epoch, training_time, train_loss_per_epoch,
                           valid_loss, patience, early_stopping.counter)
        vis.loss_plot(checkpoint)

        print('-----' * 17)

        y_true, y_pred, y_idx = predict.trainset_predict(
            model=model,
            data_dir=data_dir,
            Y=Y_list[0],
            chunk_size=chunk_size,
            attention=attention,
            window_size=window_size)

        y18_true, y18_pred, y18_idx = predict.trainset_predict(
            model=model,
            data_dir=data_dir,
            Y='Y18',
            chunk_size=chunk_size,
            attention=attention,
            window_size=window_size)

        y_df['pred'] = y_pred
        y18_df['pred'] = y18_pred

        vis.predict_plot(y_df, 'pre')
        vis.predict_plot(y18_df, 'trans')
        vis.print_error()

        if early_stop:
            break

    # Reload the best checkpoint saved during training
    checkpoint.load_model(model, transfer_learning=transfer_learning)

    y_true, y_pred, y_idx = predict.trainset_predict(model=model,
                                                     data_dir=data_dir,
                                                     Y=Y_list[0],
                                                     chunk_size=chunk_size,
                                                     attention=attention,
                                                     window_size=window_size)

    y18_true, y18_pred, y18_idx = predict.trainset_predict(
        model=model,
        data_dir=data_dir,
        Y='Y18',
        chunk_size=chunk_size,
        attention=attention,
        window_size=window_size)

    y_df['pred'] = y_pred
    y18_df['pred'] = y18_pred

    vis.predict_plot(y_df, 'pre')
    vis.predict_plot(y18_df, 'trans')
    vis.print_error()

    if pred:

        predict.test_predict(model=model,
                             chunk_size=chunk_size,
                             filename=filename,
                             attention=attention)
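
The parser above relies on project-local argtype converters that are not
shown; the following is a minimal sketch of plausible implementations,
assuming standard argparse type-callable semantics. These are illustrations,
not the project's actual code.

import argparse

def boolean(s):
    # Accept common true/false spellings from the command line
    if s.lower() in ('true', 't', '1', 'yes'):
        return True
    if s.lower() in ('false', 'f', '0', 'no'):
        return False
    raise argparse.ArgumentTypeError(f'boolean value expected, got {s!r}')

def epoch(s):
    # 'inf' trains until early stopping; otherwise an integer epoch count
    return float('inf') if s == 'inf' else int(s)

def check_float(s):
    float(s)  # validate only; main() converts later with float()
    return s

def str_to_list(s):
    # 'Y12,Y15' -> ['Y12', 'Y15']
    return s.split(',')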
Example #5
    # Tail of split_dataset: wrap the shared dataset in sampler-driven loaders
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              sampler=train_sampler,
                              pin_memory=True)
    valid_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              sampler=valid_sampler,
                              pin_memory=True)

    return train_loader, valid_loader


if __name__ == '__main__':

    Dataframe = dataframe.Dataframe('./data')
    df = Dataframe.get_pretrain_df()

    dataset = CustomSequenceDataset(chunk_size=24,
                                    df=df,
                                    Y='Y09',
                                    step_size=5,
                                    noise=True,
                                    times=50)

    batch_size = 256
    validation_split = 0.5
    shuffle = False
    random_seed = 42

    dataset_size = len(dataset)
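
train_sampler and valid_sampler come from the body of split_dataset, which is
cut off above; the following is a minimal sketch of the standard index-split
recipe using the variables set up in the __main__ block, not the original
implementation.

import numpy as np
from torch.utils.data import SubsetRandomSampler

indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, valid_indices = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(valid_indices)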