Exemplo n.º 1
0
def data_transform(opt):
    """Load the raw dataset file and split it into normalised sets.

    Reads the file named by ``opt.datadir``/``opt.dataset`` (relative to
    the parent directory), then delegates splitting and normalisation to
    ``prepare_datasets``.  The min/max values are returned alongside the
    splits so the normalisation can be undone later.
    """
    raw_path = os.path.join('..', opt.datadir, opt.dataset)
    data = torch.Tensor(np.genfromtxt(raw_path))

    (X_train, Y_train, X_val, Y_val,
     X_test, Y_test, min_value, max_value) = prepare_datasets(
        data, opt.n_inp, opt.n_out)

    return (X_train, Y_train, X_val, Y_val,
            X_test, Y_test, min_value, max_value)
Exemplo n.º 2
0
def data_transform(opt):
    """Load the raw dataset file and build temporal/spatial/exogenous splits.

    Reads the file named by ``opt.datadir``/``opt.dataset`` (relative to the
    parent directory) and hands it to ``prepare_datasets``, which produces
    the full 21-tuple of train/val/test splits plus the min/max values
    needed to undo the normalisation.  The tuple is passed through to the
    caller unchanged, so there is no need to unpack and repack it here.
    """
    raw_path = os.path.join('..', opt.datadir, opt.dataset)
    data = torch.Tensor(np.genfromtxt(raw_path))

    return prepare_datasets(data, opt.n_inp_sp, opt.n_out_sp,
                            opt.n_inp_tem, opt.n_exo,
                            opt.dim_x, opt.dim_y)
Exemplo n.º 3
0
Arquivo: run.py Projeto: akuzeee/AFLAC
def run_experiment(meta_cfg, train_cfg):
    """Train on the configured domain split and log the test accuracy.

    NOTE(review): ``log_dir`` is not defined in this function — presumably a
    module-level global set elsewhere in the file; confirm before reuse.
    """
    writer = SummaryWriter(log_dir=log_dir)
    dataset_class = meta_cfg['dataset_class']
    test_key = str(meta_cfg['test_key'])
    train_fn = meta_cfg['train']

    # Build the splits for this held-out test domain, then run training.
    train_ds, valid_ds, test_ds = prepare_datasets(
        test_key, meta_cfg['validation'], dataset_class, True,
        meta_cfg['seed'])
    best_acc = train_fn(writer, train_ds, valid_ds, test_ds, **train_cfg)

    # Report every (domain, accuracy) pair as a markdown table row and
    # as a TensorBoard scalar.
    results = [[test_key, best_acc]]
    for test_domain_key, test_acc in results:
        print('| %s | %.3f |' % (test_domain_key, test_acc))
        writer.add_scalar(meta_cfg['dataset_name'] + '_' + test_domain_key,
                          test_acc, 0)
Exemplo n.º 4
0
def main():
    """Train level-1 LightGBM models on cached feature sets.

    Loads the selected cached .h5 feature files, builds the train/test
    frames and target via ``prepare_datasets``, and fits a single LGBM
    model, saving feature importances only (no test predictions or
    submission in this configuration).
    """

    # Select relevant cached features.  Commented entries are deliberately
    # excluded from this run — they are toggles, not dead code.
    train_feats_list = [
        # '../features/train_stats_v10.h5',
        '../features/train_delta_v9.h5',
        # '../features/train_peak_v9.h5',
        # '../features/train_roll_v9.h5',
    ]
    test_feats_list = [
        '../features/test_stats_v9.h5',
        '../features/test_delta_v9.h5',
        '../features/test_peak_v9.h5',
        '../features/test_roll_v9.h5',
    ]

    train, test, y_tgt = prepare_datasets(train_feats_list, test_feats_list)

    # Select models to train (fix: plain boolean literal instead of bool(1)).
    controls = {
        'lgbm-models': True,
    }

    # Features excluded from model training.
    feat_blacklist = ['std_abs_delta', 'std_rolling_std_500', 'kurtosis']

    '''
    LGBM Models
    '''

    seed = 42
    # Fix: the original used an f-string with no placeholders (ruff F541);
    # the runtime value is identical.
    model_name = 'm0_v9_1'

    if controls['lgbm-models']:

        lgbm_params = {
            'num_leaves': 4,
            'learning_rate': 0.5,
            'min_child_samples': 300,
            'n_estimators': 1000,
            'reg_lambda': 1,
            'bagging_fraction': 0.6,
            'bagging_freq': 1,
            'bagging_seed': seed,
            'silent': 1,
            'verbose': 1,
        }

        lgbm_model_0 = LgbmModel(
            train=train,
            test=test,
            y_tgt=y_tgt,
            output_dir='../level_1_preds/',
            fit_params=lgbm_params,
            sample_weight=1.0,
            postprocess_sub=True,
            feat_blacklist=feat_blacklist,
            cv_random_seed=seed,
        )

        # Importances only: no test prediction, no saved preds, no submission.
        lgbm_model_0.fit_predict(
            iteration_name=model_name,
            predict_test=False,
            save_preds=False,
            produce_sub=False,
            save_imps=True,
            save_aux_visu=False,
        )
Exemplo n.º 5
0
# Other configuration
# NOTE(review): action='store_true' combined with default=True means
# args.no_cuda is always True, so CUDA can never be enabled from the CLI —
# confirm whether default=False was intended.
parser.add_argument('--no-cuda',
                    action='store_true',
                    default=True,
                    help='disables CUDA training')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Fix RNG seeds for reproducibility (CPU, NumPy, and CUDA when enabled).
torch.manual_seed(0)
np.random.seed(0)
if args.cuda:
    torch.cuda.manual_seed(0)

############# fetch torch Datasets ###################
######### you may change the dataset split % #########
train_set, val_set, test_set = prepare_datasets(splits=[0.7, 0.15, 0.15])

############# create torch DataLoaders ###############
########### you may change the batch size ############
train_loader = torch.utils.data.DataLoader(train_set,
                                           batch_size=args.batch_size,
                                           shuffle=True)
# Validation/test run in large fixed batches; shuffling is unnecessary there.
val_loader = torch.utils.data.DataLoader(val_set, batch_size=1000)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=1000)

################ initialize the model ################
if args.model == 'convnet':
    model = ConvNet()
elif args.model == 'mymodel':
    model = MyModel()
else: