Ejemplo n.º 1
0
def train(dataset, args):
    """Train (or load) a DMDNN model for every dropout / prediction-length
    combination and score CRPS and RMSE on the validation and test splits.

    NOTE(review): the original spelled the first parameter ``datase``; it is
    unused in the body (the dataset name is read from ``args.dataset``), so it
    is renamed here for consistency with the sibling ``train`` examples.
    """
    # Resolve dataset-specific CSV path and input columns from args.dataset.
    if args.dataset == 'AE':
        args.data_path = '../dataset/energydata_complete.csv'
        args.inputs_index = ['Windspeed']

    if 'PM' in args.dataset:
        args.data_path = '../dataset/{}20100101_20151231.csv'.format(
            args.dataset)
        args.inputs_index = ['Iws']
    args.num_inputs = len(args.inputs_index)

    pred_length_list = [1]  ## ranging from ten minutes to 60 minutes
    dropout_list = [0, 0.1]

    # Sweep the full hyper-parameter grid.
    for dropout, pred_length in itertools.product(dropout_list,
                                                  pred_length_list):
        args.dropout = dropout
        args.pred_length = pred_length
        args.train_length = args.timesteps + args.pred_length

        model = DMDNN_model(args, device)
        load_path = os.path.join(model.args.dataset, model.model_dir,
                                 model.args.save_model_path)
        if not os.path.exists(load_path):
            print('load path {} does not exist, training from scratch'.format(
                load_path))
            train_losses = model.train()
            model.save_model()
        else:
            print('load path exists, loading...')
            model.load_model()

        # Score each split once: generate a single batch of samples per mode
        # and reuse it for both CRPS and RMSE.  The original regenerated the
        # samples with identical arguments for every metric, doubling the
        # sampling cost for no benefit.
        for mode in ('validation', 'test'):
            real_y, final_generation = model.CRPS_generation(
                args.num_sampling, args.num_experiments, mode=mode
            )  ## shape of num_experiments, num_sampling, N, dim
            CRPS_score = CRPS(real_y, final_generation, average=args.average)
            model.save_score(CRPS_score, mode=mode, metric='CRPS')
            RMSE_score = RMSE(real_y, final_generation, average=args.average)
            model.save_score(RMSE_score, mode=mode, metric='RMSE')
Ejemplo n.º 2
0
def train(dataset, args):
    """Score the MPersistent baseline on the test split.

    Resolves the dataset-specific CSV path and input columns, then, for
    each prediction length, computes MAE and RMSE on the model's test
    predictions, persists both scores, and finally prints the collected
    score list.
    """
    if args.dataset == 'AE':
        args.data_path = '../dataset/energydata_complete.csv'
        args.inputs_index = ['Windspeed']

    if 'PM' in args.dataset:
        args.data_path = '../dataset/{}20100101_20151231.csv'.format(
            args.dataset)
        args.inputs_index = ['Iws']

    horizons = [1]  ## ranging from ten minutes to 60 minutes
    scores = []
    for horizon in horizons:
        args.pred_length = horizon
        args.train_length = args.timesteps + args.pred_length
        model = MPersistent(args, device)

        ### test stage###
        real_y, final_generation = model.pred_eval('test')

        # Compute, collect and persist each metric (MAE first, then RMSE,
        # matching the saved-score order).
        for metric_name, metric_fn in (('MAE', MAE), ('RMSE', RMSE)):
            score = metric_fn(real_y, final_generation, average=args.average)
            scores.append(score)
            model.save_score(score, mode='test', metric=metric_name)
    print(scores)
Ejemplo n.º 3
0
def train(dataset, args):
    """Evaluate a GP model on the test split.

    For each prediction length, samples predictions via
    ``model.pred_eval``, then computes, prints, and persists the CRPS and
    RMSE scores through ``model.save_score``.
    """
    if args.dataset == 'AE':
        args.data_path = '../dataset/energydata_complete.csv'
        args.inputs_index = ['Windspeed']

    if 'PM' in args.dataset:
        args.data_path = '../dataset/{}20100101_20151231.csv'.format(
            args.dataset)
        args.inputs_index = ['Iws']

    pred_length_list = [1]  ## ranging from ten minutes to 60 minutes

    for pred_length in pred_length_list:
        args.pred_length = pred_length
        args.train_length = args.timesteps + args.pred_length
        model = GP(args, device)

        ### test stage###
        real_y, final_generation = model.pred_eval(args.num_sampling,
                                                   args.num_experiments,
                                                   'test')

        # Fix: the original bound this CRPS result to a local named
        # ``MAE_score`` even though the value saved under metric='CRPS' is
        # a CRPS score; the local is renamed to match what it holds.
        CRPS_score = CRPS(real_y, final_generation, average=args.average)
        model.save_score(CRPS_score, mode='test', metric='CRPS')
        print(CRPS_score)

        RMSE_score = RMSE(real_y, final_generation, average=args.average)
        model.save_score(RMSE_score, mode='test', metric='RMSE')
        print(RMSE_score)
            # NOTE(review): this span is a truncated fragment — the function
            # signature and the enclosing
            # ``if not os.path.exists(load_path):`` header (presumably from a
            # lost "Ejemplo n.º 4") were dropped during extraction, so the
            # code below is not valid Python on its own.  Left byte-identical.
            print('load path {} does not exist, training from scratch'.format(
                load_path))
            train_losses = model.train()
            model.save_model()
        else:
            print('load path exists, loading...')
            model.load_model()

        #### validation on the validation set##########
        real_y, final_prediction = model.CRPS_prediction(
            args.num_sampling, args.num_experiments, mode='validation'
        )  ## shape of num_experiments, num_sampling, N, dim
        CRPS_score = CRPS(real_y, final_prediction, average=args.average)
        model.save_score(CRPS_score, mode='validation', metric='CRPS')
        # NOTE(review): the samples below are regenerated with identical
        # arguments for RMSE — presumably the previous draw could be reused;
        # confirm whether fresh draws per metric are intentional.
        real_y, final_prediction = model.CRPS_prediction(
            args.num_sampling, args.num_experiments, mode='validation'
        )  ## shape of num_experiments, num_sampling, N, dim
        RMSE_score = RMSE(real_y, final_prediction, average=args.average)
        model.save_score(RMSE_score, mode='validation', metric='RMSE')
        #    #test on the test dataset
        real_y, final_prediction = model.CRPS_prediction(
            args.num_sampling, args.num_experiments,
            mode='test')  ## shape of num_experiments, num_sampling, N, dim
        CRPS_score = CRPS(real_y, final_prediction, average=args.average)
        model.save_score(CRPS_score, mode='test', metric='CRPS')
        real_y, final_prediction = model.CRPS_prediction(
            args.num_sampling, args.num_experiments,
            mode='test')  ## shape of num_experiments, num_sampling, N, dim
        RMSE_score = RMSE(real_y, final_prediction, average=args.average)
        model.save_score(RMSE_score, mode='test', metric='RMSE')
Ejemplo n.º 5
0
def _eval_split(model, args, mode):
    """Score one data split: compute MAE and RMSE on the model's
    predictions and persist both via ``model.save_score``.

    Extracted because the validation and test stages of ``train`` were
    verbatim duplicates differing only in the split name.
    """
    real_y, final_generation = model.pred_eval(mode)
    # NOTE(review): predictions appear to be already denormalized — the
    # original carried a commented-out denormalization snippet here; confirm
    # against the model implementation.
    MAE_score = MAE(real_y, final_generation, average=args.average)
    model.save_score(MAE_score, mode=mode, metric='MAE')
    RMSE_score = RMSE(real_y, final_generation, average=args.average)
    model.save_score(RMSE_score, mode=mode, metric='RMSE')


def train(dataset, args):
    """Train (or load) a PANN model for every dropout / prediction-length
    combination and score MAE and RMSE on the validation and test splits.
    """
    # Resolve dataset-specific CSV path and input columns from args.dataset.
    if args.dataset == 'AE':
        args.data_path = '../dataset/energydata_complete.csv'
        args.inputs_index = ['Windspeed']

    if 'PM' in args.dataset:
        args.data_path = '../dataset/{}20100101_20151231.csv'.format(
            args.dataset)
        args.inputs_index = ['Iws']
    args.num_inputs = len(args.inputs_index)

    pred_length_list = [1]  ## ranging from ten minutes to 60 minutes
    dropout_list = [0, 0.1]

    for dropout, pred_length in itertools.product(dropout_list,
                                                  pred_length_list):
        args.dropout = dropout
        args.pred_length = pred_length
        args.train_length = args.timesteps + args.pred_length

        ###training stage####
        model = PANN(args, device)
        load_path = os.path.join(model.args.dataset, model.model_dir,
                                 model.args.save_model_path)
        if not os.path.exists(load_path):
            # Consistency fix: include the missing path in the message, as
            # the sibling training scripts do.
            print('load path {} does not exist, training from scratch'.format(
                load_path))
            train_losses = model.train()
            model.save_model()
        else:
            print('load path exists, loading...')
            model.load_model()

        ### validation stage###
        _eval_split(model, args, 'validation')

        ### test stage###
        _eval_split(model, args, 'test')