Example #1
# Imports used directly in this excerpt; model_classes, nets, calc_stats and the
# helpers USE_GPU, init_params, get_train_test_split, get_loaders_tt,
# get_train_hold_split, and get_loaders_tth are assumed to be defined elsewhere
# in the script/project.
import argparse
import os

import setproctitle


def main():
    parser = argparse.ArgumentParser(
        description='Run storage task net experiments.')
    parser.add_argument('--save',
                        type=str,
                        metavar='save-folder',
                        help='prefix to add to save path')
    parser.add_argument('--nRuns',
                        type=int,
                        default=10,
                        metavar='runs',
                        help='number of runs')
    parser.add_argument('--paramSet',
                        type=int,
                        choices=range(4),
                        default=0,
                        metavar='hyperparams',
                        help='(lambda, epsilon) from the given row of Table 1')
    args = parser.parse_args()

    save_folder_main = 'params{}'.format(args.paramSet) if args.save is None \
        else '{}-params{}'.format(args.save, args.paramSet)
    save_folder_main = os.path.join('results', save_folder_main)

    setproctitle.setproctitle('storage-{}'.format(args.paramSet))

    # Initialize problem parameters
    params = init_params(args.paramSet)

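    # Mini-batch size for the train/test data loaders.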
    bsz = 500

    # Train, test split
    train_frac = 0.8

    input_tensors = get_train_test_split(params, train_frac)
    loaders = get_loaders_tt(input_tensors, bsz)

    if not os.path.exists(save_folder_main):
        os.makedirs(save_folder_main)

    for run in range(args.nRuns):

        save_folder = os.path.join(save_folder_main, str(run))
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)

        # Randomly construct hold-out set for task net training.
        tensors_task = get_train_hold_split(input_tensors, 0.8, save_folder)
        loaders_task = get_loaders_tth(tensors_task, bsz)

        # Run and eval rmse-minimizing net
        model_rmse = model_classes.Net(tensors_task['X_train'],
                                       tensors_task['Y_train'], [200, 200],
                                       params['T'])
        if USE_GPU:
            model_rmse = model_rmse.cuda()
        model_rmse = nets.run_rmse_net(model_rmse, loaders_task, params,
                                       tensors_task)
        nets.eval_net('rmse_net', model_rmse, loaders_task, params,
                      save_folder)

        # Run and eval task-minimizing net
        model_task = model_classes.Net(tensors_task['X_train'],
                                       tensors_task['Y_train'], [200, 200],
                                       params['T'])
        if USE_GPU:
            model_task = model_task.cuda()
        # Warm-start from the RMSE solution before task-based training.
        model_task = nets.run_rmse_net(model_task, loaders_task, params,
                                       tensors_task)
        model_task = nets.run_task_net(model_task, loaders_task, params, args,
                                       tensors_task)
        nets.eval_net('task_net', model_task, loaders_task, params,
                      save_folder)

    # Aggregate statistics across all runs.
    calc_stats.calc_stats(
        [os.path.join(save_folder_main, str(run)) for run in range(args.nRuns)],
        save_folder_main)
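
For completeness, a minimal entry-point sketch for the script above; the __main__ guard and the sample command are illustrative (the file name main.py is assumed), not part of the original excerpt.

if __name__ == '__main__':
    main()

# Hypothetical invocation: 10 runs with the (lambda, epsilon) setting from row 2
# of Table 1, writing per-run outputs under results/myrun-params2/<run>/:
#   python main.py --save myrun --nRuns 10 --paramSet 2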
Example #2
# Imports used directly in this excerpt; model_classes, nets, plot, and
# load_data_with_features are assumed to be defined elsewhere in the
# script/project.
import argparse
import os

import numpy as np
import setproctitle
import torch
from torch.autograd import Variable


def main():
    parser = argparse.ArgumentParser(
        description='Run electricity scheduling task net experiments.')
    parser.add_argument('--save',
                        type=str,
                        required=True,
                        metavar='save-folder',
                        help='save folder path')
    parser.add_argument('--nRuns',
                        type=int,
                        default=10,
                        metavar='runs',
                        help='number of runs')
    args = parser.parse_args()

    setproctitle.setproctitle('pdonti.' + args.save)

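    # Load PJM electricity load data (2008-2011 and 2012-2016) along with its
    # input features.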
    X1, Y1 = load_data_with_features('pjm_load_data_2008-11.txt')
    X2, Y2 = load_data_with_features('pjm_load_data_2012-16.txt')

    X = np.concatenate((X1, X2), axis=0)
    Y = np.concatenate((Y1, Y2), axis=0)

    # Train, test split.
    n_tt = int(len(X) * 0.8)
    X_train, Y_train = X[:n_tt], Y[:n_tt]
    X_test, Y_test = X[n_tt:], Y[n_tt:]

    # Construct tensors (without intercepts).
    X_train_ = Variable(torch.Tensor(X_train[:, :-1])).cuda()
    Y_train_ = Variable(torch.Tensor(Y_train)).cuda()
    X_test_ = Variable(torch.Tensor(X_test[:, :-1])).cuda()
    Y_test_ = Variable(torch.Tensor(Y_test)).cuda()
    variables_rmse = {
        'X_train_': X_train_,
        'Y_train_': Y_train_,
        'X_test_': X_test_,
        'Y_test_': Y_test_
    }

    for run in range(args.nRuns):

        save_folder = os.path.join(args.save, str(run))
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)

        # Generation scheduling problem params.
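        # n: 24-hour scheduling horizon; c_ramp: hour-to-hour ramping limit;
        # gamma_under / gamma_over: penalties for under- and over-generation.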
        params = {"n": 24, "c_ramp": 0.4, "gamma_under": 50, "gamma_over": 0.5}

        # Run and eval rmse-minimizing net
        model_rmse = model_classes.Net(X_train[:, :-1], Y_train, [200, 200])
        model_rmse.cuda()
        model_rmse = nets.run_rmse_net(model_rmse, variables_rmse, X_train,
                                       Y_train)
        nets.eval_net("rmse_net", model_rmse, variables_rmse, params,
                      save_folder)

        # Run and eval task cost-weighted rmse-minimizing net (model defined/updated internally)
        model_rmse_weighted = nets.run_weighted_rmse_net(
            X_train, Y_train, X_test, Y_test, params)
        nets.eval_net("weighted_rmse_net", model_rmse_weighted, variables_rmse,
                      params, save_folder)

        # Randomly construct hold-out set for task net training.
        th_frac = 0.8
        inds = np.random.permutation(X_train.shape[0])
        train_inds = inds[:int(X_train.shape[0] * th_frac)]
        hold_inds = inds[int(X_train.shape[0] * th_frac):]
        X_train2, X_hold2 = X_train[train_inds, :], X_train[hold_inds, :]
        Y_train2, Y_hold2 = Y_train[train_inds, :], Y_train[hold_inds, :]
        X_train2_ = Variable(torch.Tensor(X_train2[:, :-1])).cuda()
        Y_train2_ = Variable(torch.Tensor(Y_train2)).cuda()
        X_hold2_ = Variable(torch.Tensor(X_hold2[:, :-1])).cuda()
        Y_hold2_ = Variable(torch.Tensor(Y_hold2)).cuda()
        variables_task = {
            'X_train_': X_train2_,
            'Y_train_': Y_train2_,
            'X_hold_': X_hold2_,
            'Y_hold_': Y_hold2_,
            'X_test_': X_test_,
            'Y_test_': Y_test_
        }

        # Run and eval task-minimizing net, building off rmse net results.
        model_task = model_classes.Net(X_train2[:, :-1], Y_train2, [200, 200])
        model_task.cuda()
        model_task = nets.run_rmse_net(model_task, variables_task, X_train2,
                                       Y_train2)
        model_task = nets.run_task_net(model_task, variables_task, params,
                                       X_train2, Y_train2, args)
        nets.eval_net("task_net", model_task, variables_task, params,
                      save_folder)

    # Plot aggregate results across all runs.
    plot.plot_results(
        [os.path.join(args.save, str(run)) for run in range(args.nRuns)],
        args.save)
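
As with the first example, a minimal entry-point sketch; the __main__ guard and the sample command are illustrative (the file name main.py is assumed). Here --save is required and names the output folder directly.

if __name__ == '__main__':
    main()

# Hypothetical invocation: 10 runs, with per-run outputs in sched-results/<run>/
# and aggregate plots written to sched-results/:
#   python main.py --save sched-results --nRuns 10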