Example #1
def main():
	X_Train, X_Test, y_Train, y_Test, test_dates = load_data()

	X_Train = X_Train.values
	X_Test = X_Test.values
	y_Train = y_Train.values
	y_Test = y_Test.values

	# Construct tensors
	X_train_ = torch.from_numpy(X_Train).float().cuda()
	Y_train_ = torch.from_numpy(y_Train).float().cuda()
	X_test_ = torch.from_numpy(X_Test).float().cuda()
	Y_test_ = torch.from_numpy(y_Test).float().cuda()

	variables_rmse = {'X_train_': X_train_, 'Y_train_': Y_train_, 'X_test_': X_test_, 'Y_test_': Y_test_}

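	# Feed-forward net with two hidden layers of 200 units each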
	model_rmse = model_classes.Net(X_Train, y_Train, [200, 200])
	model_rmse.cuda()

	model_rmse, iteration_list, train_loss_arr, test_loss_arr = nets.run_rmse_net(model_rmse, variables_rmse, X_Train, y_Train)

	train_rmse, test_rmse, pred_train, pred_test = nets.eval_net(model_rmse, variables_rmse)

	# Move predictions back to the CPU and index them by date
	pred_values = pred_test.cpu().detach().numpy()
	pred_values = pd.DataFrame(pred_values, index=test_dates)
	datetime_fossil_demand(pred_values)
Example #2
def run_weighted_rmse_net_helper(X_train, Y_train, X_test, Y_test, params, weights, i):
    X_train_ = torch.tensor(X_train[:,:-1], dtype=torch.float, device=DEVICE)
    Y_train_ = torch.tensor(Y_train, dtype=torch.float, device=DEVICE)
    X_test_ = torch.tensor(X_test[:,:-1], dtype=torch.float, device=DEVICE)
    Y_test_ = torch.tensor(Y_test, dtype=torch.float, device=DEVICE)

    model = model_classes.Net(X_train[:,:-1], Y_train, [200, 200])
    if USE_GPU:
        model = model.cuda()
    opt = optim.Adam(model.parameters(), lr=1e-3)
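    # SolveScheduling maps predicted (mu, sig) into a schedule Y_sched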
    solver = model_classes.SolveScheduling(params)
    for j in range(100):
        model.train()
        batch_train_weightrmse(100, i*100 + j, X_train_.data, Y_train_.data,
                               model, opt, weights.data)

    # Rebalance weights
    model.eval()
    mu_pred_train, sig_pred_train = model(X_train_)
    Y_sched_train = solver(mu_pred_train.double(), sig_pred_train.double())
    weights2 = task_loss_no_mean(
        Y_sched_train.float(), Y_train_, params)
    if USE_GPU:
        weights2 = weights2.cuda()
    model.set_sig(X_train_, Y_train_)

    return model, weights2
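
This example references module-level USE_GPU and DEVICE flags that are defined elsewhere in the repo. A minimal sketch of a compatible configuration (an assumption, not the repo's actual code):

import torch

# Assumed module-level device configuration (not shown in these snippets)
USE_GPU = torch.cuda.is_available()
DEVICE = torch.device('cuda' if USE_GPU else 'cpu')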
Example #3
def run_task_net(model, variables, params, X_train, Y_train, args):
    opt = optim.Adam(model.parameters(), lr=1e-4)
    solver = model_classes.SolveScheduling(params)

    # For early stopping
    prev_min = 0
    hold_costs = []
    model_states = []
    num_stop_rounds = 20

    for i in range(1000):
        opt.zero_grad()
        model.train()
        mu_pred_train, sig_pred_train = model(variables['X_train_'])
        Y_sched_train = solver(mu_pred_train.double(), sig_pred_train.double())
        train_loss = task_loss(
            Y_sched_train.float(), variables['Y_train_'], params)
        train_loss.sum().backward()

        model.eval()
        mu_pred_test, sig_pred_test = model(variables['X_test_'])
        Y_sched_test = solver(mu_pred_test.double(), sig_pred_test.double())
        test_loss = task_loss(
            Y_sched_test.float(), variables['Y_test_'], params)

        mu_pred_hold, sig_pred_hold = model(variables['X_hold_'])
        Y_sched_hold = solver(mu_pred_hold.double(), sig_pred_hold.double())
        hold_loss = task_loss(
            Y_sched_hold.float(), variables['Y_hold_'], params)

        opt.step()

        print(i, train_loss.sum().item(), test_loss.sum().item(),
              hold_loss.sum().item())

        with open(os.path.join(args.save, 'task_losses.txt'), 'a') as f:
            f.write('{} {} {} {}\n'.format(i, train_loss.sum().item(),
                    test_loss.sum().item(), hold_loss.sum().item()))

        # Early stopping
        hold_costs.append(hold_loss.sum().item())
        model_states.append(copy.deepcopy(model.state_dict()))  # snapshot, not alias
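        # Every num_stop_rounds iterations: if the best hold-out cost has not
        # improved since the last check, restore and return the best state.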
        if i > 0 and i % num_stop_rounds == 0:
            idx = hold_costs.index(min(hold_costs))
            if prev_min == hold_costs[idx]:
                model.eval()
                best_model = model_classes.Net(
                    X_train[:,:-1], Y_train, [200, 200])
                best_model.load_state_dict(model_states[idx])
                best_model.cuda()
                return best_model
            else:
                prev_min = hold_costs[idx]
                hold_costs = [prev_min]
                model_states = [model_states[idx]]

    return model
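
The early-stopping bookkeeping above snapshots model weights each iteration. Note that the values in state_dict() alias the live parameters, so a shallow dict copy keeps tracking later updates; a deep copy is needed for a true snapshot. A small standalone demonstration (not repo code):

import copy
import torch
import torch.nn as nn

model = nn.Linear(2, 1)
shallow = model.state_dict().copy()          # dict copied, tensors still shared
frozen = copy.deepcopy(model.state_dict())   # weights actually snapshotted
with torch.no_grad():
    model.weight.add_(1.0)
print(torch.equal(shallow['weight'], model.weight))  # True: aliases live weight
print(torch.equal(frozen['weight'], model.weight))   # False: independent copy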
Example #4
def main():
    parser = argparse.ArgumentParser(
        description='Run storage task net experiments.')
    parser.add_argument('--save',
                        type=str,
                        metavar='save-folder',
                        help='prefix to add to save path')
    parser.add_argument('--nRuns',
                        type=int,
                        default=10,
                        metavar='runs',
                        help='number of runs')
    parser.add_argument('--paramSet',
                        type=int,
                        choices=range(4),
                        default=0,
                        metavar='hyperparams',
                        help='(lambda, epsilon) in given row of Table 1')
    args = parser.parse_args()

    save_folder_main = 'params{}'.format(args.paramSet) if args.save is None \
        else '{}-params{}'.format(args.save, args.paramSet)
    save_folder_main = os.path.join('results', save_folder_main)

    setproctitle.setproctitle('storage-{}'.format(args.paramSet))

    # Initialize problem parameters
    params = init_params(args.paramSet)

    bsz = 500

    # Train, test split
    train_frac = 0.8

    input_tensors = get_train_test_split(params, train_frac)
    loaders = get_loaders_tt(input_tensors, bsz)

    if not os.path.exists(save_folder_main):
        os.makedirs(save_folder_main)

    for run in range(args.nRuns):

        save_folder = os.path.join(save_folder_main, str(run))
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)

        # Randomly construct hold-out set for task net training.
        tensors_task = get_train_hold_split(input_tensors, 0.8, save_folder)
        loaders_task = get_loaders_tth(tensors_task, bsz)

        # Run and eval rmse-minimizing net
        model_rmse = model_classes.Net(tensors_task['X_train'],
                                       tensors_task['Y_train'], [200, 200],
                                       params['T'])
        if USE_GPU:
            model_rmse = model_rmse.cuda()
        model_rmse = nets.run_rmse_net(model_rmse, loaders_task, params,
                                       tensors_task)
        nets.eval_net('rmse_net', model_rmse, loaders_task, params,
                      save_folder)

        # Run and eval task-minimizing net
        model_task = model_classes.Net(tensors_task['X_train'],
                                       tensors_task['Y_train'], [200, 200],
                                       params['T'])
        if USE_GPU:
            model_task = model_task.cuda()
        model_task = nets.run_rmse_net(model_task, loaders_task, params,
                                       tensors_task)  # seed with rmse soln
        model_task = \
            nets.run_task_net(model_task, loaders_task, params, args, tensors_task)
        nets.eval_net('task_net', model_task, loaders_task, params,
                      save_folder)

    calc_stats.calc_stats(
        [os.path.join(save_folder_main, str(run)) for run in range(args.nRuns)],
        save_folder_main)
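
Given the parser above, a typical invocation might look like the following (the script name is an assumption):

python main.py --save myrun --nRuns 10 --paramSet 2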
Example #5
def run_rmse_net(model, loaders, params, tensors_task):
    opt = optim.Adam(model.parameters(), lr=1e-3)

    # For early stopping
    prev_min = 0
    hold_costs = []
    model_states = []
    num_stop_rounds = 20

    for i in range(1000):

        # train
        model.train()
        total_train_loss = 0
        m_train = 0

        for (batch, (X_train, Y_train)) in enumerate(loaders['train']):
            if USE_GPU:
                X_train_, Y_train_ = X_train.cuda(), Y_train.cuda()
            else:
                X_train_, Y_train_ = X_train, Y_train

            opt.zero_grad()
            train_loss = nn.MSELoss()(model(X_train_), Y_train_)
            total_train_loss += train_loss.item() * X_train_.size(0)
            m_train += X_train_.size(0)
            train_loss.backward()
            opt.step()

        # evaluate on test
        model.eval()
        total_test_loss = 0
        m_test = 0

        for (batch, (X_test, Y_test)) in enumerate(loaders['test']):
            if USE_GPU:
                X_test_, Y_test_ = X_test.cuda(), Y_test.cuda()
            else:
                X_test_, Y_test_ = X_test, Y_test

            test_loss = nn.MSELoss()(model(X_test_), Y_test_)
            total_test_loss += test_loss.item() * X_test_.size(0)
            m_test += X_test_.size(0)

        model.eval()
        total_hold_loss = 0
        m_hold = 0
        for (batch, (X_hold, Y_hold)) in enumerate(loaders['hold']):
            if USE_GPU:
                X_hold_, Y_hold_ = X_hold.cuda(), Y_hold.cuda()
            else:
                X_hold_, Y_hold_ = X_hold, Y_hold

            hold_loss = nn.MSELoss()(model(X_hold_), Y_hold_)
            total_hold_loss += hold_loss.item() * X_hold_.size(0)
            m_hold += X_hold_.size(0)

        print(i, total_train_loss / m_train, total_test_loss / m_test,
              total_hold_loss / m_hold)

        # Early stopping
        hold_costs.append(total_hold_loss)
        model_states.append(copy.deepcopy(model.state_dict()))  # snapshot, not alias
        if i > 0 and i % num_stop_rounds == 0:
            idx = hold_costs.index(min(hold_costs))
            if prev_min == hold_costs[idx]:
                model.eval()

                best_model = model_classes.Net(tensors_task['X_train'],
                                               tensors_task['Y_train'],
                                               [200, 200], params['T'])
                best_model.load_state_dict(model_states[idx])
                if USE_GPU:
                    best_model = best_model.cuda()

                return best_model
            else:
                prev_min = hold_costs[idx]
                hold_costs = [prev_min]
                model_states = [model_states[idx]]

    return model
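
run_rmse_net iterates loaders['train'], loaders['test'], and loaders['hold']. The repo builds these via get_loaders_tth (see Example #4); a hypothetical stand-in, assuming the split tensors are already constructed:

from torch.utils.data import DataLoader, TensorDataset

def make_loaders(tensors, bsz):
    # Keys inferred from the loops above; shuffle only the training split
    return {
        split: DataLoader(
            TensorDataset(tensors['X_' + split], tensors['Y_' + split]),
            batch_size=bsz, shuffle=(split == 'train'))
        for split in ('train', 'test', 'hold')
    }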
Example #6
def run_task_net(model, loader, params, args, tensors_task):
    opt = optim.Adam(model.parameters(), lr=1e-4)
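    # Differentiable battery-storage scheduler: maps forecasts to schedules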
    solver = model_classes.ScheduleBattery(params)

    # For early stopping
    prev_min = 0
    hold_costs = []
    model_states = []
    num_stop_rounds = 20

    for i in range(1000):

        # train
        model.train()
        total_train_loss = 0
        m_train = 0
        for (batch, (X_train, Y_train)) in enumerate(loader['train']):
            opt.zero_grad()
            if USE_GPU:
                X_train, Y_train = X_train.cuda(), Y_train.cuda()
            preds_train = model(X_train)
            train_loss = task_loss(solver(preds_train), Y_train, params).sum()
            total_train_loss += train_loss.item() * X_train.size(0)
            m_train += X_train.size(0)
            train_loss.backward()
            opt.step()

        # test
        model.eval()
        total_test_loss = 0
        m_test = 0
        for (batch, (X_test, Y_test)) in enumerate(loader['test']):
            if USE_GPU:
                X_test, Y_test = X_test.cuda(), Y_test.cuda()
            preds_test = model(X_test)
            test_loss = task_loss(solver(preds_test), Y_test, params).sum()
            total_test_loss += test_loss.item() * X_test.size(0)
            m_test += X_test.size(0)

        # hold
        model.eval()
        total_hold_loss = 0
        m_hold = 0
        for (batch, (X_hold, Y_hold)) in enumerate(loader['hold']):
            if USE_GPU:
                X_hold, Y_hold = X_hold.cuda(), Y_hold.cuda()
            preds_hold = model(X_hold)
            hold_loss = task_loss(solver(preds_hold), Y_hold, params).sum()
            total_hold_loss += hold_loss.item() * X_hold.size(0)
            m_hold += X_hold.size(0)

        print(i, total_train_loss / m_train, total_test_loss / m_test,
              total_hold_loss / m_hold)

        # Early stopping
        hold_costs.append(total_hold_loss)
        model_states.append(copy.deepcopy(model.state_dict()))  # snapshot, not alias
        if i > 0 and i % num_stop_rounds == 0:
            idx = hold_costs.index(min(hold_costs))
            if prev_min == hold_costs[idx]:
                model.eval()

                best_model = model_classes.Net(tensors_task['X_train'],
                                               tensors_task['Y_train'],
                                               [200, 200], params['T'])
                best_model.load_state_dict(model_states[idx])
                if USE_GPU:
                    best_model = best_model.cuda()

                return best_model
            else:
                prev_min = hold_costs[idx]
                hold_costs = [prev_min]
                model_states = [model_states[idx]]

    return model
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Run electricity scheduling task net experiments.')
    parser.add_argument('--save',
                        type=str,
                        required=True,
                        metavar='save-folder',
                        help='save folder path')
    parser.add_argument('--nRuns',
                        type=int,
                        default=10,
                        metavar='runs',
                        help='number of runs')
    args = parser.parse_args()

    setproctitle.setproctitle('pdonti.' + args.save)

    X1, Y1 = load_data_with_features('pjm_load_data_2008-11.txt')
    X2, Y2 = load_data_with_features('pjm_load_data_2012-16.txt')

    X = np.concatenate((X1, X2), axis=0)
    Y = np.concatenate((Y1, Y2), axis=0)

    # Train, test split.
    n_tt = int(len(X) * 0.8)
    X_train, Y_train = X[:n_tt], Y[:n_tt]
    X_test, Y_test = X[n_tt:], Y[n_tt:]

    # Construct tensors (without intercepts).
    X_train_ = torch.tensor(X_train[:, :-1], dtype=torch.float).cuda()
    Y_train_ = torch.tensor(Y_train, dtype=torch.float).cuda()
    X_test_ = torch.tensor(X_test[:, :-1], dtype=torch.float).cuda()
    Y_test_ = torch.tensor(Y_test, dtype=torch.float).cuda()
    variables_rmse = {
        'X_train_': X_train_,
        'Y_train_': Y_train_,
        'X_test_': X_test_,
        'Y_test_': Y_test_
    }

    for run in range(args.nRuns):

        save_folder = os.path.join(args.save, str(run))
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)

        # Generation scheduling problem params.
        params = {"n": 24, "c_ramp": 0.4, "gamma_under": 50, "gamma_over": 0.5}

        # Run and eval rmse-minimizing net
        model_rmse = model_classes.Net(X_train[:, :-1], Y_train, [200, 200])
        model_rmse.cuda()
        model_rmse = nets.run_rmse_net(model_rmse, variables_rmse, X_train,
                                       Y_train)
        nets.eval_net("rmse_net", model_rmse, variables_rmse, params,
                      save_folder)

        # Run and eval task cost-weighted rmse-minimizing net (model defined/updated internally)
        model_rmse_weighted = nets.run_weighted_rmse_net(
            X_train, Y_train, X_test, Y_test, params)
        nets.eval_net("weighted_rmse_net", model_rmse_weighted, variables_rmse,
                      params, save_folder)

        # Randomly construct hold-out set for task net training.
        th_frac = 0.8
        inds = np.random.permutation(X_train.shape[0])
        train_inds = inds[:int(X_train.shape[0] * th_frac)]
        hold_inds = inds[int(X_train.shape[0] * th_frac):]
        X_train2, X_hold2 = X_train[train_inds, :], X_train[hold_inds, :]
        Y_train2, Y_hold2 = Y_train[train_inds, :], Y_train[hold_inds, :]
        X_train2_ = torch.tensor(X_train2[:, :-1], dtype=torch.float).cuda()
        Y_train2_ = torch.tensor(Y_train2, dtype=torch.float).cuda()
        X_hold2_ = torch.tensor(X_hold2[:, :-1], dtype=torch.float).cuda()
        Y_hold2_ = torch.tensor(Y_hold2, dtype=torch.float).cuda()
        variables_task = {
            'X_train_': X_train2_,
            'Y_train_': Y_train2_,
            'X_hold_': X_hold2_,
            'Y_hold_': Y_hold2_,
            'X_test_': X_test_,
            'Y_test_': Y_test_
        }

        # Run and eval task-minimizing net, building off rmse net results.
        model_task = model_classes.Net(X_train2[:, :-1], Y_train2, [200, 200])
        model_task.cuda()
        model_task = nets.run_rmse_net(model_task, variables_task, X_train2,
                                       Y_train2)
        model_task = nets.run_task_net(model_task, variables_task, params,
                                       X_train2, Y_train2, args)
        nets.eval_net("task_net", model_task, variables_task, params,
                      save_folder)

    plot.plot_results(
        [os.path.join(args.save, str(run)) for run in range(args.nRuns)],
        args.save)
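
Several examples call task_loss(Y_sched, Y_actual, params); its definition is not shown here. Given the gamma_under/gamma_over parameters above, a plausible sketch (the repo's actual loss may differ) is:

import torch

def task_loss(Y_sched, Y_actual, params):
    # Asymmetric penalties for under-/over-generation plus a quadratic term
    under = torch.clamp(Y_actual - Y_sched, min=0)
    over = torch.clamp(Y_sched - Y_actual, min=0)
    return (params['gamma_under'] * under +
            params['gamma_over'] * over +
            0.5 * (Y_sched - Y_actual) ** 2).mean(dim=0)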