Example #1
def disagg_fold_new(fold_num, appliance, cell_type, hidden_size, num_layers,
                    bidirectional, lr, num_iterations):
    torch.manual_seed(0)

    appliance_num = APPLIANCE_ORDER.index(appliance)
    train, test = get_train_test(num_folds=num_folds, fold_num=fold_num)

    train = np.vstack([train, aug_data[:num_aug]])

    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    train_appliance = train[:, appliance_num, :, :].reshape(-1, 24, 1)
    test_appliance = test[:, appliance_num, :, :].reshape(-1, 24, 1)
    gts.append(test_appliance.reshape(-1, 24))
    loss_func = nn.L1Loss()
    r = CustomRNN(cell_type, hidden_size, num_layers, bidirectional)

    if cuda_av:
        r = r.cuda()
        loss_func = loss_func.cuda()

    # Setting the params all to be non-negative
    #for param in r.parameters():
    #    param.data = param.data.abs()

    optimizer = torch.optim.Adam(r.parameters(), lr=lr)

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    test_y = Variable(torch.Tensor(test_appliance), requires_grad=False)

    prediction_fold = {}

    for t in range(1, num_iterations + 1):

        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        train_y = Variable(torch.Tensor(train_appliance))
        if cuda_av:
            inp = inp.cuda()
            train_y = train_y.cuda()
        pred = r(inp)

        optimizer.zero_grad()
        loss = loss_func(pred, train_y)
        if t % 100 == 0:
            print(t, loss.data[0])
        loss.backward()
        optimizer.step()

        if t % 200 == 0 and t != 0:
            if cuda_av:
                test_inp = test_inp.cuda()
            pred_test = r(test_inp)
            pred_test = torch.clamp(pred_test, min=0.)
            if cuda_av:
                prediction_fold[t] = pred_test.cpu().data.numpy()
            else:
                prediction_fold[t] = pred_test.data.numpy()

    return prediction_fold, test_appliance
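
A minimal usage sketch for disagg_fold_new, assuming the module-level names it reads (num_folds, aug_data, num_aug, gts, cuda_av, APPLIANCE_ORDER, CustomRNN) are already defined; the hyperparameters below are illustrative, not tuned values:

preds_by_iter, gt = disagg_fold_new(fold_num=0, appliance="hvac",
                                    cell_type="GRU", hidden_size=100,
                                    num_layers=1, bidirectional=False,
                                    lr=0.01, num_iterations=600)
# predictions are checkpointed every 200 iterations
final_pred = preds_by_iter[600]
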
def discriminative(dataset, cur_fold, num_latent, num_iterations):

    # for cur_fold in range(5):
    train, test = get_train_test(dataset,
                                 num_folds=num_folds,
                                 fold_num=cur_fold)
    #train, valid = train_test_split(train, test_size=0.2, random_state=0)
    valid = train[int(0.8 * len(train)):].copy()
    train = train[:int(0.8 * len(train))].copy()

    valid_gt = valid[:, 1:, :, :]
    test_gt = test[:, 1:, :, :]

    train_sc, valid_sc = reshape_for_sc(train), reshape_for_sc(valid)
    train_data = np.array(
        [train_sc[:, :, i] for i in range(1, train.shape[1])]).swapaxes(1, 2)
    c = SparseCoding()
    c.train(train_data, num_latent=num_latent)
    valid_pred = c.disaggregate_discriminative(
        train_sc[:, :, 0].swapaxes(0, 1),
        valid_sc[:, :, 0].swapaxes(0, 1),
        num_iter=num_iterations)
    valid_pred = valid_pred[-1, :, :, :]
    valid_pred = valid_pred.swapaxes(0, 2).swapaxes(1, 2)
    valid_pred = valid_pred.reshape(valid_pred.shape[0], valid_pred.shape[1],
                                    -1, 24)

    valid_pred = np.minimum(valid_pred, valid_gt[:, 0:1, :, :])

    valid_error = {
        APPLIANCE_ORDER[i + 1]:
        mean_absolute_error(valid_pred[:, i, :, :].flatten(),
                            valid_gt[:, i, :, :].flatten())
        for i in range(valid_pred.shape[1])
    }

    train_sc, test_sc = reshape_for_sc(train), reshape_for_sc(test)
    train_data = np.array(
        [train_sc[:, :, i] for i in range(1, train.shape[1])]).swapaxes(1, 2)
    c = SparseCoding()
    c.train(train_data, num_latent=num_latent)
    test_pred = c.disaggregate_discriminative(train_sc[:, :, 0].swapaxes(0, 1),
                                              test_sc[:, :, 0].swapaxes(0, 1),
                                              num_iter=num_iterations)
    test_pred = test_pred[-1, :, :, :]
    test_pred = test_pred.swapaxes(0, 2).swapaxes(1, 2)
    test_pred = test_pred.reshape(test_pred.shape[0], test_pred.shape[1], -1,
                                  24)

    test_pred = np.minimum(test_pred, test_gt[:, 0:1, :, :])

    test_error = {
        APPLIANCE_ORDER[i + 1]:
        mean_absolute_error(test_pred[:, i, :, :].flatten(),
                            test_gt[:, i, :, :].flatten())
        for i in range(test_pred.shape[1])
    }

    return valid_pred, valid_error, valid_gt, test_pred, test_error, test_gt
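
The routine above fits one SparseCoding model to score a held-out validation split and a second one for the test split. A hedged usage sketch; num_latent and num_iterations are placeholders:

valid_pred, valid_error, valid_gt, test_pred, test_error, test_gt = \
    discriminative(dataset=1, cur_fold=0, num_latent=10, num_iterations=100)
print(valid_error)  # per-appliance MAE on the validation split
print(test_error)   # per-appliance MAE on the test split
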
def nested_stf(dataset, cur_fold, r, lr, num_iter):

    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=cur_fold)
    #train, valid = train_test_split(train, test_size=0.2, random_state=0)
    valid = train[int(0.8 * len(train)):].copy()
    train = train[:int(0.8 * len(train))].copy()

    valid_gt = valid[:, 1:, :, :]
    test_gt = test[:, 1:, :, :]

    valid_copy = valid.copy()
    valid_copy[:, 1:, :, :] = np.NaN
    train_valid = np.concatenate([train, valid_copy])
    H, A, D, T = stf_4dim_time(tensor=train_valid, r=r, lr=lr, num_iter=num_iter)
    valid_pred = np.einsum("Hr, Ar, Dr, ATr -> HADT", H, A, D, T)[len(train):, 1:, :, :]
    valid_error = {
        APPLIANCE_ORDER[i + 1]:
        mean_absolute_error(valid_pred[:, i, :, :].flatten(),
                            valid_gt[:, i, :, :].flatten())
        for i in range(valid_pred.shape[1])
    }

    test_copy = test.copy()
    test_copy[:, 1:, :, :] = np.NaN
    train_test = np.concatenate([train, test_copy])
    H, A, D, T = stf_4dim_time(tensor=train_test, r=r, lr=lr, num_iter=num_iter)
    test_pred = np.einsum("Hr, Ar, Dr, ATr -> HADT", H, A, D, T)[len(train):, 1:, :, :]
    test_error = {
        APPLIANCE_ORDER[i + 1]:
        mean_absolute_error(test_pred[:, i, :, :].flatten(),
                            test_gt[:, i, :, :].flatten())
        for i in range(test_pred.shape[1])
    }
    
    return valid_pred, valid_error, valid_gt, test_pred, test_error, test_gt
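
This variant factorizes the home x appliance x day x time tensor with stf_4dim_time, whose time factor also depends on the appliance (the "ATr" term in the einsum); held-out entries are masked with NaN and recovered from the learned factors. A sketch with placeholder rank and learning rate:

valid_pred, valid_error, valid_gt, test_pred, test_error, test_gt = \
    nested_stf(dataset=1, cur_fold=0, r=3, lr=0.1, num_iter=500)
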
Example #4
def nested_stf(dataset, cur_fold, r, lr, num_iter):
    # valid_error = {}
    # out = []
    # for cur_fold in range(5):
    # valid_error = {}
    train, test = get_train_test(dataset,
                                 num_folds=num_folds,
                                 fold_num=cur_fold)
    #train, valid = train_test_split(train, test_size=0.2, random_state=0)
    valid = train[int(0.8 * len(train)):].copy()
    train = train[:int(0.8 * len(train))].copy()

    valid_gt = valid[:, 1:, :, :]
    test_gt = test[:, 1:, :, :]

    # print ("fold: ", cur_fold, " num_latent: ", r, " lr: ", lr, " num_iter: ", num_iter)
    #for valid data
    valid_copy = valid.copy()
    valid_copy[:, 1:, :, :] = np.NaN
    train_valid = np.concatenate([train, valid_copy])
    H, A, D, T = stf_4dim(tensor=train_valid, r=r, lr=lr, num_iter=num_iter)
    valid_pred = np.einsum("Hr, Ar, Dr, Tr ->HADT", H, A, D, T)[len(train):,
                                                                1:, :, :]
    valid_error = {
        APPLIANCE_ORDER[i + 1]:
        mean_absolute_error(valid_pred[:, i, :, :].flatten(),
                            valid_gt[:, i, :, :].flatten())
        for i in range(valid_pred.shape[1])
    }

    #for test data

    return valid_pred, valid_error, valid_gt
Example #5
def stf(r=2, lr=1, num_iter=100):
    out = []
    for cur_fold in range(5):
        train, test = get_train_test(num_folds=num_folds, fold_num=cur_fold)
        test_copy = test.copy()
        test_copy[:, 1:, :, :] = np.NaN
        train_test = np.concatenate([train, test_copy])
        H, A, D, T = stf_4dim(tensor=train_test, r=r, lr=lr, num_iter=num_iter)
        pred = np.einsum("Hr, Ar, Dr, Tr ->HADT", H, A, D, T)[len(train):,
                                                              1:, :, :]
        out.append(pred)
    return np.concatenate(out)
def stf(dataset, cur_fold, r=2, lr=1, num_iter=100):
    num_folds = 5
    train, test = get_train_test(dataset,
                                 num_folds=num_folds,
                                 fold_num=cur_fold)
    test_copy = test.copy()
    test_copy[:, 1:, :, :] = np.NaN
    train_test = np.concatenate([train, test_copy])
    H, A, D, T = stf_4dim(tensor=train_test, r=r, lr=lr, num_iter=num_iter)
    pred = np.einsum("Hr, Ar, Dr, Tr ->HADT", H, A, D, T)[len(train):,
                                                          1:, :, :]

    return pred
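
The first stf above runs all five folds and concatenates the predictions; the second scores a single fold of a given dataset. A sketch of the per-fold form (rank, learning rate and iteration count are illustrative):

pred = stf(dataset=1, cur_fold=0, r=2, lr=1, num_iter=100)
# pred: (n_test_homes, n_appliances, n_days, 24), aggregate column dropped
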
def non_discriminative(num_latent):
    out = []
    for cur_fold in range(5):
        train, test = get_train_test(num_folds=num_folds, fold_num=cur_fold)
        train_sc, test_sc = reshape_for_sc(train), reshape_for_sc(test)
        train_data = np.array([
            train_sc[:, :, i] for i in range(1, train.shape[1])
        ]).swapaxes(1, 2)
        c = SparseCoding()
        c.train(train_data, num_latent=num_latent)
        pred = c.disaggregate(test_sc[:, :, 0].swapaxes(0, 1)).swapaxes(
            0, 2).swapaxes(1, 2)
        pred = pred.reshape(pred.shape[0], pred.shape[1], -1, 24)
        out.append(pred)
    return np.concatenate(out)
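
non_discriminative trains SparseCoding on each fold's appliance signals and disaggregates the aggregate column directly, without the discriminative refinement. A sketch (num_latent is a placeholder; num_folds is read from module scope):

pred = non_discriminative(num_latent=10)
# predictions stacked over the five folds: (n_homes, n_appliances, n_days, 24)
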
def disagg_fold(fold_num, dataset, cell_type, hidden_size, num_layers, bidirectional, lr, num_iterations, p):
    # print (fold_num, hidden_size, num_layers, bidirectional, lr, num_iterations, p)
    #print (ORDER)
    torch.manual_seed(0)

    num_folds = 5
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    # from sklearn.model_selection import train_test_split
    # train, valid = train_test_split(train, test_size=0.2, random_state=0)

    valid = train[int(0.8 * len(train)):].copy()
    train = train[:int(0.8 * len(train))].copy()


    train_aggregate = train[:, 0, :, :].reshape(-1, train.shape[3], 1)
    valid_aggregate = valid[:, 0, :, :].reshape(-1, train.shape[3], 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, train.shape[3], 1)


    #print (train.shape)
    #print (valid.shape)
    #print (test.shape)

    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_valid = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_valid[a_num] = Variable(
            torch.Tensor(valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((valid_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_valid[a_num] = out_valid[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((test_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, len(ORDER))
    # prevent negative
    #for param in a.parameters():
    #    param.data = param.data.abs()
    #print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1, 1))).type(torch.FloatTensor),
                   requires_grad=True)

    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    if cuda_av:
        valid_inp = valid_inp.cuda()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()

    valid_pred = {}
    train_pred = {}
    test_pred = {}
    test_losses = {}
    valid_losses = {}

    for t in range(1, num_iterations+1):
        idx_train = Variable(torch.LongTensor(np.random.choice(range(train_aggregate.shape[0]), 50, replace=True)))
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)

        valid_out = torch.cat([out_valid[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        test_out = torch.cat([out_test[appliance_num] for appliance_num, appliance in enumerate(ORDER)])

        if cuda_av:
            idx_train = idx_train.cuda()
            out = torch.cat(
                [out_train[appliance_num].index_select(0, idx_train) for appliance_num, appliance in enumerate(ORDER)])
            inp = inp.cuda().index_select(0, idx_train)
            out = out.cuda()
        else:
            inp = inp.index_select(0, idx_train)
            out = torch.cat(
                [out_train[appliance_num].index_select(0, idx_train) for appliance_num, appliance in enumerate(ORDER)])

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 50 == 0:
            #print(t, loss.data[0])

            if cuda_av:
                valid_inp = valid_inp.cuda()
            valid_params = [valid_inp, -2]
            for i in range(len(ORDER)):
                valid_params.append(None)
            valid_pr = a(*valid_params)
            valid_loss = loss_func(valid_pr, valid_out)

            if cuda_av:
                test_inp = test_inp.cuda()
            test_params = [test_inp, -2]
            for i in range(len(ORDER)):
                test_params.append(None)
            test_pr = a(*test_params)
            test_loss = loss_func(test_pr, test_out)

            test_losses[t] = test_loss.data[0]
            valid_losses[t] = valid_loss.data[0]
            # np.save("./baseline/p_50_loss")

            if t % 1000 == 0:
                valid_pr = torch.clamp(valid_pr, min=0.)
                valid_pred[t] = valid_pr
                test_pr = torch.clamp(test_pr, min=0.)
                test_pred[t] = test_pr
                train_pr = pred
                train_pr = torch.clamp(train_pr, min=0.)
                train_pred[t] = train_pr

            #print("Round:", t, "Training Error:", loss.data[0], "Validation Error:", valid_loss.data[0], "Test Error:", test_loss.data[0])

        loss.backward()
        optimizer.step()

    # store training prediction
    # train_pred = torch.clamp(pred, min=0.)
    # train_pred = torch.split(train_pred, train_aggregate.shape[0])
    train_fold = [None for x in range(len(ORDER))]
    # if cuda_av:
    #     for appliance_num, appliance in enumerate(ORDER):
    #         train_fold[appliance_num] = train_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    # else:
    #     for appliance_num, appliance in enumerate(ORDER):
    #         train_fold[appliance_num] = train_pred[appliance_num].data.numpy().reshape(-1, 24)


    # test on the validation set

    valid_fold = {}
    for t in range(1000, num_iterations + 1, 1000):

        valid_pred[t] = torch.split(valid_pred[t], valid_aggregate.shape[0])
        valid_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].cpu().data.numpy().reshape(-1, valid.shape[3])
        else:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].data.numpy().reshape(-1, valid.shape[3])

    test_fold = {}
    for t in range(1000, num_iterations + 1, 1000):

        test_pred[t] = torch.split(test_pred[t], test_aggregate.shape[0])
        test_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][appliance_num].cpu().data.numpy().reshape(-1, valid.shape[3])
        else:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][appliance_num].data.numpy().reshape(-1, valid.shape[3])

    # store ground truth of the validation and test sets
    valid_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        valid_gt_fold[appliance_num] = valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            valid_aggregate.shape[0],
            -1, 1).reshape(-1, valid.shape[3])

    test_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        test_gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            test_aggregate.shape[0],
            -1, 1).reshape(-1, test.shape[3])

    # calculate the error on the validation set
    valid_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        valid_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            valid_error[t][appliance] = mean_absolute_error(valid_fold[t][appliance_num], valid_gt_fold[appliance_num])

    test_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        test_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            test_error[t][appliance] = mean_absolute_error(test_fold[t][appliance_num], test_gt_fold[appliance_num])

    return train_fold, valid_fold, test_fold, valid_error, test_error, valid_losses, test_losses
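
A sketch of running this fold routine and using the checkpointed validation error to pick a stopping iteration; the hyperparameters are placeholders, and num_iterations should be a multiple of 1000 so the checkpoint dictionaries are populated:

(train_fold, valid_fold, test_fold, valid_error, test_error,
 valid_losses, test_losses) = disagg_fold(0, 1, "GRU", 100, 1, False,
                                          0.01, 2000, p=0)
best_t = min(valid_error, key=lambda t: sum(valid_error[t].values()))
print(best_t, test_error[best_t])
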
def disagg_fold(fold_num, dataset, cell_type, hidden_size, num_layers, bidirectional, lr, num_iterations, p):
    # print (fold_num, hidden_size, num_layers, bidirectional, lr, num_iterations, p)
    print(ORDER)
    torch.manual_seed(0)

    num_folds=5
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    from sklearn.model_selection import train_test_split
    train, valid = train_test_split(train, test_size=0.2, random_state=0)


    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    valid_aggregate = valid[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)


    print(train.shape)
    print(valid.shape)
    print(test.shape)

    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, len(ORDER))
    # prevent negative
    #for param in a.parameters():
    #    param.data = param.data.abs()
    #print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1, 1))).type(torch.FloatTensor),
                   requires_grad=True)

    valid_pred = {}
    train_pred = {}

    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    if cuda_av:
        valid_inp = valid_inp.cuda()

    for t in range(1, num_iterations+1):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([out_train[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 100 == 1:
            print(t, loss.data[0])

        if t % 1000 == 0:
            if cuda_av:
                valid_inp = valid_inp.cuda()
            valid_params = [valid_inp, -2]
            for i in range(len(ORDER)):
                valid_params.append(None)
            valid_pr = a(*valid_params)
            valid_pr = torch.clamp(valid_pr, min=0.)
            valid_pred[t] = valid_pr

            train_pr = pred
            train_pr = torch.clamp(train_pr, min=0.)
            train_pred[t] = train_pr


        loss.backward()
        optimizer.step()

    # store training prediction
    train_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        # train_pred[t] = torch.clamp(train_pred[t], min=0.)
        train_pred[t] = torch.split(train_pred[t], train_aggregate.shape[0])
        train_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][appliance_num].data.numpy().reshape(-1, 24)


    # test on the validation set

    valid_fold = {}
    for t in range(1000, num_iterations+1, 1000):

        valid_pred[t] = torch.split(valid_pred[t], valid_aggregate.shape[0])
        valid_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].data.numpy().reshape(-1, 24)
    
    # store ground truth of the validation set
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            valid_aggregate.shape[0], -1, 1).reshape(-1, 24)

    # calculate the error on the validation set
    error = {}
    for t in range(1000, num_iterations+1, 1000):
        error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            error[t][appliance] = mean_absolute_error(valid_fold[t][appliance_num], gt_fold[appliance_num])
    
    return train_fold, valid_fold, error
Example #10
if torch.cuda.is_available():
    cuda_av = True
else:
    cuda_av = False

fold_num = 0
num_folds = 5
cell_type="GRU"
hidden_size = 150
lr = 0.1
bidirectional = True
appliance = "hvac"

torch.manual_seed(0)

appliance_num = APPLIANCE_ORDER.index(appliance)
train, test = get_train_test(2, num_folds=num_folds, fold_num=fold_num)

train_aggregate = train[:, 0, :, :].reshape(train.shape[0], -1, 1)

test_aggregate = test[:, 0, :, :].reshape(test.shape[0], -1, 1)

train_appliance = train[:, appliance_num, :, :].reshape(train.shape[0], -1, 1)
test_appliance = test[:, appliance_num, :, :].reshape(test.shape[0], -1, 1)


loss_func = nn.L1Loss()
r = CustomRNN(cell_type, hidden_size, 1, bidirectional)

if cuda_av:
    r = r.cuda()
    loss_func = loss_func.cuda()
Example #11
def disagg(dataset, cell_type, hidden_size, num_layers, bidirectional, lr,
           num_iterations, p):
    preds = []
    gts = []
    for fold_num in range(num_folds):
        print("-" * 40)
        sys.stdout.flush()
        train, test = get_train_test(dataset,
                                     num_folds=num_folds,
                                     fold_num=fold_num)
        train_aggregate = train[:, 0, :, :].reshape(-1, 24)
        test_aggregate = test[:, 0, :, :].reshape(-1, 24)
        #ORDER = APPLIANCE_ORDER[1:][:][::-1]
        # ORDER = ['mw','dw','fridge','dr','hvac']
        out_train = [None for temp in range(len(ORDER))]
        for a_num, appliance in enumerate(ORDER):
            out_train[a_num] = Variable(
                torch.Tensor(
                    train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
                        (train_aggregate.shape[0], -1))))
            if cuda_av:
                out_train[a_num] = out_train[a_num].cuda()

        out_test = [None for temp in range(len(ORDER))]
        for a_num, appliance in enumerate(ORDER):
            out_test[a_num] = Variable(
                torch.Tensor(
                    test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
                        (test_aggregate.shape[0], -1))))
            if cuda_av:
                out_test[a_num] = out_test[a_num].cuda()

        loss_func = nn.L1Loss()
        a = AppliancesRNN(cell_type,
                          hidden_size,
                          num_layers,
                          bidirectional,
                          num_appliance=len(ORDER))
        for param in a.parameters():
            param.data = param.data.abs()
        # print(a)
        if cuda_av:
            a = a.cuda()
            loss_func = loss_func.cuda()
        optimizer = torch.optim.Adam(a.parameters(), lr=lr)
        inp = Variable(torch.Tensor(
            train_aggregate.reshape(
                (train_aggregate.shape[0], -1))).type(torch.FloatTensor),
                       requires_grad=True)
        for t in range(num_iterations):
            inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
            out = torch.cat([
                out_train[appliance_num]
                for appliance_num, appliance in enumerate(ORDER)
            ])
            ot = torch.cat([
                out_test[appliance_num]
                for appliance_num, appliance in enumerate(ORDER)
            ])
            if cuda_av:
                inp = inp.cuda()
                out = out.cuda()
                ot = ot.cuda()

            params = [inp, p]
            for a_num, appliance in enumerate(ORDER):
                params.append(out_train[a_num])
            # print(params)
            pred = a(*params)

            optimizer.zero_grad()
            pred_split = torch.split(pred, pred.size(0) // len(ORDER))

            losses = [
                loss_func(pred_split[appliance_num], out_train[appliance_num])
                * weight_appliance[appliance]
                for appliance_num, appliance in enumerate(ORDER)
            ]

            loss = sum(losses) / len(ORDER)
            if t % 20 == 0:
                print(t, loss.data[0])
                sys.stdout.flush()

            loss.backward()
            optimizer.step()

        test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
        if cuda_av:
            test_inp = test_inp.cuda()

        params = [test_inp, -2]
        for i in range(len(ORDER)):
            params.append(None)
        pr = a(*params)
        pr = torch.clamp(pr, min=0.)
        test_pred = torch.split(pr, test_aggregate.shape[0])
        prediction_fold = [None for x in range(len(ORDER))]

        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                prediction_fold[appliance_num] = \
                    test_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                prediction_fold[appliance_num] = \
                    test_pred[appliance_num].data.numpy().reshape(-1, 24)
        gt_fold = [None for x in range(len(ORDER))]
        for appliance_num, appliance in enumerate(ORDER):
            gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
                test_aggregate.shape[0], -1, 1).reshape(-1, 24)

        preds.append(prediction_fold)
        gts.append(gt_fold)

    prediction_flatten = {}
    gt_flatten = {}
    for appliance_num, appliance in enumerate(ORDER):
        prediction_flatten[appliance] = []
        gt_flatten[appliance] = []

    for appliance_num, appliance in enumerate(ORDER):
        for fold in range(num_folds):
            prediction_flatten[appliance].append(preds[fold][appliance_num])
            gt_flatten[appliance].append(gts[fold][appliance_num])
        gt_flatten[appliance] = np.concatenate(gt_flatten[appliance])
        prediction_flatten[appliance] = np.concatenate(
            prediction_flatten[appliance])

    err = {}
    for appliance in ORDER:
        print(appliance)
        sys.stdout.flush()
        err[appliance] = mean_absolute_error(gt_flatten[appliance],
                                             prediction_flatten[appliance])
    return err
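
disagg ties the fold loop, the appliance-weighted training and the final scoring together, returning one MAE per appliance over all folds. A sketch, assuming ORDER, weight_appliance, num_folds and cuda_av are set at module level (values are placeholders):

err = disagg(dataset=1, cell_type="GRU", hidden_size=100, num_layers=1,
             bidirectional=False, lr=0.01, num_iterations=200, p=0)
for appliance, mae in err.items():
    print(appliance, mae)
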
def disagg_fold(fold_num, dataset, lr, num_iterations, p, ORDER):

    train, test = get_train_test(dataset,
                                 num_folds=num_folds,
                                 fold_num=fold_num)
    train, valid = train_test_split(train, test_size=0.2, random_state=0)

    train_aggregate = train[:, 0, :, :].reshape(-1, 24)
    valid_aggregate = valid[:, 0, :, :].reshape(-1, 24)
    #ORDER = APPLIANCE_ORDER[1:][:][::-1]
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train_aggregate.shape[0], -1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_valid = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_valid[a_num] = Variable(
            torch.Tensor(valid[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (valid_aggregate.shape[0], -1))))
        if cuda_av:
            out_valid[a_num] = out_valid[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(num_appliance=len(ORDER))
    # for param in a.parameters():
    #    param.data = param.data.abs()
    # print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)
    inp = Variable(torch.Tensor(
        train_aggregate.reshape(
            (train_aggregate.shape[0], -1))).type(torch.FloatTensor),
                   requires_grad=True)
    for t in range(num_iterations):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([
            out_train[appliance_num]
            for appliance_num, appliance in enumerate(ORDER)
        ])
        ot = torch.cat([
            out_valid[appliance_num]
            for appliance_num, appliance in enumerate(ORDER)
        ])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()
            ot = ot.cuda()

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        pred_split = torch.split(pred, pred.size(0) // len(ORDER))

        losses = [
            loss_func(pred_split[appliance_num], out_train[appliance_num]) *
            weight_appliance[appliance]
            for appliance_num, appliance in enumerate(ORDER)
        ]

        loss = sum(losses) / len(ORDER)
        if t % 10 == 0:
            print(t, loss.data[0])

        loss.backward()
        optimizer.step()

    train_pred = torch.clamp(pred, min=0.)
    train_pred = torch.split(train_pred, train_aggregate.shape[0])
    train_fold = [None for x in range(len(ORDER))]
    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            train_fold[appliance_num] = \
                train_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            train_fold[appliance_num] = \
                train_pred[appliance_num].data.numpy().reshape(-1, 24)

    # test on the validation set
    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    if cuda_av:
        valid_inp = valid_inp.cuda()

    params = [valid_inp, -2]
    for i in range(len(ORDER)):
        params.append(None)
    pr = a(*params)
    pr = torch.clamp(pr, min=0.)
    valid_pred = torch.split(pr, valid_aggregate.shape[0])
    valid_fold = [None for x in range(len(ORDER))]
    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            valid_fold[appliance_num] = \
                valid_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            valid_fold[appliance_num] = \
                valid_pred[appliance_num].data.numpy().reshape(-1, 24)

    # store ground truth of the validation set
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            valid_aggregate.shape[0], -1, 1).reshape(-1, 24)

    # calculate the error on the validation set
    error = {}
    for appliance_num, appliance in enumerate(ORDER):
        error[appliance] = mean_absolute_error(valid_fold[appliance_num],
                                               gt_fold[appliance_num])

    return train_fold, valid_fold, error
Example #13
def disagg_fold(fold_num, cell_type, hidden_size, num_layers, bidirectional,
                lr, num_iterations, order, p):
    torch.manual_seed(0)

    train, test = get_train_test(num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    out_train = [None for temp in range(len(order))]
    for a_num, appliance in enumerate(order):
        out_train[a_num] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, order)

    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()

    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    for t in range(num_iterations):

        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([
            out_train[appliance_num]
            for appliance_num, appliance in enumerate(order)
        ])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        pred = a(inp, out_train, p)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 5 == 0:
            print(t, loss.data[0])
        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()
    pr = a(test_inp, {appliance: None for appliance in order}, -2)
    pr = torch.clamp(pr, min=0.)
    test_pred = torch.split(pr, test_aggregate.shape[0])
    prediction_fold = [None for x in range(len(order))]
    if cuda_av:
        for appliance_num, appliance in enumerate(order):
            prediction_fold[appliance_num] = \
                test_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(order):
            prediction_fold[appliance_num] = \
                test_pred[appliance_num].data.numpy().reshape(-1, 24)
    gt_fold = [None for x in range(len(order))]
    for appliance_num, appliance in enumerate(order):
        gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            test_aggregate.shape[0], -1, 1).reshape(-1, 24)
    return prediction_fold, gt_fold
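
A sketch of scoring one fold of this order-conditioned model; order is the appliance sequence fed to the RNN, and all values are placeholders (mean_absolute_error is sklearn's, as elsewhere in these examples):

order = ["hvac", "fridge"]
pred, gt = disagg_fold(fold_num=0, cell_type="GRU", hidden_size=100,
                       num_layers=1, bidirectional=False, lr=0.01,
                       num_iterations=50, order=order, p=0)
fold_err = {a: mean_absolute_error(gt[i], pred[i])
            for i, a in enumerate(order)}
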
Example #14
def disagg_fold(fold_num, cell_type, hidden_size, num_layers, bidirectional,
                lr, num_iterations, order, p):
    torch.manual_seed(0)

    train, test = get_train_test(num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    out_train = {}
    out_test = {}
    for a_num, appliance in enumerate(order):
        out_train[appliance] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train_aggregate.shape[0], -1, 1))))
        out_test[appliance] = Variable(
            torch.Tensor(test[:,
                              APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                  (test_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[appliance] = out_train[appliance].cuda()
            out_test[appliance] = out_test[appliance].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, order)

    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()

    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    for t in range(num_iterations):

        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([
            out_train[appliance]
            for appliance_num, appliance in enumerate(order)
        ])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        params = [inp, p]
        for appliance in order:
            params.append(out_train[appliance])

        pred = a(*params)
        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 5 == 0:
            print(t, loss.data[0])
        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()
    test_params = [test_inp, -2]
    for appliance in order:
        test_params.append(None)
    pr = a(*test_params)
    pr = torch.clamp(pr, min=0.)
    """
    for appliance_num, appliance in enumerate(order):
        print(loss_func(pr[appliance_num], out_test[appliance]))
    """

    return pr
                    if error < min_error[fold_num]:
                        min_error[fold_num] = error
                        cnn_tree_best_param[fold_num]['lr'] = lr
                        cnn_tree_best_param[fold_num]['order'] = order
                        cnn_tree_best_param[fold_num]['iters'] = it

    return cnn_tree_errors, min_error, cnn_tree_best_param


tensor = np.load("../2015-5appliances.numpy.npy")
test_gt = {}
valid_gt = {}
for fold_num in range(5):
    test_gt[fold_num] = {}
    valid_gt[fold_num] = {}
    train, test = get_train_test(1, 5, fold_num)
    valid = train[int(0.8 * len(train)):].copy()
    for idx, appliance in enumerate(APPLIANCE_ORDER[1:-1]):
        test_gt[fold_num][appliance] = test[:, idx + 1]
        valid_gt[fold_num][appliance] = valid[:, idx + 1]

threshold = {}
for appliance in ['hvac', 'fridge', 'dr', 'dw', 'mw']:
    sample_list = []
    for fold_num in range(5):
        sample_list = np.append(sample_list, [
            x for x in test_gt[fold_num][appliance].reshape(1, -1).tolist()[0]
            if x > ON_THRESHOLD[appliance]
        ])
    mean = np.mean(sample_list)
    print(appliance, mean)
def disagg_fold(dataset, fold_num, lr, p):
    train, test = get_train_test(dataset,
                                 num_folds=num_folds,
                                 fold_num=fold_num)
    valid = train[int(0.8 * len(train)):].copy()
    train = train[:int(0.8 * len(train))].copy()
    train_aggregate = train[:, 0, :, :].reshape(train.shape[0], 1, -1, 24)
    valid_aggregate = valid[:, 0, :, :].reshape(valid.shape[0], 1, -1, 24)
    test_aggregate = test[:, 0, :, :].reshape(test.shape[0], 1, -1, 24)

    out_train, out_valid, out_test = preprocess(train, valid, test)

    loss_func = nn.L1Loss()
    model = AppliancesCNN(len(ORDER))
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    if cuda_av:
        model = model.cuda()
        loss_func = loss_func.cuda()

    inp = Variable(torch.Tensor(train_aggregate), requires_grad=False)
    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        inp = inp.cuda()
        valid_inp = valid_inp.cuda()
        test_inp = test_inp.cuda()
    valid_out = torch.cat([
        out_valid[appliance_num]
        for appliance_num, appliance in enumerate(ORDER)
    ])
    test_out = torch.cat([
        out_test[appliance_num]
        for appliance_num, appliance in enumerate(ORDER)
    ])
    train_out = torch.cat([
        out_train[appliance_num]
        for appliance_num, appliance in enumerate(ORDER)
    ])

    valid_pred = {}
    train_pred = {}
    test_pred = {}
    train_losses = {}
    test_losses = {}
    valid_losses = {}

    params = [inp, p]
    for a_num, appliance in enumerate(ORDER):
        params.append(out_train[a_num])

    if cuda_av:
        train_out = train_out.cuda()

    for t in range(1, num_iterations + 1):

        pred = model(*params)
        optimizer.zero_grad()
        loss = loss_func(pred, train_out)

        if t % 500 == 0:

            if cuda_av:
                valid_inp = valid_inp.cuda()
            valid_params = [valid_inp, -2]
            for i in range(len(ORDER)):
                valid_params.append(None)
            valid_pr = model(*valid_params)
            valid_loss = loss_func(valid_pr, valid_out)

            if cuda_av:
                test_inp = test_inp.cuda()
            test_params = [test_inp, -2]
            for i in range(len(ORDER)):
                test_params.append(None)
            test_pr = model(*test_params)
            test_loss = loss_func(test_pr, test_out)

            test_losses[t] = test_loss.data[0]
            valid_losses[t] = valid_loss.data[0]
            train_losses[t] = loss.data[0]
            # np.save("./baseline/p_50_loss")

            if t % 1000 == 0:
                valid_pr = torch.clamp(valid_pr, min=0.)
                valid_pred[t] = valid_pr
                test_pr = torch.clamp(test_pr, min=0.)
                test_pred[t] = test_pr
                train_pr = pred
                train_pr = torch.clamp(train_pr, min=0.)
                train_pred[t] = train_pr

            print("Round:", t, "Training Error:", loss.data[0],
                  "Validation Error:", valid_loss.data[0], "Test Error:",
                  test_loss.data[0])

        loss.backward()
        optimizer.step()

    train_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        train_pred[t] = torch.split(train_pred[t], train_aggregate.shape[0])
        train_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][
                    appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][
                    appliance_num].data.numpy().reshape(-1, 24)

    valid_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        valid_pred[t] = torch.split(valid_pred[t], valid_aggregate.shape[0])
        valid_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][
                    appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][
                    appliance_num].data.numpy().reshape(-1, 24)

    test_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        test_pred[t] = torch.split(test_pred[t], test_aggregate.shape[0])
        test_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][
                    appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][
                    appliance_num].data.numpy().reshape(-1, 24)

    # store ground truth of the train, validation and test sets
    train_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        train_gt_fold[appliance_num] = train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            train_aggregate.shape[0], -1, 1).reshape(-1, 24)

    valid_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        valid_gt_fold[appliance_num] = valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            valid_aggregate.shape[0], -1, 1).reshape(-1, 24)

    test_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        test_gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            test_aggregate.shape[0], -1, 1).reshape(-1, 24)

    # calculate the error on the train, validation and test sets
    train_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        train_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            train_error[t][appliance] = mean_absolute_error(
                train_fold[t][appliance_num], train_gt_fold[appliance_num])

    valid_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        valid_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            valid_error[t][appliance] = mean_absolute_error(
                valid_fold[t][appliance_num], valid_gt_fold[appliance_num])

    test_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        test_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            test_error[t][appliance] = mean_absolute_error(
                test_fold[t][appliance_num], test_gt_fold[appliance_num])

    return train_fold, valid_fold, test_fold, train_error, valid_error, test_error, train_losses, valid_losses, test_losses
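
A sketch for the CNN variant; num_iterations is read from module scope, so set it (to a multiple of 1000) before calling, and the lr/p values are placeholders:

num_iterations = 2000
(train_fold, valid_fold, test_fold,
 train_error, valid_error, test_error,
 train_losses, valid_losses, test_losses) = disagg_fold(dataset=1, fold_num=0,
                                                        lr=0.001, p=0)
best_t = min(valid_error, key=lambda t: sum(valid_error[t].values()))
print(best_t, test_error[best_t])
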
def disagg_fold(fold_num, dataset, cell_type, hidden_size, num_layers, bidirectional, lr, num_iterations, p):
    # print (fold_num, hidden_size, num_layers, bidirectional, lr, num_iterations, p)
    print(ORDER)
    torch.manual_seed(0)

    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, len(ORDER))
    # prevent negative
    #for param in a.parameters():
    #    param.data = param.data.abs()
    #print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1, 1))).type(torch.FloatTensor),
                   requires_grad=True)
    for t in range(num_iterations):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([out_train[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 100 == 0:
            print(t, loss.data[0])

        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()

    params = [test_inp, -2]
    for i in range(len(ORDER)):
        params.append(None)
    pr = a(*params)
    pr = torch.clamp(pr, min=0.)
    test_pred = torch.split(pr, test_aggregate.shape[0])
    prediction_fold = [None for x in range(len(ORDER))]

    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].data.numpy().reshape(-1, 24)
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            test_aggregate.shape[0], -1, 1).reshape(-1, 24)

    return prediction_fold, gt_fold
Example #18
def dnn_fold(dataset, fold_num, lr, num_iterations, p):
    print(fold_num)
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24)
    #ORDER = APPLIANCE_ORDER[1:][:][::-1]
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((test_aggregate.shape[0], -1))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(num_appliance=len(ORDER))
    # for param in a.parameters():
    #    param.data = param.data.abs()
    # print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)
    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1))).type(torch.FloatTensor),
                   requires_grad=True)
    for t in range(num_iterations):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([out_train[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        ot = torch.cat([out_test[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()
            ot = ot.cuda()

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        pred_split = torch.split(pred, pred.size(0) // len(ORDER))

        losses = [loss_func(pred_split[appliance_num], out_train[appliance_num]) * weight_appliance[appliance] for
                  appliance_num, appliance in enumerate(ORDER)]

        loss = sum(losses)/len(ORDER)
        if t % 10 == 0:
            print(t, loss.data[0])

        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()

    params = [test_inp, -2]
    for i in range(len(ORDER)):
        params.append(None)
    pr = a(*params)
    pr = torch.clamp(pr, min=0.)
    test_pred = torch.split(pr, test_aggregate.shape[0])
    prediction_fold = [None for x in range(len(ORDER))]

    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].data.numpy().reshape(-1, 24)
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            test_aggregate.shape[0], -1, 1).reshape(-1, 24)

    # preds.append(prediction_fold)
    # gts.append(gt_fold)
    return prediction_fold, gt_fold
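
dnn_fold follows the same per-fold pattern with appliance-weighted L1 losses and returns test predictions plus ground truth. A sketch (lr and iteration count are placeholders; ORDER, weight_appliance and mean_absolute_error come from module scope):

pred, gt = dnn_fold(dataset=1, fold_num=0, lr=0.01, num_iterations=100, p=0)
fold_err = {a: mean_absolute_error(gt[i], pred[i]) for i, a in enumerate(ORDER)}
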
Example #19
lr = float(lr)
num_iterations = int(num_iterations)

ORDER = sys.argv[7:]

p = 0
num_folds = 5

torch.manual_seed(0)

#ORDER = ['hvac']

preds = []
gts = []
for fold_num in range(5):
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24)
    #ORDER = APPLIANCE_ORDER[1:][:][::-1]
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((test_aggregate.shape[0], -1))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

num_hidden = int(num_hidden)
num_layers = int(num_layers)
num_iterations = int(num_iterations)
p = float(p)
num_directions = int(num_directions)
ORDER = sys.argv[6:len(sys.argv)]

from sklearn.metrics import mean_absolute_error

import numpy as np

import pandas as pd
from dataloader import APPLIANCE_ORDER, get_train_test

num_folds = 5
train, test = get_train_test(1, num_folds=num_folds, fold_num=0)

train_agg = train[:, 0, :].reshape(-1, 24)
test_agg = test[:, 0, :].reshape(-1, 24)

import torch
import torch.nn as nn
from torch.autograd import Variable

cuda_av = False
if torch.cuda.is_available():
    cuda_av = True

torch.manual_seed(0)
np.random.seed(0)