Code example #1
def preprocess(train, valid, test):
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train.shape[0], 1, -1, 24))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_valid = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_valid[a_num] = Variable(
            torch.Tensor(valid[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (valid.shape[0], 1, -1, 24))))
        if cuda_av:
            out_valid[a_num] = out_valid[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:,
                              APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                  (test.shape[0], 1, -1, 24))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

    return out_train, out_valid, out_test
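
The helper above relies on module-level names defined elsewhere in the script (APPLIANCE_ORDER, ORDER, cuda_av). A minimal, hypothetical setup that would let it run is sketched below; the appliance list is inferred from the commented hint "ORDER = APPLIANCE_ORDER[1:][:][::-1]" seen in later examples, and all values are illustrative only.

import numpy as np
import torch
from torch.autograd import Variable  # preprocess() references Variable at module level

# Assumed globals (illustrative, not the project's exact values):
APPLIANCE_ORDER = ['aggregate', 'hvac', 'dr', 'fridge', 'dw', 'mw']
ORDER = APPLIANCE_ORDER[1:]
cuda_av = torch.cuda.is_available()

# Dummy data shaped (homes, appliances, days, 24 hourly readings):
train = np.random.rand(40, len(APPLIANCE_ORDER), 30, 24)
valid = np.random.rand(10, len(APPLIANCE_ORDER), 30, 24)
test = np.random.rand(12, len(APPLIANCE_ORDER), 30, 24)

out_train, out_valid, out_test = preprocess(train, valid, test)
print(out_train[0].size())  # torch.Size([40, 1, 30, 24])
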
Code example #2
def disagg_fold_new(fold_num, appliance, cell_type, hidden_size, num_layers,
                    bidirectional, lr, num_iterations):
    torch.manual_seed(0)

    appliance_num = APPLIANCE_ORDER.index(appliance)
    train, test = get_train_test(num_folds=num_folds, fold_num=fold_num)

    train = np.vstack([train, aug_data[:num_aug]])

    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    train_appliance = train[:, appliance_num, :, :].reshape(-1, 24, 1)
    test_appliance = test[:, appliance_num, :, :].reshape(-1, 24, 1)
    gts.append(test_appliance.reshape(-1, 24))
    loss_func = nn.L1Loss()
    r = CustomRNN(cell_type, hidden_size, num_layers, bidirectional)

    if cuda_av:
        r = r.cuda()
        loss_func = loss_func.cuda()

    # Setting all the params to be non-negative
    #for param in r.parameters():
    #    param.data = param.data.abs()

    optimizer = torch.optim.Adam(r.parameters(), lr=lr)

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    test_y = Variable(torch.Tensor(test_appliance), requires_grad=False)

    prediction_fold = {}

    for t in range(1, num_iterations + 1):

        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        train_y = Variable(torch.Tensor(train_appliance))
        if cuda_av:
            inp = inp.cuda()
            train_y = train_y.cuda()
        pred = r(inp)

        optimizer.zero_grad()
        loss = loss_func(pred, train_y)
        if t % 100 == 0:
            print(t, loss.data[0])
        loss.backward()
        optimizer.step()

        if t % 200 == 0 and t != 0:
            if cuda_av:
                test_inp = test_inp.cuda()
            pred_test = r(test_inp)
            pred_test = torch.clamp(pred_test, min=0.)
            if cuda_av:
                prediction_fold[t] = pred_test.cpu().data.numpy()
            else:
                prediction_fold[t] = pred_test.data.numpy()

    return prediction_fold, test_appliance
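
These excerpts use the pre-0.4 PyTorch API: tensors are wrapped in Variable, and scalar losses are read with loss.data[0]. On PyTorch 0.4 and later, the equivalent idioms would look roughly like the following sketch, with dummy tensors standing in for the example's data.

import numpy as np
import torch
import torch.nn as nn

train_aggregate = np.random.rand(8, 24, 1)  # dummy stand-in for the real data
train_appliance = np.random.rand(8, 24, 1)

# Variable was merged into Tensor in PyTorch 0.4; plain tensors suffice.
inp = torch.tensor(train_aggregate, dtype=torch.float32)
target = torch.tensor(train_appliance, dtype=torch.float32)

loss = nn.L1Loss()(inp, target)
print(loss.item())  # replaces loss.data[0]

# Explicit device moves replace the cuda_av branches.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
inp, target = inp.to(device), target.to(device)
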
Code example #3
File: tree-dnn-Yiling.py Project: xunyiljg/freq-nilm
torch.manual_seed(0)

#ORDER = ['hvac']

preds = []
gts = []
for fold_num in range(5):
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24)
    #ORDER = APPLIANCE_ORDER[1:][:][::-1]
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((test_aggregate.shape[0], -1))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(size1, size2, size3, len(ORDER))
    # for param in a.parameters():
    #    param.data = param.data.abs()
    # print(a)
Code example #4
def disagg_fold(fold_num, dataset, lr, num_iterations, p, ORDER):

    train, test = get_train_test(dataset,
                                 num_folds=num_folds,
                                 fold_num=fold_num)
    train, valid = train_test_split(train, test_size=0.2, random_state=0)

    train_aggregate = train[:, 0, :, :].reshape(-1, 24)
    valid_aggregate = valid[:, 0, :, :].reshape(-1, 24)
    #ORDER = APPLIANCE_ORDER[1:][:][::-1]
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train_aggregate.shape[0], -1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_valid = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_valid[a_num] = Variable(
            torch.Tensor(valid[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (valid_aggregate.shape[0], -1))))
        if cuda_av:
            out_valid[a_num] = out_valid[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(num_appliance=len(ORDER))
    # for param in a.parameters():
    #    param.data = param.data.abs()
    # print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)
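    # Note: this initial inp is recreated inside the training loop below.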
    inp = Variable(torch.Tensor(
        train_aggregate.reshape(
            (train_aggregate.shape[0], -1))).type(torch.FloatTensor),
                   requires_grad=True)
    for t in range(num_iterations):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([
            out_train[appliance_num]
            for appliance_num, appliance in enumerate(ORDER)
        ])
        ot = torch.cat([
            out_valid[appliance_num]
            for appliance_num, appliance in enumerate(ORDER)
        ])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()
            ot = ot.cuda()

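        # p presumably gates teacher forcing: ground-truth outputs are passed
        # during training, while -2 and None are passed at evaluation time.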
        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        pred_split = torch.split(pred, pred.size(0) // len(ORDER))

        losses = [
            loss_func(pred_split[appliance_num], out_train[appliance_num]) *
            weight_appliance[appliance]
            for appliance_num, appliance in enumerate(ORDER)
        ]

        loss = sum(losses) / len(ORDER)
        if t % 10 == 0:
            print(t, loss.data[0])

        loss.backward()
        optimizer.step()

    train_pred = torch.clamp(pred, min=0.)
    train_pred = torch.split(train_pred, train_aggregate.shape[0])
    train_fold = [None for x in range(len(ORDER))]
    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            train_fold[appliance_num] = train_pred[appliance_num].cpu(
            ).data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            train_fold[appliance_num] = train_pred[appliance_num].data.numpy(
            ).reshape(-1, 24)

    # test on validation set
    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    if cuda_av:
        valid_inp = valid_inp.cuda()

    params = [valid_inp, -2]
    for i in range(len(ORDER)):
        params.append(None)
    pr = a(*params)
    pr = torch.clamp(pr, min=0.)
    valid_pred = torch.split(pr, valid_aggregate.shape[0])
    valid_fold = [None for x in range(len(ORDER))]
    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            valid_fold[appliance_num] = valid_pred[appliance_num].cpu(
            ).data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            valid_fold[appliance_num] = valid_pred[appliance_num].data.numpy(
            ).reshape(-1, 24)

    # store ground truth of validation set
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = valid[:,
                                       APPLIANCE_ORDER.
                                       index(appliance), :, :].reshape(
                                           valid_aggregate.shape[0], -1,
                                           1).reshape(-1, 24)

    # calculate the error on the validation set
    error = {}
    for appliance_num, appliance in enumerate(ORDER):
        error[appliance] = mean_absolute_error(valid_fold[appliance_num],
                                               gt_fold[appliance_num])

    return train_fold, valid_fold, error
Code example #5
def disagg_fold(fold_num, dataset, cell_type, hidden_size, num_layers, bidirectional, lr, num_iterations, p):
    # print (fold_num, hidden_size, num_layers, bidirectional, lr, num_iterations, p)
    print(ORDER)
    torch.manual_seed(0)

    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, len(ORDER))
    # prevent negative
    #for param in a.parameters():
    #    param.data = param.data.abs()
    #print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1, 1))).type(torch.FloatTensor),
                   requires_grad=True)
    for t in range(num_iterations):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([out_train[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 100 == 0:
            print(t, loss.data[0])

        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()

    params = [test_inp, -2]
    for i in range(len(ORDER)):
        params.append(None)
    pr = a(*params)
    pr = torch.clamp(pr, min=0.)
    test_pred = torch.split(pr, test_aggregate.shape[0])
    prediction_fold = [None for x in range(len(ORDER))]

    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].data.numpy().reshape(-1, 24)
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(test_aggregate.shape[0], -1,
                                                                                         1).reshape(-1, 24)

    return prediction_fold, gt_fold
Code example #6
def disagg_fold(dataset, fold_num, lr, p):
	train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
	valid = train[int(0.8*len(train)):].copy()
	train = train[:int(0.8 * len(train))].copy()
	train_aggregate = train[:, 0, :, :].reshape(train.shape[0], 1, -1, 24)
	valid_aggregate = valid[:, 0, :, :].reshape(valid.shape[0], 1, -1, 24)
	test_aggregate = test[:, 0, :, :].reshape(test.shape[0], 1, -1, 24)

	out_train, out_valid, out_test = preprocess(train, valid, test)
	
	loss_func = nn.L1Loss()
	model = AppliancesCNN(len(ORDER))
	optimizer = torch.optim.Adam(model.parameters(), lr=lr)

	if cuda_av:
	    model = model.cuda()
	    loss_func = loss_func.cuda()


	inp = Variable(torch.Tensor(train_aggregate), requires_grad=False)
	valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
	test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
	if cuda_av:
	    inp = inp.cuda()
	    valid_inp = valid_inp.cuda()
	    test_inp = test_inp.cuda()
	valid_out = torch.cat([out_valid[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
	test_out = torch.cat([out_test[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
	train_out = torch.cat([out_train[appliance_num] for appliance_num, appliance in enumerate(ORDER)])

	valid_pred = {}
	train_pred = {}
	test_pred = {}
	train_losses = {}
	test_losses = {}
	valid_losses = {}

	params = [inp, p]
	for a_num, appliance in enumerate(ORDER):
	    params.append(out_train[a_num])

	if cuda_av:
	    train_out = train_out.cuda()

	for t in range(1, num_iterations+1):
	  
	    pred = model(*params)
	    optimizer.zero_grad()
	    loss = loss_func(pred, train_out)

	    if t % 500 == 0:

	        if cuda_av:
	            valid_inp = valid_inp.cuda()
	        valid_params = [valid_inp, -2]
	        for i in range(len(ORDER)):
	            valid_params.append(None)
	        valid_pr = model(*valid_params)
	        valid_loss = loss_func(valid_pr, valid_out)

	        if cuda_av:
	            test_inp = test_inp.cuda()
	        test_params = [test_inp, -2]
	        for i in range(len(ORDER)):
	            test_params.append(None)
	        test_pr = model(*test_params)
	        test_loss = loss_func(test_pr, test_out)

	        test_losses[t] = test_loss.data[0]
	        valid_losses[t] = valid_loss.data[0]
	        train_losses[t] = loss.data[0]
	        # np.save("./baseline/p_50_loss")

	        if t % 1000 == 0:
	            valid_pr = torch.clamp(valid_pr, min=0.)
	            valid_pred[t] = valid_pr
	            test_pr = torch.clamp(test_pr, min=0.)
	            test_pred[t] = test_pr
	            train_pr = pred
	            train_pr = torch.clamp(train_pr, min=0.)
	            train_pred[t] = train_pr

	        print("Round:", t, "Training Error:", loss.data[0], "Validation Error:", valid_loss.data[0], "Test Error:", test_loss.data[0])

	    loss.backward()
	    optimizer.step()

	train_fold = {}
	for t in range(1000, num_iterations + 1, 1000):
	    train_pred[t] = torch.split(train_pred[t], train_aggregate.shape[0])
	    train_fold[t] = [None for x in range(len(ORDER))]
	    if cuda_av:
	        for appliance_num, appliance in enumerate(ORDER):
	            train_fold[t][appliance_num] = train_pred[t][appliance_num].cpu().data.numpy().reshape(-1, 24)
	    else:
	        for appliance_num, appliance in enumerate(ORDER):
	            train_fold[t][appliance_num] = train_pred[t][appliance_num].data.numpy().reshape(-1, 24)
                
	valid_fold = {}
	for t in range(1000, num_iterations + 1, 1000):
	    valid_pred[t] = torch.split(valid_pred[t], valid_aggregate.shape[0])
	    valid_fold[t] = [None for x in range(len(ORDER))]
	    if cuda_av:
	        for appliance_num, appliance in enumerate(ORDER):
	            valid_fold[t][appliance_num] = valid_pred[t][appliance_num].cpu().data.numpy().reshape(-1, 24)
	    else:
	        for appliance_num, appliance in enumerate(ORDER):
	            valid_fold[t][appliance_num] = valid_pred[t][appliance_num].data.numpy().reshape(-1, 24)

	test_fold = {}
	for t in range(1000, num_iterations + 1, 1000):
	    test_pred[t] = torch.split(test_pred[t], test_aggregate.shape[0])
	    test_fold[t] = [None for x in range(len(ORDER))]
	    if cuda_av:
	        for appliance_num, appliance in enumerate(ORDER):
	            test_fold[t][appliance_num] = test_pred[t][appliance_num].cpu().data.numpy().reshape(-1, 24)
	    else:
	        for appliance_num, appliance in enumerate(ORDER):
	            test_fold[t][appliance_num] = test_pred[t][appliance_num].data.numpy().reshape(-1, 24)

	# store ground truth of the train/validation/test sets
	train_gt_fold = [None for x in range(len(ORDER))]
	for appliance_num, appliance in enumerate(ORDER):
	    train_gt_fold[appliance_num] = train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
	        train_aggregate.shape[0],
	        -1, 1).reshape(-1, 24)

	valid_gt_fold = [None for x in range(len(ORDER))]
	for appliance_num, appliance in enumerate(ORDER):
	    valid_gt_fold[appliance_num] = valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
	        valid_aggregate.shape[0],
	        -1, 1).reshape(-1, 24)

	test_gt_fold = [None for x in range(len(ORDER))]
	for appliance_num, appliance in enumerate(ORDER):
	    test_gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
	        test_aggregate.shape[0],
	        -1, 1).reshape(-1, 24)

	# calculate the errors on the train/validation/test sets
	train_error = {}
	for t in range(1000, num_iterations + 1, 1000):
	    train_error[t] = {}
	    for appliance_num, appliance in enumerate(ORDER):
	        train_error[t][appliance] = mean_absolute_error(train_fold[t][appliance_num], train_gt_fold[appliance_num])

	valid_error = {}
	for t in range(1000, num_iterations + 1, 1000):
	    valid_error[t] = {}
	    for appliance_num, appliance in enumerate(ORDER):
	        valid_error[t][appliance] = mean_absolute_error(valid_fold[t][appliance_num], valid_gt_fold[appliance_num])

	test_error = {}
	for t in range(1000, num_iterations + 1, 1000):
	    test_error[t] = {}
	    for appliance_num, appliance in enumerate(ORDER):
	        test_error[t][appliance] = mean_absolute_error(test_fold[t][appliance_num], test_gt_fold[appliance_num])

	return train_fold, valid_fold, test_fold, train_error, valid_error, test_error, train_losses, valid_losses, test_losses
Code example #7
def disagg_fold(fold_num, dataset, cell_type, hidden_size, num_layers, bidirectional, lr, num_iterations, p):
    # print (fold_num, hidden_size, num_layers, bidirectional, lr, num_iterations, p)
    #print (ORDER)
    torch.manual_seed(0)

    num_folds = 5
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    # from sklearn.model_selection import train_test_split
    # train, valid = train_test_split(train, test_size=0.2, random_state=0)

    valid = train[int(0.8*len(train)):].copy()
    train = train[:int(0.8 * len(train))].copy()


    train_aggregate = train[:, 0, :, :].reshape(-1, train.shape[3], 1)
    valid_aggregate = valid[:, 0, :, :].reshape(-1, train.shape[3], 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, train.shape[3], 1)


    #print (train.shape)
    #print (valid.shape)
    #print (test.shape)

    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_valid = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_valid[a_num] = Variable(
            torch.Tensor(valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((valid_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_valid[a_num] = out_valid[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((test_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, len(ORDER))
    # prevent negative
    #for param in a.parameters():
    #    param.data = param.data.abs()
    #print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1, 1))).type(torch.FloatTensor),
                   requires_grad=True)

    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    if cuda_av:
        valid_inp = valid_inp.cuda()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()

    valid_pred = {}
    train_pred = {}
    test_pred = {}
    test_losses = {}
    valid_losses = {}

    for t in range(1, num_iterations+1):
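        # Sample a random minibatch of 50 training series per iteration (with replacement).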
        idx_train = Variable(torch.LongTensor(np.random.choice(range(train_aggregate.shape[0]), 50, replace=True)))
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)

        valid_out = torch.cat([out_valid[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        test_out = torch.cat([out_test[appliance_num] for appliance_num, appliance in enumerate(ORDER)])

        if cuda_av:
            idx_train = idx_train.cuda()
            out = torch.cat(
                [out_train[appliance_num].index_select(0, idx_train) for appliance_num, appliance in enumerate(ORDER)])
            inp = inp.cuda().index_select(0, idx_train)
            out = out.cuda()
        else:
            inp = inp.index_select(0, idx_train)
            out = torch.cat(
                [out_train[appliance_num].index_select(0, idx_train) for appliance_num, appliance in enumerate(ORDER)])

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 50 == 0:
            #print(t, loss.data[0])

            if cuda_av:
                valid_inp = valid_inp.cuda()
            valid_params = [valid_inp, -2]
            for i in range(len(ORDER)):
                valid_params.append(None)
            valid_pr = a(*valid_params)
            valid_loss = loss_func(valid_pr, valid_out)

            if cuda_av:
                test_inp = test_inp.cuda()
            test_params = [test_inp, -2]
            for i in range(len(ORDER)):
                test_params.append(None)
            test_pr = a(*test_params)
            test_loss = loss_func(test_pr, test_out)

            test_losses[t] = test_loss.data[0]
            valid_losses[t] = valid_loss.data[0]
            # np.save("./baseline/p_50_loss")

            if t % 1000 == 0:
                valid_pr = torch.clamp(valid_pr, min=0.)
                valid_pred[t] = valid_pr
                test_pr = torch.clamp(test_pr, min=0.)
                test_pred[t] = test_pr
                train_pr = pred
                train_pr = torch.clamp(train_pr, min=0.)
                train_pred[t] = train_pr

            #print("Round:", t, "Training Error:", loss.data[0], "Validation Error:", valid_loss.data[0], "Test Error:", test_loss.data[0])

        loss.backward()
        optimizer.step()

    # store training prediction
    # train_pred = torch.clamp(pred, min=0.)
    # train_pred = torch.split(train_pred, train_aggregate.shape[0])
    train_fold = [None for x in range(len(ORDER))]  # placeholder: training predictions are not stored in this variant
    # if cuda_av:
    #     for appliance_num, appliance in enumerate(ORDER):
    #         train_fold[appliance_num] = train_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    # else:
    #     for appliance_num, appliance in enumerate(ORDER):
    #         train_fold[appliance_num] = train_pred[appliance_num].data.numpy().reshape(-1, 24)


    # test on validation set

    valid_fold = {}
    for t in range(1000, num_iterations + 1, 1000):

        valid_pred[t] = torch.split(valid_pred[t], valid_aggregate.shape[0])
        valid_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].cpu().data.numpy().reshape(-1, valid.shape[3])
        else:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].data.numpy().reshape(-1, valid.shape[3])

    test_fold = {}
    for t in range(1000, num_iterations + 1, 1000):

        test_pred[t] = torch.split(test_pred[t], test_aggregate.shape[0])
        test_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][appliance_num].cpu().data.numpy().reshape(-1, valid.shape[3])
        else:
            for appliance_num, appliance in enumerate(ORDER):
                test_fold[t][appliance_num] = test_pred[t][appliance_num].data.numpy().reshape(-1, valid.shape[3])

    # store ground truth of validation set
    valid_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        valid_gt_fold[appliance_num] = valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            valid_aggregate.shape[0],
            -1, 1).reshape(-1, valid.shape[3])

    test_gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        test_gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            test_aggregate.shape[0],
            -1, 1).reshape(-1, test.shape[3])

    # calculate the errors on the validation and test sets
    valid_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        valid_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            valid_error[t][appliance] = mean_absolute_error(valid_fold[t][appliance_num], valid_gt_fold[appliance_num])

    test_error = {}
    for t in range(1000, num_iterations + 1, 1000):
        test_error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            test_error[t][appliance] = mean_absolute_error(test_fold[t][appliance_num], test_gt_fold[appliance_num])

    return train_fold, valid_fold, test_fold, valid_error, test_error, valid_losses, test_losses
Code example #8
def disagg_fold(fold_num, dataset, cell_type, hidden_size, num_layers, bidirectional, lr, num_iterations, p):
    # print (fold_num, hidden_size, num_layers, bidirectional, lr, num_iterations, p)
    print(ORDER)
    torch.manual_seed(0)

    num_folds = 5
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    from sklearn.model_selection import train_test_split
    train, valid = train_test_split(train, test_size=0.2, random_state=0)


    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    valid_aggregate = valid[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)


    print(train.shape)
    print(valid.shape)
    print(test.shape)

    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, len(ORDER))
    # prevent negative
    #for param in a.parameters():
    #    param.data = param.data.abs()
    #print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1, 1))).type(torch.FloatTensor),
                   requires_grad=True)

    valid_pred = {}
    train_pred = {}

    valid_inp = Variable(torch.Tensor(valid_aggregate), requires_grad=False)
    if cuda_av:
        valid_inp = valid_inp.cuda()

    for t in range(1, num_iterations+1):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([out_train[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 100 == 1:
            print(t, loss.data[0])

        if t % 1000 == 0 and t != 0:
            if cuda_av:
                valid_inp = valid_inp.cuda()
            valid_params = [valid_inp, -2]
            for i in range(len(ORDER)):
                valid_params.append(None)
            valid_pr = a(*valid_params)
            valid_pr = torch.clamp(valid_pr, min=0.)
            valid_pred[t] = valid_pr

            train_pr = pred
            train_pr = torch.clamp(train_pr, min=0.)
            train_pred[t] = train_pr


        loss.backward()
        optimizer.step()

    # store training prediction
    train_fold = {}
    for t in range(1000, num_iterations + 1, 1000):
        # train_pred[t] = torch.clamp(train_pred[t], min=0.)
        train_pred[t] = torch.split(train_pred[t], train_aggregate.shape[0])
        train_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                train_fold[t][appliance_num] = train_pred[t][appliance_num].data.numpy().reshape(-1, 24)


    # test on validation set

    valid_fold = {}
    for t in range(1000, num_iterations+1, 1000):

        valid_pred[t] = torch.split(valid_pred[t], valid_aggregate.shape[0])
        valid_fold[t] = [None for x in range(len(ORDER))]
        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].cpu().data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                valid_fold[t][appliance_num] = valid_pred[t][appliance_num].data.numpy().reshape(-1, 24)
    
    # store ground truth of validation set
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = valid[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(valid_aggregate.shape[0], -1, 
                                                                                        1).reshape(-1, 24)

    # calculate the error on the validation set
    error = {}
    for t in range(1000, num_iterations+1, 1000):
        error[t] = {}
        for appliance_num, appliance in enumerate(ORDER):
            error[t][appliance] = mean_absolute_error(valid_fold[t][appliance_num], gt_fold[appliance_num])
    
    return train_fold, valid_fold, error
Code example #9
if torch.cuda.is_available():
    cuda_av = True
else:
    cuda_av = False

fold_num = 0
num_folds = 5
cell_type = "GRU"
hidden_size = 150
lr = 0.1
bidirectional = True
appliance = "hvac"

torch.manual_seed(0)

appliance_num = APPLIANCE_ORDER.index(appliance)
train, test = get_train_test(2, num_folds=num_folds, fold_num=fold_num)

train_aggregate = train[:, 0, :, :].reshape(train.shape[0], -1, 1)

test_aggregate = test[:, 0, :, :].reshape(test.shape[0], -1, 1)

train_appliance = train[:, appliance_num, :, :].reshape(train.shape[0], -1, 1)
test_appliance = test[:, appliance_num, :, :].reshape(test.shape[0], -1, 1)


loss_func = nn.L1Loss()
r = CustomRNN(cell_type, hidden_size, 1, bidirectional)

if cuda_av:
    r = r.cuda()
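
CustomRNN is defined elsewhere in the project. A hypothetical sketch of what it might look like, inferred only from how it is called above (input and output of shape (batch, seq_len, 1)); the real implementation may differ.

import torch.nn as nn

class CustomRNN(nn.Module):
    def __init__(self, cell_type, hidden_size, num_layers, bidirectional):
        super(CustomRNN, self).__init__()
        cell = {"GRU": nn.GRU, "LSTM": nn.LSTM, "RNN": nn.RNN}[cell_type]
        self.rnn = cell(input_size=1, hidden_size=hidden_size,
                        num_layers=num_layers, batch_first=True,
                        bidirectional=bidirectional)
        num_directions = 2 if bidirectional else 1
        self.linear = nn.Linear(hidden_size * num_directions, 1)

    def forward(self, x):
        out, _ = self.rnn(x)     # (batch, seq_len, hidden_size * num_directions)
        return self.linear(out)  # (batch, seq_len, 1)
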
Code example #10
preds = []
gts = []
num_folds_run = 5
for fold_num in range(num_folds_run):
    print("-" * 40)
    sys.stdout.flush()
    train, test = get_train_test(num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24)
    #ORDER = APPLIANCE_ORDER[1:][:][::-1]
    ORDER = ['mw', 'dw', 'fridge', 'dr', 'hvac']
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train_aggregate.shape[0], -1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:,
                              APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                  (test_aggregate.shape[0], -1))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type='GRU',
Code example #11
def dnn_fold(dataset, fold_num, lr, num_iterations, p):
    print(fold_num)
    train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24)
    #ORDER = APPLIANCE_ORDER[1:][:][::-1]
    out_train = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_train[a_num] = Variable(
            torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((train_aggregate.shape[0], -1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    out_test = [None for temp in range(len(ORDER))]
    for a_num, appliance in enumerate(ORDER):
        out_test[a_num] = Variable(
            torch.Tensor(test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape((test_aggregate.shape[0], -1))))
        if cuda_av:
            out_test[a_num] = out_test[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(num_appliance=len(ORDER))
    # for param in a.parameters():
    #    param.data = param.data.abs()
    # print(a)
    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()
    optimizer = torch.optim.Adam(a.parameters(), lr=lr)
    inp = Variable(torch.Tensor(train_aggregate.reshape((train_aggregate.shape[0], -1))).type(torch.FloatTensor),
                   requires_grad=True)
    for t in range(num_iterations):
        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([out_train[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        ot = torch.cat([out_test[appliance_num] for appliance_num, appliance in enumerate(ORDER)])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()
            ot = ot.cuda()

        params = [inp, p]
        for a_num, appliance in enumerate(ORDER):
            params.append(out_train[a_num])
        # print(params)
        pred = a(*params)

        optimizer.zero_grad()
        pred_split = torch.split(pred, pred.size(0) // len(ORDER))

        losses = [loss_func(pred_split[appliance_num], out_train[appliance_num]) * weight_appliance[appliance] for
                  appliance_num, appliance in enumerate(ORDER)]

        loss = sum(losses)/len(ORDER)
        if t % 10 == 0:
            print(t, loss.data[0])

        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()

    params = [test_inp, -2]
    for i in range(len(ORDER)):
        params.append(None)
    pr = a(*params)
    pr = torch.clamp(pr, min=0.)
    test_pred = torch.split(pr, test_aggregate.shape[0])
    prediction_fold = [None for x in range(len(ORDER))]

    if cuda_av:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].cpu().data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(ORDER):
            prediction_fold[appliance_num] = test_pred[appliance_num].data.numpy().reshape(-1, 24)
    gt_fold = [None for x in range(len(ORDER))]
    for appliance_num, appliance in enumerate(ORDER):
        gt_fold[appliance_num] = test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(test_aggregate.shape[0], -1,
                                                                                         1).reshape(-1, 24)

    # preds.append(prediction_fold)
    # gts.append(gt_fold)
    return prediction_fold, gt_fold
Code example #12
File: approach.py Project: xunyiljg/freq-nilm
def disagg_fold(fold_num, cell_type, hidden_size, num_layers, bidirectional,
                lr, num_iterations, order, p):
    torch.manual_seed(0)

    train, test = get_train_test(num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    out_train = [None for temp in range(len(order))]
    for a_num, appliance in enumerate(order):
        out_train[a_num] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[a_num] = out_train[a_num].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, order)

    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()

    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    for t in range(num_iterations):

        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([
            out_train[appliance_num]
            for appliance_num, appliance in enumerate(order)
        ])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        pred = a(inp, out_train, p)

        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 5 == 0:
            print(t, loss.data[0])
        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()
    pr = a(test_inp, {appliance: None for appliance in order}, -2)
    pr = torch.clamp(pr, min=0.)
    test_pred = torch.split(pr, test_aggregate.shape[0])
    prediction_fold = [None for x in range(len(order))]
    if cuda_av:
        for appliance_num, appliance in enumerate(order):
            prediction_fold[appliance_num] = test_pred[appliance_num].cpu(
            ).data.numpy().reshape(-1, 24)
    else:
        for appliance_num, appliance in enumerate(order):
            prediction_fold[appliance_num] = test_pred[
                appliance_num].data.numpy().reshape(-1, 24)
    gt_fold = [None for x in range(len(order))]
    for appliance_num, appliance in enumerate(order):
        gt_fold[appliance_num] = test[:,
                                      APPLIANCE_ORDER.
                                      index(appliance), :, :].reshape(
                                          test_aggregate.shape[0], -1,
                                          1).reshape(-1, 24)
    return prediction_fold, gt_fold
Code example #13
def disagg_fold(fold_num, cell_type, hidden_size, num_layers, bidirectional,
                lr, num_iterations, order, p):
    torch.manual_seed(0)

    train, test = get_train_test(num_folds=num_folds, fold_num=fold_num)
    train_aggregate = train[:, 0, :, :].reshape(-1, 24, 1)
    test_aggregate = test[:, 0, :, :].reshape(-1, 24, 1)

    out_train = {}
    out_test = {}
    for a_num, appliance in enumerate(order):
        out_train[appliance] = Variable(
            torch.Tensor(train[:,
                               APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                   (train_aggregate.shape[0], -1, 1))))
        out_test[appliance] = Variable(
            torch.Tensor(test[:,
                              APPLIANCE_ORDER.index(appliance), :, :].reshape(
                                  (test_aggregate.shape[0], -1, 1))))
        if cuda_av:
            out_train[appliance] = out_train[appliance].cuda()
            out_test[appliance] = out_test[appliance].cuda()

    loss_func = nn.L1Loss()
    a = AppliancesRNN(cell_type, hidden_size, num_layers, bidirectional, order)

    if cuda_av:
        a = a.cuda()
        loss_func = loss_func.cuda()

    optimizer = torch.optim.Adam(a.parameters(), lr=lr)

    for t in range(num_iterations):

        inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
        out = torch.cat([
            out_train[appliance]
            for appliance_num, appliance in enumerate(order)
        ])
        if cuda_av:
            inp = inp.cuda()
            out = out.cuda()

        params = [inp, p]
        for appliance in order:
            params.append(out_train[appliance])

        pred = a(*params)
        optimizer.zero_grad()
        loss = loss_func(pred, out)
        if t % 5 == 0:
            print(t, loss.data[0])
        loss.backward()
        optimizer.step()

    test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
    if cuda_av:
        test_inp = test_inp.cuda()
    test_params = [test_inp, -2]
    for appliance in order:
        test_params.append(None)
    pr = a(*test_params)
    pr = torch.clamp(pr, min=0.)
    """
    for appliance_num, appliance in enumerate(order):
        print(loss_func(pr[appliance_num], out_test[appliance]))
    """

    return pr
Code example #14
        e1 = self.conv1(input)
        bn1 = self.bn1(self.act(e1))
        e2 = self.bn2(self.conv2(bn1))
        
        e5 = self.bn5(self.conv5(e2))
        e6 = self.conv6(e5)

        return e6


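The fragment above shows only part of the model's forward pass. A minimal sketch of a module consistent with it follows; the channel counts and kernel sizes are assumptions, not the project's values.

import torch.nn as nn

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.act = nn.ReLU()
        # Assumed channel sizes; padding=1 with 3x3 kernels preserves the
        # (days, 24) spatial dimensions of the aggregate input.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv5 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.conv6 = nn.Conv2d(32, 1, kernel_size=3, padding=1)

    def forward(self, input):
        e1 = self.conv1(input)
        bn1 = self.bn1(self.act(e1))
        e2 = self.bn2(self.conv2(bn1))
        e5 = self.bn5(self.conv5(e2))
        e6 = self.conv6(e5)
        return e6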

# Input parameters
dataset, fold_num, appliance, lr, iterations = sys.argv[1:]
dataset = int(dataset)
fold_num = int(fold_num)
appliance_index = APPLIANCE_ORDER.index(appliance)
lr = float(lr)
iterations = int(iterations)
num_folds = 5
print(dataset, fold_num, appliance, lr, iterations)

# prepare the data
train, test = get_train_test(dataset, num_folds=num_folds, fold_num=fold_num)
valid = train[int(0.8*len(train)):].copy()
train = train[:int(0.8 * len(train))].copy()
train_aggregate = train[:, 0, :, :].reshape(train.shape[0], 1, -1, 24)
valid_aggregate = valid[:, 0, :, :].reshape(valid.shape[0], 1, -1, 24)
test_aggregate = test[:, 0, :, :].reshape(test.shape[0], 1, -1, 24)

# Initialize model and loss function.
model = Net()
Code example #15
def disagg(dataset, cell_type, hidden_size, num_layers, bidirectional, lr,
           num_iterations, p):
    preds = []
    gts = []
    for fold_num in range(num_folds):
        print("-" * 40)
        sys.stdout.flush()
        train, test = get_train_test(dataset,
                                     num_folds=num_folds,
                                     fold_num=fold_num)
        train_aggregate = train[:, 0, :, :].reshape(-1, 24)
        test_aggregate = test[:, 0, :, :].reshape(-1, 24)
        #ORDER = APPLIANCE_ORDER[1:][:][::-1]
        # ORDER = ['mw','dw','fridge','dr','hvac']
        out_train = [None for temp in range(len(ORDER))]
        for a_num, appliance in enumerate(ORDER):
            out_train[a_num] = Variable(
                torch.Tensor(
                    train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
                        (train_aggregate.shape[0], -1))))
            if cuda_av:
                out_train[a_num] = out_train[a_num].cuda()

        out_test = [None for temp in range(len(ORDER))]
        for a_num, appliance in enumerate(ORDER):
            out_test[a_num] = Variable(
                torch.Tensor(
                    test[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
                        (test_aggregate.shape[0], -1))))
            if cuda_av:
                out_test[a_num] = out_test[a_num].cuda()

        loss_func = nn.L1Loss()
        a = AppliancesRNN(cell_type,
                          hidden_size,
                          num_layers,
                          bidirectional,
                          num_appliance=len(ORDER))
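        # Unlike the other variants above, the non-negativity projection below is active here.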
        for param in a.parameters():
            param.data = param.data.abs()
        # print(a)
        if cuda_av:
            a = a.cuda()
            loss_func = loss_func.cuda()
        optimizer = torch.optim.Adam(a.parameters(), lr=lr)
        inp = Variable(torch.Tensor(
            train_aggregate.reshape(
                (train_aggregate.shape[0], -1))).type(torch.FloatTensor),
                       requires_grad=True)
        for t in range(num_iterations):
            inp = Variable(torch.Tensor(train_aggregate), requires_grad=True)
            out = torch.cat([
                out_train[appliance_num]
                for appliance_num, appliance in enumerate(ORDER)
            ])
            ot = torch.cat([
                out_test[appliance_num]
                for appliance_num, appliance in enumerate(ORDER)
            ])
            if cuda_av:
                inp = inp.cuda()
                out = out.cuda()
                ot = ot.cuda()

            params = [inp, p]
            for a_num, appliance in enumerate(ORDER):
                params.append(out_train[a_num])
            # print(params)
            pred = a(*params)

            optimizer.zero_grad()
            pred_split = torch.split(pred, pred.size(0) // len(ORDER))

            losses = [
                loss_func(pred_split[appliance_num], out_train[appliance_num])
                * weight_appliance[appliance]
                for appliance_num, appliance in enumerate(ORDER)
            ]

            loss = sum(losses) / len(ORDER)
            if t % 20 == 0:
                print(t, loss.data[0])
                sys.stdout.flush()

            loss.backward()
            optimizer.step()

        test_inp = Variable(torch.Tensor(test_aggregate), requires_grad=False)
        if cuda_av:
            test_inp = test_inp.cuda()

        params = [test_inp, -2]
        for i in range(len(ORDER)):
            params.append(None)
        pr = a(*params)
        pr = torch.clamp(pr, min=0.)
        test_pred = torch.split(pr, test_aggregate.shape[0])
        prediction_fold = [None for x in range(len(ORDER))]

        if cuda_av:
            for appliance_num, appliance in enumerate(ORDER):
                prediction_fold[appliance_num] = test_pred[appliance_num].cpu(
                ).data.numpy().reshape(-1, 24)
        else:
            for appliance_num, appliance in enumerate(ORDER):
                prediction_fold[appliance_num] = test_pred[
                    appliance_num].data.numpy().reshape(-1, 24)
        gt_fold = [None for x in range(len(ORDER))]
        for appliance_num, appliance in enumerate(ORDER):
            gt_fold[appliance_num] = test[:,
                                          APPLIANCE_ORDER.
                                          index(appliance), :, :].reshape(
                                              test_aggregate.shape[0], -1,
                                              1).reshape(-1, 24)

        preds.append(prediction_fold)
        gts.append(gt_fold)

    prediction_flatten = {}
    gt_flatten = {}
    for appliance_num, appliance in enumerate(ORDER):
        prediction_flatten[appliance] = []
        gt_flatten[appliance] = []

    for appliance_num, appliance in enumerate(ORDER):
        for fold in range(num_folds):
            prediction_flatten[appliance].append(preds[fold][appliance_num])
            gt_flatten[appliance].append(gts[fold][appliance_num])
        gt_flatten[appliance] = np.concatenate(gt_flatten[appliance])
        prediction_flatten[appliance] = np.concatenate(
            prediction_flatten[appliance])

    err = {}
    for appliance in ORDER:
        print(appliance)
        sys.stdout.flush()
        err[appliance] = mean_absolute_error(gt_flatten[appliance],
                                             prediction_flatten[appliance])
    return err
Code example #16
a = AppliancesRNN(input_dim, hidden_size, 1, len(ORDER))
# print(cuda_av)
if cuda_av:
    a = a.cuda()
# print(a)
# Storing predictions per iterations to visualise later
predictions = []

optimizer = torch.optim.Adam(a.parameters(), lr=2)
loss_func = nn.L1Loss().cuda()

out_train = {}
for appliance in ORDER:
    out_train[appliance] = Variable(
        torch.Tensor(train[:, APPLIANCE_ORDER.index(appliance), :, :].reshape(
            (train_agg.shape[0], -1, 1))))
    if cuda_av:
        out_train[appliance] = out_train[appliance].cuda()

inp = Variable(torch.Tensor(train_agg.reshape(
    (train_agg.shape[0], -1, 1))).type(torch.FloatTensor),
               requires_grad=True)
if cuda_av:
    inp = inp.cuda()
for t in range(num_iterations):
    import pdb

    # pdb.set_trace()
    out = torch.cat([out_train[appliance] for appliance in ORDER])