Esempio n. 1
0
def nn_vm2pc_pred(df_list, power_df=None):
    """Predict PDU power from VM metrics with the pre-trained linear net.

    Parameters
    ----------
    df_list : list of pd.DataFrame
        Per-VM metric frames; preprocessed and (if more than one) summed
        into a single frame via ``vmsum2one``.
    power_df : pd.DataFrame, optional
        Measured PDU power.  When given, the measured series is returned
        alongside the prediction for comparison.

    Returns
    -------
    list of dict
        ``[measured, predicted]`` when ``power_df`` is a DataFrame,
        otherwise ``[predicted]``.  Each dict is the JSON round-trip of
        the corresponding column, offset by ``power_baseline``.
    """
    # The scaler must match training: fit on the fixed module-level
    # min/max bounds, not on the incoming data.
    minmax_scaler = MinMaxScaler()
    minmax_scaler.fit(minmax_list)
    # Decide once whether measured power was supplied; the original
    # re-checked isinstance() after data_preprocess() had reassigned
    # power_df, which coupled the branch to the helper's return type.
    have_power = isinstance(power_df, pd.DataFrame)
    if have_power:
        df_list, power_df = data_preprocess(df_list, power_df)
    else:
        df_list = data_preprocess(df_list)
    df_sum = df_list[0] if len(df_list) == 1 else vmsum2one(df_list)
    X_minmax = minmax_scaler.transform(df_sum)
    X_minmax_ts = torch.tensor(X_minmax).cuda()
    # NOTE(review): hard-coded absolute model path — consider a setting.
    net = torch.load(
        '/home/fc10382/Mcoder/Django/algorithm/dcpower/linear1.pkl')
    y_pred_ts = net(X_minmax_ts)
    y_pred_np = y_pred_ts.cpu().detach().numpy()
    if have_power:
        y_pred_df = pd.DataFrame(y_pred_np,
                                 index=power_df.index,
                                 columns=['pdu.pred'])
        # Re-add the baseline subtracted during preprocessing to both
        # the measured and the predicted series.
        y_df = pd.concat(
            [power_df + power_baseline, y_pred_df + power_baseline], axis=1)
        power_list = json.loads(y_df.to_json(orient='columns'))
        return [power_list['pdu.power'], power_list['pdu.pred']]
    else:
        y_pred_df = pd.DataFrame(y_pred_np,
                                 index=df_sum.index,
                                 columns=['pdu.pred']) + power_baseline
        power_list = json.loads(y_pred_df.to_json(orient='columns'))
        return [power_list['pdu.pred']]
Esempio n. 2
0
def calc_cpu_usage(data_cpu, cpu_count):
    """Compute busy-CPU percentage summed over all CPUs per timestamp.

    Parameters
    ----------
    data_cpu : list of pd.DataFrame
        Cumulative per-CPU counter frames; the first column is assumed
        to be the idle counter — TODO confirm against the collector.
    cpu_count : int
        Unused in this body; kept for interface compatibility.

    Returns
    -------
    dict
        Timestamp -> busy percentage summed across CPUs (JSON round-trip).

    Raises
    ------
    ValueError
        If preprocessing yields no CPU frames (the original code died
        here with an UnboundLocalError on ``cpu_all_df``).
    """
    # Cumulative counters -> per-interval deltas; drop the NaN first row.
    data_cpu = [df.diff().iloc[1:, :] for df in data_cpu]
    cpu_df = data_preprocess(data_cpu)
    if not cpu_df:
        raise ValueError('calc_cpu_usage: no CPU data after preprocessing')
    cpu_all_df = None
    for i, per_cpu_df in enumerate(cpu_df):
        # Append a total-ticks column, then overwrite it in place with
        # the busy fraction: 1 - idle / total.
        per_cpu_df['cpu.%d.cpu.total' % i] = per_cpu_df.sum(axis=1)
        per_cpu_df['cpu.%d.cpu.total' % i] = (
            1 - per_cpu_df.iloc[:, 0] / per_cpu_df.iloc[:, -1])
        if cpu_all_df is None:
            cpu_all_df = per_cpu_df.iloc[:, -1]
        else:
            cpu_all_df += per_cpu_df.iloc[:, -1]
    cpu_all_df *= 100
    return json.loads(cpu_all_df.to_json(orient='index'))
Esempio n. 3
0
def nn_vm2pc_train(df_list, power_df):
    """Train a linear net mapping summed VM metrics to PDU power.

    Runs 10-split shuffle cross-validation; each split trains a fresh
    ``vm_net_1d(6, 1)`` for 10000 Adam steps, and the network with the
    lowest hold-out NRMSE seen so far is saved to disk.

    Parameters
    ----------
    df_list : list of pd.DataFrame
        Per-VM metric frames; preprocessed and summed into one frame.
    power_df : pd.DataFrame
        Measured PDU power (training target).
    """
    df_list, power_df = data_preprocess(df_list, power_df)
    df_sum = vmsum2one(df_list)
    # Scale with the fixed module-level bounds so inference matches.
    minmax_scaler = MinMaxScaler()
    minmax_scaler.fit(minmax_list)
    X_minmax = minmax_scaler.transform(df_sum)
    y_np = power_df.values
    # Was a magic sentinel of 100; inf guarantees the first fold is kept.
    nrmse_best = float('inf')
    ssplit = ShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
    for train_index, test_index in ssplit.split(X_minmax, y_np):
        X_train, X_test = X_minmax[train_index, :], X_minmax[test_index, :]
        y_train, y_test = y_np[train_index], y_np[test_index]
        X_train_ts = torch.tensor(X_train).cuda()
        y_train_ts = torch.tensor(y_train).double().cuda()
        net = vm_net_1d(6, 1).double().cuda()
        optimizer = torch.optim.Adam(net.parameters(), lr=LR)
        loss_fun = torch.nn.MSELoss()
        for t in range(10000):
            y_train_pred_ts = net(X_train_ts)
            loss = loss_fun(y_train_ts, y_train_pred_ts)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if t % 100 == 0:
                # Training-progress log every 100 steps.
                print(loss.cpu().detach().numpy())
        X_test_ts = torch.tensor(X_test).cuda()
        # NRMSE is computed on numpy arrays; no tensor copy of y_test
        # is needed (the original built an unused y_test_ts here).
        y_pred_ts = net(X_test_ts).double()
        nrmse_tmp = nrmse(y_test, y_pred_ts.cpu().detach().numpy())
        if nrmse_tmp < nrmse_best:
            print(net.linear.weight)
            print('NRMSE:', nrmse_tmp)
            nrmse_best = nrmse_tmp
            # NOTE(review): hard-coded absolute path — consider a setting.
            torch.save(
                net,
                '/home/fc10382/Mcoder/Django/algorithm/dcpower/linear1.pkl')
Esempio n. 4
0
def nn_toy(df_list, power_df):
    """Toy CPU training loop: fit a single linear layer VM metrics -> power.

    Holds out the last 200 rows as a test set, trains ``nn.Linear(6, 1)``
    with ``VmLoss`` via Adam over mini-batches, and prints NRMSE on the
    hold-out.

    Parameters
    ----------
    df_list : list of pd.DataFrame
        Per-VM metric frames; preprocessed and summed into one frame.
    power_df : pd.DataFrame
        Measured power (training target).

    Returns
    -------
    tuple of np.ndarray
        ``(y_test, y_pred)`` for external inspection/plotting.
    """
    df_list, power_df = data_preprocess(df_list, power_df)
    df_sum = vmsum2one(df_list)
    # Unlike the trained pipeline, the toy fits the scaler on the data
    # itself rather than on the fixed module-level bounds.
    minmax_scaler = MinMaxScaler()
    X_minmax = minmax_scaler.fit_transform(df_sum)
    # BUG FIX: np.float was removed in NumPy 1.24 — use np.float64.
    y_np = power_df.values.astype(np.float64)
    # Last 200 samples form the hold-out test set.
    X_train, X_test = X_minmax[:-200, :], X_minmax[-200:, :]
    y_train, y_test = y_np[:-200], y_np[-200:]
    X_minmax_ts = torch.tensor(X_train, dtype=torch.float, requires_grad=True)
    y_ts = torch.tensor(y_train, dtype=torch.float)
    X_test_ts = torch.tensor(X_test, dtype=torch.float, requires_grad=True)
    data_ds = TensorDataset(X_minmax_ts, y_ts)
    data_dl = DataLoader(data_ds, batch, shuffle=True)
    model = nn.Linear(6, 1)
    torch.nn.init.xavier_uniform_(model.weight)
    print(model.weight)
    print(model.bias)
    loss_fun = VmLoss(1, 1000)
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    for epoch in range(EPOCH):
        for x, y in data_dl:
            pred = model(x)
            loss = loss_fun(y, pred)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        if epoch % 10 == 0:
            # Periodic diagnostics: loss, weights, and gradients.
            print(loss.detach().numpy())
            print(model.weight, model.bias)
            print(model.weight.grad, model.bias.grad)
    y_pred_ts = model(X_test_ts)
    nrmse_val = nrmse(y_test, y_pred_ts.detach().numpy())
    print(nrmse_val)
    return y_test, y_pred_ts.detach().numpy()
Esempio n. 5
0
def calc_docker_power(df, vmno, dockerno=None):
    """Estimate power draw per preprocessed frame with the pickled model.

    Parameters
    ----------
    df : raw metric data
        Passed straight to ``data_preprocess``; yields one frame per
        entity (host first, then containers — TODO confirm ordering).
    vmno : int
        Unused in this body; kept for interface compatibility.
    dockerno : int, optional
        Unused in this body; kept for interface compatibility.

    Returns
    -------
    list of float
        Mean predicted power per frame.  If the last two predictions
        (the containers) together exceed the first (the host), they are
        rescaled to split the host total, with a random +/-10% jitter
        on the split ratio.
    """
    df_list = data_preprocess(df)
    # NOTE(review): pickle.load executes arbitrary code — path must be
    # trusted.  'with' guarantees the file handle is closed.
    with open('dcpower/model/rfr.pkl', 'rb') as mfile:
        model = pickle.load(mfile)
    # Scale with the fixed module-level bounds used at training time.
    minmax_scaler = MinMaxScaler()
    minmax_scaler.fit(minmax_list)
    y_pred_mean_list = []
    for data_df in df_list:
        X_minmax = minmax_scaler.transform(data_df)
        y_pred = model.predict(X_minmax)
        y_pred_mean_list.append(pd.DataFrame(y_pred).mean().values[0])
    power_sum = sum(y_pred_mean_list[-2:])
    if power_sum > y_pred_mean_list[0]:
        # Containers overshoot the host total: redistribute the host
        # total between them, jittering the share by +/-10%.
        percent_1 = (y_pred_mean_list[-2] / power_sum
                     + random.uniform(-0.1, 0.1))
        y_pred_mean_list[-2] = y_pred_mean_list[0] * percent_1
        y_pred_mean_list[-1] = y_pred_mean_list[0] * (1 - percent_1)
    return y_pred_mean_list