Example #1
def plot_trajectory(savefig=False):
    ## Phase space
    plt.figure(figsize=(12, 6))
    plt.title('Phase Space')
    plt.xlabel('q')
    plt.ylabel('p')
    plt.plot(env.trajectory[:, 0],
             env.trajectory[:, 1],
             '--k',
             label='Ground truth')
    plt.plot(env.X[:, 0], env.X[:, 1], 'xk', label='Training data', alpha=0.3)
    plt.plot(cdl_data[:, 0],
             cdl_data[:, 1],
             '-',
             label='CD-Lagrange, RMSE: {:.3f}'.format(RMSE(env, cdl_data)))
    plt.plot(resnet_data[:, 0],
             resnet_data[:, 1],
             '-',
             label='ResNet, RMSE: {:.3f}'.format(RMSE(env, resnet_data)))
    if _train_vin:
        plt.plot(vin_data[:, 0],
                 vin_data[:, 1],
                 '-',
                 label='VIN VV, RMSE: {:.3f}'.format(RMSE(env, vin_data)))
    plt.legend()
    if savefig:
        plt.savefig(env.get_filename('trajectory'))
    plt.show()
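The plotting code here and in the other trajectory examples (#2, #11, #21) assumes an `RMSE(env, data)` helper that scores a rolled-out trajectory against the ground truth stored on `env`; the helper itself is never shown. A minimal sketch, assuming `data` and `env.trajectory` are aligned numpy arrays:

import numpy as np

def RMSE(env, data):
    # Root-mean-square error between a rollout and the ground-truth
    # trajectory (hypothetical helper; assumes aligned time steps).
    truth = np.asarray(env.trajectory)[:len(data)]
    return np.sqrt(np.mean((np.asarray(data) - truth) ** 2))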
Example #2
def plot_trajectory(savefig=False):
    # bottom_right=(0.3, -2.1)
    top_right = (0.25, 1.95)

    plt.figure(figsize=(14, 10))
    plt.subplot(2, 2, 1)
    plt.title('Ground Truth')
    plt.plot(env.trajectory[:, 0], env.trajectory[:, 2], 'o--', label='1')
    plt.plot(env.trajectory[:, 1], env.trajectory[:, 3], 'o--', label='2')
    plt.legend()

    plt.subplot(2, 2, 2)
    plt.title('CD-Lagrange')
    rmse_cdl = RMSE(env, cdl_data)
    plt.text(top_right[0], top_right[1], 'RMSE={:.3f}'.format(rmse_cdl))
    plt.plot(cdl_data[:, 0], cdl_data[:, 2], 'o--', label='1')
    plt.plot(cdl_data[:, 1], cdl_data[:, 3], 'o--', label='2')
    plt.legend()

    plt.subplot(2, 2, 3)
    plt.title('ResNet')
    rmse_resnet = RMSE(env, resnet_data)
    plt.text(top_right[0], top_right[1], 'RMSE={:.3f}'.format(rmse_resnet))
    plt.plot(resnet_data[:, 0], resnet_data[:, 2], 'o--', label='1')
    plt.plot(resnet_data[:, 1], resnet_data[:, 3], 'o--', label='2')
    plt.legend()
    if savefig:
        plt.savefig(env.get_filename('trajectory'))
    plt.show()
Example #3
def test(test_data_loader, model):
    srocc = SROCC()
    plcc = PLCC()
    rmse = RMSE()
    len_test = len(test_data_loader)
    pb = ProgressBar(len_test, show_step=True)

    print("Testing")

    model.eval()
    with torch.no_grad():
        for i, ((img, ref), score) in enumerate(test_data_loader):
            img, ref = img.cuda(), ref.cuda()
            output = model(img, ref).cpu().data.numpy()
            score = score.data.numpy()

            srocc.update(score, output)
            plcc.update(score, output)
            rmse.update(score, output)

            pb.show(
                i, "Test: [{0:5d}/{1:5d}]\t"
                "Score: {2:.4f}\t"
                "Label: {3:.4f}".format(i + 1, len_test, float(output),
                                        float(score)))

    print("\n\nSROCC: {0:.4f}\n"
          "PLCC: {1:.4f}\n"
          "RMSE: {2:.4f}".format(srocc.compute(), plcc.compute(),
                                 rmse.compute()))
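`SROCC`, `PLCC` and `RMSE` are used here as streaming metrics with an `update()`/`compute()` interface. A sketch of the RMSE one under that assumed interface (the SROCC/PLCC variants would accumulate the raw scores instead):

import numpy as np

class RMSE:
    # Streaming RMSE with the update()/compute() interface assumed above
    # (a sketch, not the original implementation).
    def __init__(self):
        self.sq_sum = 0.0
        self.count = 0

    def update(self, y_true, y_pred):
        y_true, y_pred = np.ravel(y_true), np.ravel(y_pred)
        self.sq_sum += float(np.sum((y_true - y_pred) ** 2))
        self.count += y_true.size

    def compute(self):
        return float(np.sqrt(self.sq_sum / max(self.count, 1)))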
Example #4
    def solve(self,
              A: torch.Tensor,
              b: torch.Tensor,
              sizeA: tuple,
              initialX: torch.Tensor = None):
        x = initialX if initialX is not None else torch.randn(
            sizeA[0], dtype=torch.float32, device=global_device)
        assert sizeA[0] == sizeA[1]
        diff: float = 0.0
        iter = 0
        r = b - sparseMatmul(A, x, sizeA)  # initial residual
        p = r.clone()  # initial search direction
        lastRR = torch.mul(r, r).sum()

        for iter in range(self.maxiter):
            Ap = sparseMatmul(A, p, sizeA)
            a = lastRR / torch.mul(p, Ap).sum()  # step size alpha
            newX = x + a * p
            newR = r - a * Ap
            newRR = torch.mul(newR, newR).sum()
            beta = newRR / lastRR  # conjugation coefficient; must not clobber the vector b
            p = newR + beta * p
            r = newR
            lastRR = newRR
            diff = RMSE(x, newX)
            x = newX
            if diff <= self.tolerance:
                break
        print(iter + 1, diff)
        return x
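The iterative solvers in Examples #4, #5 and #9 rely on two helpers that are not shown: `sparseMatmul`, which multiplies a matrix stored as (row, col, value) triplets by a dense vector (Example #5 makes this layout explicit: `A[:, 0]`/`A[:, 1]` are indices, `A[:, 2]` the values), and an elementwise `RMSE` used as the convergence check. Plausible sketches under those assumptions:

import torch

def sparseMatmul(A: torch.Tensor, x: torch.Tensor, sizeA: tuple) -> torch.Tensor:
    # y = A @ x for A given as (row, col, value) triplets (sketch).
    y = torch.zeros(sizeA[0], dtype=x.dtype, device=x.device)
    rows = A[:, 0].to(torch.int64)
    cols = A[:, 1].to(torch.int64)
    y.index_add_(0, rows, A[:, 2] * x[cols])
    return y

def RMSE(a: torch.Tensor, b: torch.Tensor) -> float:
    # Root-mean-square difference between two iterates (sketch).
    return torch.sqrt(torch.mean((a - b) ** 2)).item()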
Example #5
    def solve(self,
              A: torch.Tensor,
              b: torch.Tensor,
              sizeA: tuple,
              initialX: torch.Tensor = None):
        x = initialX if initialX is not None else torch.randn(
            sizeA[0], dtype=torch.float32, device=global_device)
        assert sizeA[0] == sizeA[1]
        dia = torch.zeros(sizeA[0], dtype=torch.float32, device=global_device)
        boolDiagonalInA = A[:, 0] == A[:, 1]
        dia[A[boolDiagonalInA, 0].to(torch.int64)] = A[boolDiagonalInA,
                                                       2]  # diagonal entries
        A = A[~boolDiagonalInA]  # A now holds only the off-diagonal entries of the sparse representation
        diff: float = 0.0
        iter = 0

        for iter in range(self.maxiter):
            AxNoDiag = sparseMatmul(A, x, sizeA)
            newX = (b - AxNoDiag) / dia
            diff = RMSE(x, newX)
            x = newX
            if diff <= self.tolerance:
                break
        print(iter + 1, diff)
        return x
Example #6
def main(input_img, Threshold):
    # compute binary image using the threshold
    out_img = (255 * (input_img > Threshold).astype(int)).astype(np.uint8)
    # compute RMSE and fidelity
    rmse = RMSE(input_img, out_img)
    fidelity = Fidelity(input_img, out_img)
    return out_img, rmse, fidelity
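`RMSE` here compares the original grayscale image with its binarized output pixel by pixel; `Fidelity` is a separate, presumably perceptually filtered metric and is not sketched. A plain image RMSE consistent with this usage:

import numpy as np

def RMSE(img_a, img_b):
    # Per-pixel RMSE between two equally sized images (sketch); cast to
    # float first so uint8 subtraction cannot wrap around.
    a = np.asarray(img_a, dtype=np.float64)
    b = np.asarray(img_b, dtype=np.float64)
    return np.sqrt(np.mean((a - b) ** 2))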
Example #7
def evalvqa(evalset, predictions, isVQAeval=True):
    predans = []
    MCans = []
    VQA_acc = 0

    assert_eq(len(evalset), len(predictions))

    for i, ent in enumerate(evalset):
        # all question ids are integers
        qid = int(ent['question_id'])
        # all predictions/answers are strings
        pred = str(predictions[qid])
        if isVQAeval:
            ansfreq = Counter([ans['answer'] for ans in ent['answers']])
            MC = ent['multiple_choice_answer']
            # soft accuracy: credit grows with the number of agreeing annotators
            agreeing = ansfreq[pred]
            ans_p = min(agreeing * 0.3, 1)
        else:  # not VQA style
            MC = ent['answer']
            ans_p = (MC == pred)

        VQA_acc += ans_p
        predans.append(int(pred))
        MCans.append(int(MC))

    VQA_acc = 100.0 * VQA_acc / len(evalset)
    rmse = RMSE(MCans, predans)
    return VQA_acc, rmse
Example #8
    def generateFinalResult(
            self, vddRes: torch.Tensor, gndRes: torch.Tensor
    ) -> Tuple[Dict[str, float], Union[float, None]]:
        """
        Convert the tensor-format results into a per-node dictionary.
        :param vddRes: <Tensor n_vdd> n_vdd is the number of unknown nodes on the VDD plane
        :param gndRes: <Tensor n_gnd> n_gnd is the number of unknown nodes on the GND plane
        :return: (dict mapping each node name to its computed voltage, root-mean-square error (RMSE))
        """
        result = {}
        for nodename in self.nodenameDict:
            idx = self.nodenameDict[nodename]
            if self.connectToVSource[idx]:
                result[nodename] = self.nodeVoltages[idx]
            else:
                if self.nodeLabel[idx] == L_VDD:
                    result[nodename] = vddRes[self.vddMapping[idx]].item()
                elif self.nodeLabel[idx] == L_GND:
                    result[nodename] = gndRes[self.gndMapping[idx]].item()
        rmse = None
        if len(self.groundTruth) > 0:
            # compute RMSE against the ground-truth voltages
            gtArr = []
            resArr = []
            for nodename in result:
                resArr.append(result[nodename])
                gtArr.append(self.groundTruth[nodename])
            rmse = RMSE(torch.tensor(gtArr, device=global_device),
                        torch.tensor(resArr, device=global_device))
        return result, rmse
Example #9
    def solve(self,
              A: torch.Tensor,
              b: torch.Tensor,
              sizeA: tuple,
              initialX: torch.Tensor = None):
        assert sizeA[0] == sizeA[1]
        diff: float = 0.0
        iter = 0

        with torch.enable_grad():
            x = initialX if initialX is not None else torch.randn(
                sizeA[0], dtype=torch.float32, device=global_device)

            for iter in range(self.maxiter):
                x.requires_grad = True
                Ax = sparseMatmul(A, x, sizeA)
                loss = torch.norm(Ax - b)
                loss.backward()
                newX = (x - self.lr * x.grad).detach()
                diff = RMSE(x, newX)
                x = newX
                if diff <= self.tolerance:
                    break
        print(iter + 1, diff)
        return x
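The three solvers (conjugate gradient in #4, Jacobi in #5, gradient descent in #9) share one calling convention. A hypothetical usage on a small symmetric positive-definite system, with `A` in the (row, col, value) layout sketched after Example #4 (`CGSolver` is a stand-in name; the snippets do not show the class):

import torch

# 2x2 SPD system [[4, 1], [1, 3]] x = [1, 2]
A = torch.tensor([[0., 0., 4.],
                  [0., 1., 1.],
                  [1., 0., 1.],
                  [1., 1., 3.]])
b = torch.tensor([1., 2.])

solver = CGSolver(maxiter=100, tolerance=1e-8)  # hypothetical constructor
x = solver.solve(A, b, sizeA=(2, 2), initialX=torch.zeros(2))
# expected solution is roughly [0.0909, 0.6364]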
Example #10
    def compile(self, opt_type, batch_size, **other_args):
        self.is_compiled = True
        self.other_args = other_args
        self.batch_size = batch_size

        self.loss = nn.MSELoss()
        self.optimizer = optimizer_dict[opt_type](self.parameters(), **other_args)
        self.metrics = [MAE(), RMSE(), RMSLE(), NDEI(), R2_SCORE(), MAAPE()]
Example #11
def plot_trajectory(savefig=False):

    bottom_right = (0.3, -2.1)
    top_right = (0.25, 1.95)
    rmse_pos = bottom_right

    plt.figure(figsize=(8, 6.6))
    plt.subplot(2, 2, 1)
    plt.title('Ground Truth')
    plt.plot(env.trajectory[:, 0], env.trajectory[:, 2], '--', label='1')
    plt.plot(env.trajectory[:, 1], env.trajectory[:, 3], '--', label='2')

    plt.plot(env.X[:, 0], env.X[:, 2], 'C0x', label='Data 1', alpha=0.3)
    plt.plot(env.X[:, 1], env.X[:, 3], 'C1x', label='Data 2', alpha=0.3)
    #     plt.legend()

    plt.subplot(2, 2, 2)
    plt.title('CD-Lagrange')
    rmse_cdl = RMSE(env, cdl_data)
    print('CDL RMSE={:.3f}'.format(rmse_cdl))
    plt.plot(cdl_data[:, 0], cdl_data[:, 2], 'x--', label='1')
    plt.plot(cdl_data[:, 1], cdl_data[:, 3], 'x--', label='2')
    #     plt.legend()

    plt.subplot(2, 2, 3)
    plt.title('ResNet')
    rmse_resnet = RMSE(env, resnet_data)
    print('Resnet RMSE={:.3f}'.format(rmse_resnet))
    plt.plot(resnet_data[:, 0], resnet_data[:, 2], 'x--', label='1')
    plt.plot(resnet_data[:, 1], resnet_data[:, 3], 'x--', label='2')
    #     plt.legend()

    plt.subplot(2, 2, 4)
    plt.title('ResNet Contact')
    rmse_resnet_c = RMSE(env, resnet_c_data)
    print('ResnetContact RMSE={:.3f}'.format(rmse_resnet_c))
    plt.plot(resnet_c_data[:, 0], resnet_c_data[:, 2], 'x--', label='1')
    plt.plot(resnet_c_data[:, 1], resnet_c_data[:, 3], 'x--', label='2')
    #     plt.legend()
    plt.tight_layout()

    if savefig:
        plt.savefig(env.get_filename('trajectory'), bbox_inches="tight")
    plt.show()
Example #12
    def compile(self, opt_type, batch_size, **other_args):
        self.is_compiled = True
        self.other_args = other_args
        self.batch_size = batch_size

        self.loss = nn.MSELoss()
        self.optimizer = optimizer_dict[opt_type](self.parameters(), **other_args)
        self.metrics = [MAE(), RMSE(), RMSLE(), NDEI(), R2_SCORE(), MAAPE()]
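Of the metrics collected in `self.metrics`, NDEI (non-dimensional error index) is the least standard: it is the RMSE normalized by the standard deviation of the targets. A sketch, assuming the metrics are plain callables over target/prediction arrays:

import numpy as np

class NDEI:
    # Non-dimensional error index: RMSE divided by the target standard
    # deviation (sketch of the assumed callable-metric interface).
    def __call__(self, y_true, y_pred):
        y_true, y_pred = np.ravel(y_true), np.ravel(y_pred)
        rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
        return float(rmse / np.std(y_true))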
Example #13
def test(test_data_loader, model):
    scores = []
    srocc = SROCC()
    plcc = PLCC()
    rmse = RMSE()
    len_test = len(test_data_loader)
    pb = ProgressBar(len_test-1, show_step=True)

    print("Testing")

    model.eval()
    with torch.no_grad():
        for i, ((img, ref), score) in enumerate(test_data_loader):
            img, ref = img.cuda(), ref.cuda()
            output = model(img, ref).cpu().data.numpy()
            score = score.data.numpy()

            srocc.update(score, output)
            plcc.update(score, output)
            rmse.update(score, output)

            pb.show(i, 'Test: [{0:5d}/{1:5d}]\t'
                    'Score: {2:.4f}\t'
                    'Label: {3:.4f}'
                    .format(i, len_test, float(output), float(score)))

            scores.append(output)
    
    # Write scores to file
    with open('../test/scores.txt', 'w') as f:
        for s in scores:
            f.write(str(s) + '\n')

    print('\n\nSROCC: {0:.4f}\n'
            'PLCC: {1:.4f}\n'
            'RMSE: {2:.4f}'
            .format(srocc.compute(), plcc.compute(), rmse.compute())
    )
Example #14
def Cal_eval_index(epoch, out, val_target, arr, validation_losses,
                   validation_MAE, validation_RMSE, validation_MAPE, means,
                   stds):
    for item in arr:
        out_index = out
        val_target_index = val_target
        # unnormalize both the prediction and the target before scoring
        out_unnormalized = out_index * stds + means  # [85,24,24]
        target_unnormalized = val_target_index * stds + means
        val_loss = loss_criterion(out_unnormalized,
                                  target_unnormalized).to(device="cpu")
        validation_losses.append(val_loss.detach().item())

        # grouped by class: recompute the unnormalized arrays in numpy
        out_unnormalized = out_index.detach().cpu().numpy() * stds + means  # [85,24,24]
        target_unnormalized = val_target_index.detach().cpu().numpy() * stds + means
        if (epoch % 500 == 0) and (epoch != 0):
            spatial_pre4 = []
            spatial_true4 = []

            pd.DataFrame(spatial_pre4).to_csv("" + str(epoch) + ".csv",
                                              index=None,
                                              header=None)
            pd.DataFrame(spatial_true4).to_csv("" + str(epoch) + ".csv",
                                               index=None,
                                               header=None)
            print()

        mae = MAE(target_unnormalized, out_unnormalized)
        validation_MAE.append(mae)

        rmse = RMSE(target_unnormalized, out_unnormalized)
        validation_RMSE.append(rmse)

        mape = MAPE(target_unnormalized, out_unnormalized)
        validation_MAPE.append(mape)

    return validation_losses, validation_MAE, validation_RMSE, validation_MAPE
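`MAE`, `RMSE` and `MAPE` above operate on unnormalized numpy arrays of targets and predictions. Minimal sketches consistent with that usage (the `eps` guard in MAPE is an assumption, not in the original):

import numpy as np

def MAE(target, prediction):
    return float(np.mean(np.abs(target - prediction)))

def RMSE(target, prediction):
    return float(np.sqrt(np.mean((target - prediction) ** 2)))

def MAPE(target, prediction, eps=1e-8):
    # eps avoids division by zero for zero-valued targets
    return float(np.mean(np.abs((target - prediction) / (target + eps))) * 100)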
Example #15
    def train(self, train_data=None, vali_data=None):
        train_loss_list = []
        vali_rmse_list = []
        last_vali_rmse = None

        # momentum buffers
        momuntum_u = np.zeros(self.U.shape)
        momuntum_v = np.zeros(self.V.shape)

        for it in range(self.iterations):
            # gradient of the loss w.r.t. U
            grads_u = np.dot(self.I * (self.R - np.dot(self.U, self.V.T)),
                             -self.V) + self.lambda_alpha * self.U

            # gradient of the loss w.r.t. V
            grads_v = np.dot((self.I * (self.R - np.dot(self.U, self.V.T))).T,
                             -self.U) + self.lambda_beta * self.V

            # update the parameters
            momuntum_u = (self.momuntum * momuntum_u) + self.lr * grads_u
            momuntum_v = (self.momuntum * momuntum_v) + self.lr * grads_v
            self.U = self.U - momuntum_u
            self.V = self.V - momuntum_v

            # training evaluation
            train_loss = self.loss()
            train_loss_list.append(train_loss)

            vali_preds = self.predict(vali_data)
            vali_rmse = RMSE(vali_data[:, 2], vali_preds)
            vali_rmse_list.append(vali_rmse)

            print('iteration:{: d} ,loss:{: f}, valid_rmse:{: f}'.format(
                it, train_loss, vali_rmse))

            if last_vali_rmse and (last_vali_rmse - vali_rmse) <= 0:
                print('convergence at iterations:{: d}'.format(it))
                break
            else:
                last_vali_rmse = vali_rmse

        return self.U, self.V, train_loss_list, vali_rmse_list
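`train()` assumes `predict()` and `loss()` methods on the PMF class; the gradient expressions above pin down their shape. Sketches matching those gradients (each row of the rating data is (user, item, rating), as in Example #28):

import numpy as np

class PMF:  # only the pieces assumed by train() are sketched here
    def predict(self, data):
        # Predicted rating per (user, item) row: dot product of factors.
        users = data[:, 0].astype(int)
        items = data[:, 1].astype(int)
        return np.sum(self.U[users] * self.V[items], axis=1)

    def loss(self):
        # Regularized squared error on observed entries; its gradients
        # w.r.t. U and V are exactly grads_u / grads_v in train().
        err = self.I * (self.R - np.dot(self.U, self.V.T))
        return (0.5 * np.sum(err ** 2)
                + 0.5 * self.lambda_alpha * np.sum(self.U ** 2)
                + 0.5 * self.lambda_beta * np.sum(self.V ** 2))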
Example #16
def superlinear(Set, Label, ShowSet, ShowLabel):
    from sklearn.linear_model import LinearRegression

    pre = []
    ## model training
    splitnum = len(Set) // 10
    for i in range(10):
        # make data sets
        srange = np.arange(splitnum * i, splitnum * i + splitnum)
        #testset=Set[srange, :]
        #testlabel=Label[srange, :]
        trainset = np.delete(Set, srange, axis=0)
        trainlabel = np.delete(Label, srange, axis=0)

        ## build the model
        lmodel = LinearRegression()
        lmodel.fit(trainset, trainlabel)
        pre.append(lmodel.predict(ShowSet))

    pre = np.mean(np.array(pre), axis=0)
    rmse = RMSE(ShowLabel, pre)
    pe = PE(ShowLabel, pre)
    lc = corrcal(ShowLabel, pre)
    return rmse, pe, lc
Example #17
print()

Numeric_model.numerical_solution(x=x_data,
                                 y=y_data,
                                 epochs=_epoch,
                                 batch_size=_batch_size,
                                 lr=_lr,
                                 optim=optimizer)
print('Trained weight: \n', Numeric_model.W.reshape(-1))
print()

# model evaluation
inference = Numeric_model.eval(x_data)

# Error calculation
error = RMSE(inference, y_data)
print('RMSE on Train Data : %.4f \n' % error)
'''
You should get results as:

Initial weight:
 [0. 0. 0. 0.]

Trained weight:
 [ 22.15868831 -11.56430191  31.24868975  34.05022129]

RMSE on Train Data : 22.1719

'''

Example #18
print('prediction shape:', pre.shape)
# shift predictions back to the true-value range
pre += 8

# plot the results
fig = plt.figure(figsize=(20, 6.8))
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0, hspace=0)
#fig.subplots_adjust(left=0, right=1, wspace=0, hspace=0)
plt.subplots_adjust(wspace=0, hspace=0.4)
res = []
oneyear = 344
for i in range(7):
    fluxtrue = Label[i * oneyear:(i + 1) * oneyear].reshape(-1, 1)
    prediction = pre[i, :].reshape(-1, 1)
    PE1 = PE(fluxtrue, prediction)
    RMSE1 = RMSE(fluxtrue, prediction)
    linreg = LinearRegression()
    line1 = linreg.fit(fluxtrue, prediction)
    Cor1 = np.corrcoef(fluxtrue.reshape(-1), prediction.reshape(-1))[0, 1]
    res.append([RMSE1, PE1, Cor1])
    #### figure plot
    fsize = 15
    xx = np.arange(5.5, 10, 0.1)

    # plot the flux
    fig.add_subplot(2, 8, i + 1)
    plt.plot(fluxtrue, 'b', linewidth=0.8)
    #plt.plot(prediction, 'r', linewidth=0.8)
    plt.xlim([0, 344])
    plt.ylim([6, 10.5])
    plt.xticks([])
Example #19
def supermodel2(Set, Label, ShowSet, ShowLabel, config):
    import time
    T0 = time.time()

    trloss, teloss, shloss, trainPE, testPE, showPE, trainCC, testCC, showCC, pre1, pre2, pre3 = \
        [], [], [], [], [], [], [], [], [], [], [], []

    seed = 1
    ## parameters of the network
    num_in = 85
    node1 = 25
    node2 = 25
    ## parameters of training
    batch_size = 200
    epochs = 1500
    learnrate = 0.01
    reg2 = 0.001
    Loss = 'L2'
    f1 = 'selu'
    f2 = 'sigmoid'

    if hasattr(config, 'seed'):
        seed = config.seed

    if hasattr(config, 'num_in'):
        num_in = config.num_in
    if hasattr(config, 'node1'):
        node1 = config.node1
    if hasattr(config, 'node2'):
        node2 = config.node2

    if hasattr(config, 'batch_size'):
        batch_size = config.batch_size
    if hasattr(config, 'epochs'):
        epochs = config.epochs
    if hasattr(config, 'learnrate'):
        learnrate = config.learnrate
    if hasattr(config, 'reg2'):
        reg2 = config.reg2
    if hasattr(config, 'Loss'):
        Loss = config.Loss
    if hasattr(config, 'fun1'):
        f1 = config.fun1
    if hasattr(config, 'fun2'):
        f2 = config.fun2

    ## model training (10 cross validation)
    splitnum = len(Set) // 10
    NN = Set.shape[1]
    for i in range(10):
        # make data sets
        srange = np.arange(splitnum * i, splitnum * i + splitnum)
        testset = Set[srange, :]
        testlabel = Label[srange, :]
        trainset = np.delete(Set, srange, axis=0)
        trainlabel = np.delete(Label, srange, axis=0)

        # training set shuffle
        data2 = np.hstack((trainset, trainlabel))
        np.random.seed(2020)  # This year is 2020
        np.random.shuffle(data2)
        trainset = data2[:, 0:-1]
        trainlabel = data2[:, -1].reshape(-1, 1)

        ## build model
        x = tf.placeholder(tf.float32, [None, num_in])
        y0 = tf.placeholder(tf.float32, [None, 1])
        y1, totalloss, loss = create_model2( \
            x, y0, num_in, reg2=reg2, node1=node1, node2=node2, \
            seed=seed, Loss=Loss, f1=f1, f2=f2)

        # Adam optimizer
        learningrate_base = learnrate
        learningrate_decay = 0.999
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(learningrate_base,
                                                   global_step,
                                                   epochs // batch_size,
                                                   learningrate_decay,
                                                   staircase=False)
        trainstep = tf.train.AdamOptimizer(learning_rate).minimize(
            totalloss, global_step=global_step)

        # training
        with tf.Session() as sess:
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            X = trainset
            Y = trainlabel
            for ii in range(epochs):
                start = (ii * batch_size) % len(trainlabel)
                end = start + batch_size
                sess.run(trainstep,
                         feed_dict={
                             x: X[start:end],
                             y0: Y[start:end]
                         })
                # if ii%200==0:
                #     print('Loss:', sess.run(loss, feed_dict={x: X[start:end], y0: Y[start:end]}))
                #     print('Total Loss:', sess.run(totalloss, feed_dict={x: X[start:end], y0: Y[start:end]}))
            trainloss = sess.run(loss, feed_dict={x: X, y0: Y})
            print('##-------------------#', i)
            print('total_loss1:', trainloss)
            ### evaluation of the results
            prediction1 = sess.run(y1, feed_dict={x: trainset})
            prediction2 = sess.run(y1, feed_dict={x: testset})
            prediction3 = sess.run(y1, feed_dict={x: ShowSet})
            pre2.append(prediction2.reshape(-1))
            pre3.append(prediction3.reshape(-1))
            # true value of trainlabel and prediction1
            trainlabel += 8
            prediction1 += 8

            trloss.append(RMSE(trainlabel, prediction1))
            trainPE.append(PE(trainlabel, prediction1))
            trainCC.append(corrcal(trainlabel, prediction1))

    prediction2 = np.array(pre2).reshape(-1)
    prediction3 = np.mean(np.array(pre3), axis=0)
    # true value of prediction2 and prediction3, Label and ShowLabel
    prediction2 += 8
    prediction3 += 8
    Label += 8
    ShowLabel += 8

    trloss = np.mean(np.array(trloss))
    trainPE = np.mean(np.array(trainPE))
    trainCC = np.mean(np.array(trainCC))

    # calculate teloss, shloss, testPE, showPE
    # note: because of the `// 10` split, the data sizes may not line up exactly
    prediction2_num = len(prediction2)
    teloss = RMSE(Label.reshape(-1)[0:prediction2_num], prediction2)
    testPE = PE(Label.reshape(-1)[0:prediction2_num], prediction2)
    testCC = corrcal(Label.reshape(-1)[0:prediction2_num], prediction2)

    shloss = RMSE(ShowLabel.reshape(-1), prediction3)
    showPE = PE(ShowLabel.reshape(-1), prediction3)
    showCC = corrcal(ShowLabel.reshape(-1), prediction3)

    TT = time.time()
    print('Time', TT - T0)
    print(
        '>>>>>>>>>>\n>>>>>>>>>>>>>>>>>>>>>>>>\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
    )
    return trloss, teloss, shloss, trainPE, testPE, showPE, trainCC, testCC, showCC, TT - T0
Example #20
                                 lr=space,
                                 optim=optim)
    else:
        model.numerical_solution(x=x_train_data,
                                 y=y_train_data,
                                 epochs=_epoch,
                                 batch_size=space,
                                 lr=_lr,
                                 optim=optim)

    ################### Evaluate on train data
    # Inference
    inference = model.eval(x_train_data)

    # Assess model
    error = RMSE(inference, y_train_data)
    print('[Search %d] RMSE on Train Data : %.4f' % (i + 1, error))

    train_results.append(error)

    ################### Evaluate on test data
    # Inference
    inference = model.eval(x_test_data)

    # Assess model
    error = RMSE(inference, y_test_data)
    print('[Search %d] RMSE on test data : %.4f' % (i + 1, error))

    test_results.append(error)

# ========================= EDIT HERE ========================
Example #21
def plot_trajectory(savefig=False):
    plt.figure(figsize=(12,8))

    plt.title('Phase Space'); plt.xlabel('q'); plt.ylabel('p')
    plt.plot(cdl_data[:, 0], cdl_data[:, 1], 'x--', label='CD-Lagrange, RMSE: {:.3f}'.format(RMSE(env, cdl_data)))
    plt.plot(resnet_data[:, 0], resnet_data[:, 1], 'x--', label='ResNet, RMSE: {:.3f}'.format(RMSE(env, resnet_data)))
    plt.plot(env.trajectory[:, 0], env.trajectory[:, 1], 'xk', label='Ground truth')
    plt.legend()
    if savefig:
        plt.savefig(env.get_filename('trajectory'))
    plt.show()
Example #22
        imp_mean = IterativeImputer(random_state=0, max_iter=50)
        data_nas = copy.deepcopy(ground_truth)
        data_nas[data["mask"][-1] == 1] = np.nan
        imp_mean.fit(data_nas)

        sk_imp = torch.tensor(imp_mean.transform(data_nas))
        mean_imp = (1 - mask) * X_true + mask * mean_impute(X_true, mask)

        mean_sk, cov_sk = moments(sk_imp[M])
        mean_mean, cov_mean = moments(mean_imp[M])

        data["imp"]["scikit"].append(sk_imp.cpu().numpy())
        data["imp"]["mean"].append(mean_imp.cpu().numpy())

        scikit_scores['MAE'].append(MAE(sk_imp, X_true, mask).cpu().numpy())
        scikit_scores['RMSE'].append(RMSE(sk_imp, X_true, mask).cpu().numpy())
        scikit_scores['bures'].append([
            ((mean_sk - mean_truth)**2).sum().item(),
            ns_bures(cov_sk, cov_truth).item()
        ])
        if nimp < OTLIM:
            scikit_scores['OT'].append(ot.emd2(np.ones(nimp) / nimp, np.ones(nimp) / nimp, \
                         ((sk_imp[M][:, None] - X_true[M])**2).sum(2).cpu().numpy()) / 2.)
            logging.info(
                f'scikit imputation :  MAE = {scikit_scores["MAE"][-1]}, OT = {scikit_scores["OT"][-1]}'
            )
        else:
            logging.info(
                f'scikit imputation :  MAE = {scikit_scores["MAE"][-1]}')

        mean_scores['MAE'].append(MAE(mean_imp, X_true, mask).cpu().numpy())
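In the imputation examples (#22, #26, #30), `MAE` and `RMSE` take a mask and are evaluated only over the imputed entries, returning torch tensors (hence the `.item()` / `.cpu().numpy()` calls). Sketches under that assumption:

import torch

def MAE(X_imputed, X_true, mask):
    # Mean absolute error over the entries flagged missing by `mask` (sketch).
    m = mask.bool()
    return torch.abs(X_imputed[m] - X_true[m]).mean()

def RMSE(X_imputed, X_true, mask):
    # Root-mean-square error over the masked entries (sketch).
    m = mask.bool()
    return torch.sqrt(((X_imputed[m] - X_true[m]) ** 2).mean())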
Example #23
                out_img[i,j] = 255
            else: 
                out_img[i,j] = 0
    return out_img.astype(np.uint8)
    
if __name__=="__main__":
    input_img_name = sys.argv[1]
    dither_size = int(sys.argv[2])
    output_img_name = get_base(input_img_name)+ '_dither'+str(dither_size)
    gamma=2.2
    print(output_img_name)
    
    im = Image.open(input_img_name)
    input_img = np.array(im)
    
    out_img= main(input_img, dither_size, gamma)
    # compute RMSE and fidelity
    rmse = RMSE(input_img, out_img)
    fidelity = Fidelity(input_img, out_img)
    # print info
    print("RMSE:%0.3f \t Fidelity:%0.3f" % (rmse, fidelity))
    
    #save plots and images
    gray = cm.get_cmap('gray',256)
    plt.figure(frameon=False)
    plt.imshow(out_img, cmap=gray, interpolation='none')
    plt.axis('off')
    plt.savefig(output_img_name+'.pdf', bbox_inches='tight', pad_inches=0)
    
    out_im = Image.fromarray(out_img)
    out_im.save(output_img_name+'.tif')
Example #24
def run(data_path,
        out_path,
        training_dataset,
        n_targets,
        testing_datasets,
        regressor,
        out_log,
        sst_method='predictions',
        sst_n_part=None,
        seed=2018):

    print('Regressor: {}'.format(regressor))
    training_name = os.path.join(data_path, '{}.csv'.format(training_dataset))
    training_data = pd.read_csv(training_name)
    training_data = training_data.values
    X_train, Y_train = training_data[:, :-n_targets], \
        training_data[:, -n_targets:]

    scaler = preprocessing.StandardScaler().fit(X_train)
    X_train = scaler.transform(X_train)

    regr, params = get_regressor(regressor, seed=seed)
    sst = StackedSingleTarget(regressor=regr,
                              regressor_params=params,
                              method=sst_method,
                              n_part=sst_n_part)
    sst.fit(X_train, Y_train)
    print('SST-{} done'.format(sst_method))

    for pos, d in enumerate(testing_datasets):
        regr, params = get_regressor(regressor, seed=seed)
        st = regr(**params)
        st.fit(X_train, Y_train[:, pos])
        print('ST done')

        testing_name = os.path.join(data_path, '{}.csv'.format(d))
        testing_data = pd.read_csv(testing_name)
        testing_data = testing_data.values

        X_test, y_test = testing_data[:, :-1], testing_data[:, -1:]

        X_test = scaler.transform(X_test)
        y_test = y_test[:, 0]
        st_predictions = st.predict(X_test)
        sst_predictions = sst.predict(X_test)

        out_log.loc[pos, 'st_rmse'] = \
            RMSE(y_test, st_predictions)
        out_log.loc[pos, 'st_rrmse'] = \
            RRMSE(y_test, st_predictions)
        out_log.loc[pos, 'sst_rmse'] = \
            RMSE(y_test, sst_predictions[:, pos])
        out_log.loc[pos, 'sst_rrmse'] = \
            RRMSE(y_test, sst_predictions[:, pos])

    if sst_method == 'predictions':
        log_name = 'results_sst_train_test_TgTliq_{}_predictions.csv'.\
                   format(regressor)
    elif sst_method == 'internal_cv':
        log_name = 'results_sst_train_test_TgTliq_{}_internal_cv_{}.csv'.\
                   format(regressor, sst_n_part)
    elif sst_method == 'targets_values':
        log_name = 'results_sst_train_test_TgTliq_{}_targets_values.csv'.\
                   format(regressor)
    out_log.to_csv(os.path.join(out_path, log_name), index=False)
Example #25
def get_acc_rmse(gt, pred):
    acc = accuracy(gt, pred)
    rmse = RMSE(gt, pred)
    return acc, rmse
Example #26
    def fit_transform(self, X, verbose=True, report_interval=500, X_true=None):
        """
        Imputes missing values using a batched OT loss

        Parameters
        ----------
        X : torch.DoubleTensor or torch.cuda.DoubleTensor
            Contains non-missing and missing data; missing entries must be
            encoded as NaNs (the mask is derived internally via torch.isnan,
            with mask[i,j] == 1 iff X[i,j] is missing).

        verbose: bool, default=True
            If True, output loss to log during iterations.

        X_true: torch.DoubleTensor or None, default=None
            Ground truth for the missing values. If provided, will output a
            validation score during training, and return score arrays.
            For validation/debugging only.

        Returns
        -------
        X_filled: torch.DoubleTensor or torch.cuda.DoubleTensor
            Imputed missing data (plus unchanged non-missing data).


        """

        X = X.clone()
        n, d = X.shape

        if self.batchsize > n // 2:
            e = int(np.log2(n // 2))
            self.batchsize = 2**e
            if verbose:
                logging.info(
                    f"Batchsize larger than half the sample size = {len(X) // 2}. Setting batchsize to {self.batchsize}."
                )

        mask = torch.isnan(X).double()
        imps = (self.noise * torch.randn(mask.shape).double() +
                nanmean(X, 0))[mask.bool()]
        imps.requires_grad = True

        optimizer = self.opt([imps], lr=self.lr)

        if verbose:
            logging.info(
                f"batchsize = {self.batchsize}, epsilon = {self.eps:.4f}")

        if X_true is not None:
            maes = np.zeros(self.niter)
            rmses = np.zeros(self.niter)

        for i in range(self.niter):

            X_filled = X.detach().clone()
            X_filled[mask.bool()] = imps
            loss = 0

            for _ in range(self.n_pairs):

                idx1 = np.random.choice(n, self.batchsize, replace=False)
                idx2 = np.random.choice(n, self.batchsize, replace=False)

                X1 = X_filled[idx1]
                X2 = X_filled[idx2]

                loss = loss + self.sk(X1, X2)

            if torch.isnan(loss).any() or torch.isinf(loss).any():
                ### Catch numerical errors/overflows (should not happen)
                logging.info("Nan or inf loss")
                break

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if X_true is not None:
                maes[i] = MAE(X_filled, X_true, mask).item()
                rmses[i] = RMSE(X_filled, X_true, mask).item()

            if verbose and (i % report_interval == 0):
                if X_true is not None:
                    logging.info(
                        f'Iteration {i}:\t Loss: {loss.item() / self.n_pairs:.4f}\t '
                        f'Validation MAE: {maes[i]:.4f}\t'
                        f'RMSE: {rmses[i]:.4f}')
                else:
                    logging.info(
                        f'Iteration {i}:\t Loss: {loss.item() / self.n_pairs:.4f}'
                    )

        X_filled = X.detach().clone()
        X_filled[mask.bool()] = imps

        if X_true is not None:
            return X_filled, maes, rmses
        else:
            return X_filled
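Both OT imputers seed the missing entries with `nanmean(X, 0)` plus noise. Older torch releases had no `torch.nanmean`, so a helper of roughly this shape is assumed (on recent torch, `torch.nanmean(X, 0)` is equivalent):

import torch

def nanmean(v: torch.Tensor, dim: int) -> torch.Tensor:
    # Mean along `dim` that ignores NaNs (sketch).
    is_nan = torch.isnan(v)
    filled = torch.where(is_nan, torch.zeros_like(v), v)
    return filled.sum(dim) / (~is_nan).sum(dim).clamp(min=1)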
Example #27
def run(dconf, tconf):
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    ds_factory = DatasetFactory(dconf)
    train_ds = ds_factory.get_train_dataset()
    test_ds = ds_factory.get_test_dataset()

    train_loader = DataLoader(
        dataset=train_ds, 
        batch_size=tconf.batch_size,
        shuffle=True,
        num_workers=1
    )
    test_loader = DataLoader(
        dataset=test_ds,
        batch_size=tconf.batch_size,
        shuffle=False,
        num_workers=1
    )

    model = Model(dconf)
    if tconf.pretrained is not None:
        print('load pretrained model...')
        try:
            model.load_state_dict(torch.load(tconf.pretrained))
        except Exception:
            # fall back to loading a fully serialized model object
            model = torch.load(tconf.pretrained)
    else:
        model.apply(weights_init)
    model = model.cuda()
    criterion = nn.MSELoss().cuda()
    optimizer = optim.Adam(model.parameters(), lr=tconf.learning_rate)

    log_dir = log_name+str(dconf.len_close)+str(dconf.len_period)+str(dconf.len_trend)+'/'
    writer = SummaryWriter(log_dir)
    
    confs = {
        'Data Config': dconf, 
        'Train Config': tconf, 
        'Model Config': model.mconf
    }
    save_confs(log_dir+'confs', confs)

    step = 0
    for epoch in range(tconf.max_epoch):
        if epoch in tconf.milestones:
            print('Set lr=',tconf.milestones[epoch])
            for param_group in optimizer.param_groups:
                param_group["lr"] = tconf.milestones[epoch]

        model.train()
        for i, (X, X_ext, Y, Y_ext) in enumerate(train_loader, 0):
            X = X.cuda()
            X_ext = X_ext.cuda() 
            Y = Y.cuda() 
            Y_ext = Y_ext.cuda()

            optimizer.zero_grad()

            h = model(X, X_ext, Y_ext)
            loss = criterion(h, Y)
            loss.backward()

            nn.utils.clip_grad_norm_(model.parameters(), tconf.grad_threshold)
            optimizer.step()

            if step % 10 == 0:
                rmse = RMSE(h, Y, ds_factory.ds.mmn, ds_factory.dataset.m_factor)
                print("[epoch %d][%d/%d] mse: %.4f rmse: %.4f" % (epoch, i+1, len(train_loader), loss.item(), rmse.item()))
                writer.add_scalar('mse', loss.item(), step)
                writer.add_scalar('rmse', rmse.item(), step)
            step += 1
        
        model.eval()
        mse = 0.0
        mae = 0.0
        with torch.no_grad():
            for i, (X, X_ext, Y, Y_ext) in enumerate(test_loader, 0):
                X = X.cuda()
                X_ext = X_ext.cuda() 
                Y = Y.cuda() 
                Y_ext = Y_ext.cuda()
                h = model(X, X_ext, Y_ext)
                loss = criterion(h, Y)
                mse += X.size()[0] * loss.item()
                mae += X.size()[0] * torch.mean(torch.abs(Y - h)).item()
        mse /= ds_factory.ds.X_test.shape[0]
        rmse = math.sqrt(mse) * (ds_factory.ds.mmn.max - ds_factory.ds.mmn.min) / 2. * ds_factory.dataset.m_factor
        print("[epoch %d] test_rmse: %.4f\n" % (epoch, rmse))
        writer.add_scalar('test_rmse', rmse, epoch)
        torch.save(model.state_dict(), log_dir+'{}.model'.format(epoch))
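The `RMSE(h, Y, mmn, m_factor)` used in the training loop mirrors the test-time formula a few lines above: take the MSE in normalized space, then rescale by the min-max normalizer's span and the map factor. A sketch consistent with that formula:

import torch

def RMSE(pred, target, mmn, m_factor):
    # RMSE rescaled from [-1, 1]-normalized space back to real units,
    # matching `sqrt(mse) * (mmn.max - mmn.min) / 2 * m_factor` above (sketch).
    mse = torch.mean((pred - target) ** 2)
    return torch.sqrt(mse) * (mmn.max - mmn.min) / 2. * m_factor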
Example #28
    train_data = data[:int(ratio * data.shape[0])]
    vali_data = data[int(ratio * data.shape[0]):int((ratio + (1 - ratio) / 2) *
                                                    data.shape[0])]
    test_data = data[int((ratio + (1 - ratio) / 2) * data.shape[0]):]

    # complete rating matrix
    rows = max(dict_userid_to_index.values()) + 1
    columns = max(dict_itemid_to_index.values()) + 1

    R = np.zeros((rows, columns))
    for record in train_data:
        R[int(record[0]), int(record[1])] = float(record[2])

    # model
    model = PMF(R=R,
                lambda_alpha=lambda_alpha,
                lambda_beta=lambda_beta,
                latent_size=latent_size,
                momuntum=0.9,
                lr=lr,
                iters=iters,
                seed=1)

    U, V, train_loss_list, vali_rmse_list = model.train(train_data=train_data,
                                                        vali_data=vali_data)

    preds = model.predict(data=test_data)
    test_rmse = RMSE(preds, test_data[:, 2])

    print('test rmse:{}'.format(test_rmse))
Example #29
def run(data_path, out_path, datasets, regressor, n_parts=10,
        sst_method='predictions', sst_n_part=None, seed=2018):
    idx_column = ['fold_{:02d}'.format(k + 1) for k in range(n_parts)]
    idx_column.append('mean')
    for d, nt in datasets.items():
        print(d)
        dataset_file = os.path.join(data_path, '{}.csv'.format(d))
        data = pd.read_csv(dataset_file)
        target_names = list(data)[-nt:]
        # Transform data in numpy.ndarray
        data = data.values

        log_columns = ['armse', 'arrmse']
        log_columns.extend(['rmse_{}'.format(tn) for tn in target_names])
        log_columns.extend(['rrmse_{}'.format(tn) for tn in target_names])
        out_log = pd.DataFrame(
            np.zeros((n_parts + 1, 2 * nt + 2)),
            columns=log_columns
        )

        kf = KFold(n_splits=n_parts, shuffle=True, random_state=seed)
        for k, (train_index, test_index) in enumerate(kf.split(data)):
            print('Fold {:02d}'.format(k + 1))
            X_train, X_test = data[train_index, :-nt], data[test_index, :-nt]
            Y_train, Y_test = data[train_index, -nt:], data[test_index, -nt:]

            scaler = preprocessing.StandardScaler().fit(X_train)
            X_train = scaler.transform(X_train)
            X_test = scaler.transform(X_test)

            regr, params = get_regressor(regressor, seed=seed)
            sst = StackedSingleTarget(
                regressor=regr,
                regressor_params=params,
                method=sst_method,
                n_part=sst_n_part
            )

            sst.fit(X_train, Y_train)
            predictions = sst.predict(X_test)

            for t in range(nt):
                out_log.loc[k, 'rrmse_{}'.format(target_names[t])] = \
                    RRMSE(Y_test[:, t], predictions[:, t])
                out_log.loc[k, 'rmse_{}'.format(target_names[t])] = \
                    RMSE(Y_test[:, t], predictions[:, t])
            out_log.loc[k, 'arrmse'] = aRRMSE(Y_test, predictions)
            out_log.loc[k, 'armse'] = aRMSE(Y_test, predictions)

        for c in range(2*nt + 2):
            out_log.iloc[-1:, c] = np.mean(out_log.iloc[:-1, c])

        out_log.insert(0, 'partition', idx_column)

        if sst_method == 'predictions':
            log_name = 'results_sst_{}_{}_predictions.csv'.format(d, regressor)
        elif sst_method == 'internal_cv':
            log_name = 'results_sst_{}_{}_internal_cv_{}.csv'.\
                       format(d, regressor, sst_n_part)
        elif sst_method == 'targets_values':
            log_name = 'results_sst_{}_{}_targets_values.csv'.\
                       format(d, regressor)
        out_log.to_csv(os.path.join(out_path, log_name), index=False)
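Examples #24 and #29 use the multi-target error family: RRMSE is the RMSE relative to predicting the target mean, and the a-prefixed variants average the per-target scores. Sketches under the usual definitions:

import numpy as np

def RMSE(y_true, y_pred):
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))

def RRMSE(y_true, y_pred):
    # relative RMSE: error normalized by the spread around the target mean
    num = np.sum((y_true - y_pred) ** 2)
    den = np.sum((y_true - np.mean(y_true)) ** 2)
    return float(np.sqrt(num / den))

def aRMSE(Y_true, Y_pred):
    return float(np.mean([RMSE(Y_true[:, t], Y_pred[:, t])
                          for t in range(Y_true.shape[1])]))

def aRRMSE(Y_true, Y_pred):
    return float(np.mean([RRMSE(Y_true[:, t], Y_pred[:, t])
                          for t in range(Y_true.shape[1])]))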
Example #30
    def fit_transform(self, X, verbose=True, report_interval=1, X_true=None):
        """
        Fits the imputer on a dataset with missing data, and returns the
        imputations.

        Parameters
        ----------
        X : torch.DoubleTensor or torch.cuda.DoubleTensor, shape (n, d)
            Contains non-missing and missing data; missing entries must be
            encoded as NaNs (the mask is derived internally via torch.isnan,
            with mask[i,j] == 1 iff X[i,j] is missing).

        verbose : bool, default=True
            If True, output loss to log during iterations.
            
        report_interval : int, default=1
            Interval between loss reports (if verbose).

        X_true: torch.DoubleTensor or None, default=None
            Ground truth for the missing values. If provided, will output a 
            validation score during training. For debugging only.

        Returns
        -------
        X_filled: torch.DoubleTensor or torch.cuda.DoubleTensor
            Imputed missing data (plus unchanged non-missing data).

        """

        X = X.clone()
        n, d = X.shape
        mask = torch.isnan(X).double()
        normalized_tol = self.tol * torch.max(torch.abs(X[~mask.bool()]))

        if self.batchsize > n // 2:
            e = int(np.log2(n // 2))
            self.batchsize = 2**e
            if verbose:
                logging.info(
                    f"Batchsize larger than half the sample size = {len(X) // 2}."
                    f" Setting batchsize to {self.batchsize}.")

        order_ = torch.argsort(mask.sum(0))

        optimizers = [
            self.opt(self.models[i].parameters(),
                     lr=self.lr,
                     weight_decay=self.weight_decay) for i in range(d)
        ]

        imps = (self.noise * torch.randn(mask.shape).double() +
                nanmean(X, 0))[mask.bool()]
        X[mask.bool()] = imps
        X_filled = X.clone()

        if X_true is not None:
            maes = np.zeros(self.max_iter)
            rmses = np.zeros(self.max_iter)

        for i in range(self.max_iter):

            if self.order == 'random':
                order_ = np.random.choice(d, d, replace=False)
            X_old = X_filled.clone().detach()

            loss = 0

            for l in range(d):
                j = order_[l].item()
                n_not_miss = (~mask[:, j].bool()).sum().item()

                if n - n_not_miss == 0:
                    continue  # no missing value on that coordinate

                for k in range(self.niter):

                    loss = 0

                    X_filled = X_filled.detach()
                    X_filled[mask[:, j].bool(), j] = self.models[j](
                        X_filled[mask[:,
                                      j].bool(), :][:,
                                                    np.r_[0:j,
                                                          j + 1:d]]).squeeze()

                    for _ in range(self.n_pairs):

                        idx1 = np.random.choice(n,
                                                self.batchsize,
                                                replace=False)
                        X1 = X_filled[idx1]

                        if self.unsymmetrize:
                            n_miss = (~mask[:, j].bool()).sum().item()
                            idx2 = np.random.choice(
                                n_miss,
                                self.batchsize,
                                replace=self.batchsize > n_miss)
                            X2 = X_filled[~mask[:, j].bool(), :][idx2]

                        else:
                            idx2 = np.random.choice(n,
                                                    self.batchsize,
                                                    replace=False)
                            X2 = X_filled[idx2]

                        loss = loss + self.sk(X1, X2)

                    optimizers[j].zero_grad()
                    loss.backward()
                    optimizers[j].step()

                # Impute with last parameters
                with torch.no_grad():
                    X_filled[mask[:, j].bool(), j] = self.models[j](
                        X_filled[mask[:,
                                      j].bool(), :][:,
                                                    np.r_[0:j,
                                                          j + 1:d]]).squeeze()

            if X_true is not None:
                maes[i] = MAE(X_filled, X_true, mask).item()
                rmses[i] = RMSE(X_filled, X_true, mask).item()

            if verbose and (i % report_interval == 0):
                if X_true is not None:
                    logging.info(
                        f'Iteration {i}:\t Loss: {loss.item() / self.n_pairs:.4f}\t'
                        f'Validation MAE: {maes[i]:.4f}\t'
                        f'RMSE: {rmses[i]: .4f}')
                else:
                    logging.info(
                        f'Iteration {i}:\t Loss: {loss.item() / self.n_pairs:.4f}'
                    )

            if torch.norm(X_filled - X_old, p=np.inf) < normalized_tol:
                break

        if i == (self.max_iter - 1) and verbose:
            logging.info('Early stopping criterion not reached')

        self.is_fitted = True

        if X_true is not None:
            return X_filled, maes, rmses
        else:
            return X_filled