def TrTeGlobalModel(lossFunc, inputData, alpha, lamda):
    # Train and test on the expert absolute labels.
    # inputData is a dictionary that contains the following keys:
    # featSin is an N by d matrix; N is the number of consensus labels or labels from one expert.
    # featCmp is an M by d matrix; M is the number of comparisons for a single expert.
    # labelRSD is an (N,) array containing +1 or -1.
    # labelAbs is an (N, numOfExpLbl) array containing +1 or -1; N is the number of samples for a single expert and numOfExpLbl is the number of experts who labeled the N images.
    # labelCmp is an (M, numOfExpCmp) array containing +1 or -1; M is the number of comparisons for a single expert and numOfExpCmp is the number of experts who labeled the comparison data.
    # indexLblTrain is a list of indices in [0, N-1] selecting the training label data for one expert or RSD; indexLblTest is the corresponding test split.
    # indexCmpTrain is a list of indices in [0, M-1] selecting the training comparison data for one expert.
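    # An illustrative sketch of the expected dictionary (shapes and index
    # variables are placeholders, not values from this project):
    #   inputData = {'featSin': np.zeros((N, d)), 'featCmp': np.zeros((M, d)),
    #                'labelRSD': np.ones(N),
    #                'labelAbs': np.ones((N, numOfExpLbl)),
    #                'labelCmp': np.ones((M, numOfExpCmp)),
    #                'indexLblTrain': trainLblIdx, 'indexLblTest': testLblIdx,
    #                'indexCmpTrain': trainCmpIdx, 'indexCmpTest': testCmpIdx}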
    data = dataPrepare(inputData)
    expAbs = data.ExpAbs()
    featTrLblAbs = expAbs['featTrainLblAbs']
    labelTrLblAbs = expAbs['labelTrainLblAbs']
    featTeLblSinAbs = expAbs['featTestLblSinAbs']
    featTrCmpAbs = expAbs['featTrainCmpAbs']
    labelTrCmpAbs = expAbs['labelTrainCmpAbs']
    featTeCmpSinAbs = expAbs['featTestCmpSinAbs']
    if lossFunc == 'LogLog':
        beta, const = Log_Log(featTrLblAbs,
                              labelTrLblAbs,
                              featTrCmpAbs,
                              labelTrCmpAbs,
                              absWeight=alpha,
                              lamda=lamda)
    elif lossFunc == 'LogSVM':
        beta, const = Log_SVM(featTrLblAbs,
                              labelTrLblAbs,
                              featTrCmpAbs,
                              labelTrCmpAbs,
                              absWeight=alpha,
                              lamda=lamda)
    elif lossFunc == 'SVMLog':
        beta, const = SVM_Log(featTrLblAbs,
                              labelTrLblAbs,
                              featTrCmpAbs,
                              labelTrCmpAbs,
                              absWeight=alpha,
                              lamda=lamda)
    elif lossFunc == 'SVMSVM':
        beta, const = SVM_SVM(featTrLblAbs,
                              labelTrLblAbs,
                              featTrCmpAbs,
                              labelTrCmpAbs,
                              absWeight=alpha,
                              lamda=lamda)
    else:
        sys.exit(
            'Please choose the correct loss function from one of {LogLog, LogSVM, SVMLog, SVMSVM}'
        )
    scoreLblTestSin = np.dot(featTeLblSinAbs, np.array(beta)) + const
    scoreCmpTestSin = np.dot(featTeCmpSinAbs, np.array(beta)) + const
    beta = np.array(beta).T

    return beta, const, scoreLblTestSin, scoreCmpTestSin
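
# The four solvers dispatched above (Log_Log, Log_SVM, SVM_Log, SVM_SVM) are
# defined elsewhere in this project. As a minimal illustrative sketch (an
# assumption about the objective family, not the project's actual
# implementation), a Log_SVM-style fit combines a logistic loss on absolute
# labels with a hinge loss on comparison labels, weighted by absWeight and
# l1-penalized, e.g. via subgradient descent:
import numpy as np

def logSvmSketch(Xl, yl, Xc, yc, absWeight=0.5, lamda=0.1, lr=1e-2,
                 numIters=2000):
    # Minimizes: absWeight * mean logistic loss on (Xl, yl)
    #            + (1 - absWeight) * mean hinge loss on (Xc, yc)
    #            + lamda * ||beta||_1
    beta, const = np.zeros(Xl.shape[1]), 0.0
    for _ in range(numIters):
        ml = yl * (Xl @ beta + const)
        gl = -yl / (1.0 + np.exp(ml))        # d/dm of log(1 + exp(-m))
        mc = yc * (Xc @ beta)
        gc = np.where(mc < 1.0, -yc, 0.0)    # subgradient of max(0, 1 - m)
        gradBeta = (absWeight * Xl.T @ gl / len(yl)
                    + (1 - absWeight) * Xc.T @ gc / len(yc)
                    + lamda * np.sign(beta))
        beta -= lr * gradBeta
        const -= lr * absWeight * gl.mean()
    return beta, const
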
def LogSVMTrTeExpBias(inputData,
                      alpha,
                      lamda,
                      penaltyTimes=100,
                      penaltyType='l1',
                      numMaxIters=10000):
    # Train and test on the expert absolute labels.
    # inputData is a dictionary that contains the following keys:
    # featSin is an N by d matrix; N is the number of consensus labels or labels from one expert.
    # featCmp is an M by d matrix; M is the number of comparisons for a single expert.
    # labelRSD is an (N,) array containing +1 or -1.
    # labelAbs is an (N, numOfExpLbl) array containing +1 or -1; N is the number of samples for a single expert and numOfExpLbl is the number of experts who labeled the N images.
    # labelCmp is an (M, numOfExpCmp) array containing +1 or -1; M is the number of comparisons for a single expert and numOfExpCmp is the number of experts who labeled the comparison data.
    # indexLblTrain is a list of indices in [0, N-1] selecting the training label data for one expert or RSD; indexLblTest is the corresponding test split.
    # indexCmpTrain is a list of indices in [0, M-1] selecting the training comparison data for one expert.
    data = dataPrepare(inputData)
    expBias = data.ExpAbsBias(penaltyTimes=penaltyTimes)
    featTrLblBias = expBias['featTrainLblBias']
    labelTrLblBias = expBias['labelTrainLblBias']
    featTeLblListBias = expBias['featTestLblBiasList']
    featTrCmpBias = expBias['featTrainCmpBias']
    labelTrCmpBias = expBias['labelTrainCmpBias']
    featTeCmpSinBias = expBias['featTestCmpSinBias']
    numOfExpLbl = inputData['numOfExpLbl']
    numOfExpCmp = inputData['numOfExpCmp']
    scoreLblListTest = [None] * numOfExpLbl
    if penaltyType == 'l1':
        beta, const = Log_SVM(featTrLblBias,
                              labelTrLblBias,
                              featTrCmpBias,
                              labelTrCmpBias,
                              absWeight=alpha,
                              lamda=lamda)
        for expLbl in range(numOfExpLbl):
            scoreLblListTest[expLbl] = np.dot(featTeLblListBias[expLbl].copy(),
                                              np.array(beta)) + const
        scoreCmpTestSin = np.dot(featTeCmpSinBias, np.array(beta)) + const
        beta = np.array(beta).T
    else:
        sys.exit("Only penaltyType='l1' is supported")

    return beta, const, scoreLblListTest, scoreCmpTestSin
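
# LogSVMTrTeExpBias relies on dataPrepare.ExpAbsBias to build the "bias"
# features. A plausible construction (an assumption; the real ExpAbsBias may
# differ) stacks every expert's copy of the N samples and appends one
# indicator column per expert, shrunk by penaltyTimes so the shared l1
# penalty discourages large per-expert offsets:
def expertBiasFeaturesSketch(feat, labelAbs, penaltyTimes=100):
    N, d = feat.shape
    numExp = labelAbs.shape[1]
    featBias = np.zeros((N * numExp, d + numExp))
    for e in range(numExp):
        rows = slice(e * N, (e + 1) * N)
        featBias[rows, :d] = feat
        featBias[rows, d + e] = 1.0 / penaltyTimes  # per-expert bias column
    labelBias = labelAbs.reshape(-1, order='F')  # stack experts column-wise
    return featBias, labelBias
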
def TrainSinleExp(alpha,
                  labelSin,
                  labelAbs,
                  labelCmp,
                  labelTrainPartition,
                  labelTestPartition,
                  cmpTrainPartition,
                  cmpTestPartition,
                  loss,
                  num_iters=10000):
    # N, M, d, numOfExpts4Lbl, numOfExpts4Cmp, repeatTimes, K, lamdaWeights,
    # labelFeat, cmpFeat, and Yc are module-level globals used below.
    Yl = np.reshape(labelAbs[:, :-1], [-1], order='F')
    YlSin = 1 * labelSin  # 1 * makes a copy
    YlRSD = labelAbs[:, 13]  # column 13 of labelAbs holds the RSD labels
    Ntol = N * numOfExpts4Lbl
    Mtol = M * numOfExpts4Cmp
    # Prepare Beta File
    betaSinTotal = np.zeros([d, repeatTimes * K])
    constSinTotal = np.zeros([1, repeatTimes * K])

    # Prepare Exp13 File score variables
    scoreSin2Abs = np.zeros([N * numOfExpts4Lbl, repeatTimes])

    # Prepare RSD File score variables
    scoreSin2RSD = np.zeros([N, repeatTimes])

    # Prepare comparison score variables
    scoreSin2Cmp = np.zeros([M * numOfExpts4Cmp,
                             repeatTimes * K])  # comparison test scores
    locSin2Cmp = np.zeros([M * numOfExpts4Cmp,
                           repeatTimes * K])  # mask of filled test entries
    aucSin2Abs = np.zeros([1, repeatTimes])
    aucSin2RSD = np.zeros([1, repeatTimes])
    aucSin2Cmp = np.zeros([1, repeatTimes])
    betaMat = np.zeros([repeatTimes * K, d])
    constMat = np.zeros([repeatTimes * K, 1])
    for repeatCount in range(repeatTimes):
        for countFold in range(K):
            trainLIndex = labelTrainPartition[repeatCount][countFold].copy()
            testLIndex = labelTestPartition[repeatCount][countFold].copy()
            trainCIndex = cmpTrainPartition[repeatCount][countFold].copy()
            testCIndex = cmpTestPartition[repeatCount][countFold].copy()
            trainLIndex = np.reshape(trainLIndex, [-1])
            testLIndex = np.reshape(testLIndex, [-1])
            trainCIndex = np.reshape(trainCIndex, [-1])
            testCIndex = np.reshape(testCIndex, [-1])
            trainFeatC = cmpFeat[trainCIndex, :]
            testFeatC = cmpFeat[testCIndex, :]
            trainFeatL = labelFeat[trainLIndex, :]
            testFeatL = labelFeat[testLIndex, :]

            # Prepare the 13 experts' (with expert bias) training and testing features and labels
            YtrainExp13C = np.array([])
            for eC in range(numOfExpts4Cmp):
                YtrainExp13C = np.append(YtrainExp13C, labelCmp[trainCIndex, eC])

            # Prepare RSD training and testing features and labels
            XtrainSinL = 1 * trainFeatL
            XtrainSinC = np.tile(trainFeatC, [numOfExpts4Cmp, 1])
            YtrainSinL = 1 * YlSin[trainLIndex]
            YtrainSinC = 1 * YtrainExp13C
            countLamda = 0
            for lamda in lamdaWeights:
                # Train RSD Model
                if loss == 'LogLog':
                    betaSin, constSin = Log_Log(XtrainSinL, YtrainSinL,
                                                XtrainSinC, YtrainSinC, alpha,
                                                lamda)
                elif loss == 'LogSVM':
                    betaSin, constSin = Log_SVM(XtrainSinL, YtrainSinL,
                                                XtrainSinC, YtrainSinC, alpha,
                                                lamda)
                elif loss == 'SVMLog':
                    betaSin, constSin = SVM_Log(XtrainSinL, YtrainSinL,
                                                XtrainSinC, YtrainSinC, alpha,
                                                lamda)
                elif loss == 'SVMSVM':
                    betaSin, constSin = SVM_SVM(XtrainSinL, YtrainSinL,
                                                XtrainSinC, YtrainSinC, alpha,
                                                lamda)
                else:
                    sys.exit('Unknown loss function: ' + str(loss))
                betaSin = np.array(betaSin)
                constSin = np.array(constSin)
                # Save the parameter values (successive lamda values overwrite
                # the same row, so only the last lamda in lamdaWeights is kept)
                betaMat[countFold + K * repeatCount, :] = np.array(betaSin.T)
                constMat[countFold + K * repeatCount, :] = constSin

                # Test on Exp13 labels
                for eLT in range(numOfExpts4Lbl):
                    scoreSin2Abs[eLT * N + testLIndex, repeatCount] = np.reshape(
                        np.dot(testFeatL, betaSin) + constSin, [-1])
                for eCT in range(numOfExpts4Cmp):
                    scoreSin2Cmp[eCT * M + testCIndex,
                                 K * repeatCount + countFold] = np.reshape(
                                     np.dot(testFeatC, betaSin) + constSin, [-1])
                    locSin2Cmp[eCT * M + testCIndex,
                               K * repeatCount + countFold] = 1
                # Test on RSD labels
                scoreSin2RSD[testLIndex, repeatCount] = np.reshape(
                    np.dot(testFeatL, betaSin), [-1])
                countLamda += 1

        # Compute all the scores and AUC for each repeat.
        aucSin2Abs[0, repeatCount] = metrics.roc_auc_score(
            Yl, scoreSin2Abs[:, repeatCount])
        aucSin2RSD[0, repeatCount] = metrics.roc_auc_score(
            YlRSD, scoreSin2RSD[:, repeatCount])
        indexCmpValidTe = np.where(
            np.reshape(locSin2Cmp[:, repeatCount], [-1]) != 0)[0]
        aucSin2Cmp[0, repeatCount] = metrics.roc_auc_score(
            Yc[indexCmpValidTe], scoreSin2Cmp[indexCmpValidTe, repeatCount])

    return aucSin2Abs, aucSin2RSD, aucSin2Cmp, betaMat, constMat, scoreSin2RSD
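
# TrainSinleExp consumes nested partition lists indexed as
# partition[repeat][fold]. A hedged sketch (hypothetical helper; the original
# partitioning code is not shown) of building them with scikit-learn:
from sklearn.model_selection import KFold

def makePartitionsSketch(numSamples, K, repeatTimes, seed=0):
    trainParts, testParts = [], []
    for r in range(repeatTimes):
        kf = KFold(n_splits=K, shuffle=True, random_state=seed + r)
        folds = list(kf.split(np.arange(numSamples)))
        trainParts.append([tr for tr, _ in folds])
        testParts.append([te for _, te in folds])
    return trainParts, testParts
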
def TrTeExpertBiasModel(lossFunc, inputData, alpha, lamda, penaltyTimes=100):
    # Train and test on the expert absolute labels.
    # inputData is a dictionary that contains the following keys:
    # featSin is an N by d matrix; N is the number of consensus labels or labels from one expert.
    # featCmp is an M by d matrix; M is the number of comparisons for a single expert.
    # labelRSD is an (N,) array containing +1 or -1.
    # labelAbs is an (N, numOfExpLbl) array containing +1 or -1; N is the number of samples for a single expert and numOfExpLbl is the number of experts who labeled the N images.
    # labelCmp is an (M, numOfExpCmp) array containing +1 or -1; M is the number of comparisons for a single expert and numOfExpCmp is the number of experts who labeled the comparison data.
    # indexLblTrain is a list of indices in [0, N-1] selecting the training label data for one expert or RSD; indexLblTest is the corresponding test split.
    # indexCmpTrain is a list of indices in [0, M-1] selecting the training comparison data for one expert.
    data = dataPrepare(inputData)
    expBias = data.ExpAbsBias(penaltyTimes=penaltyTimes)
    featTrLblBias = expBias['featTrainLblBias']
    labelTrLblBias = expBias['labelTrainLblBias']
    featTeLblListBias = expBias['featTestLblBiasList']
    featTrCmpBias = expBias['featTrainCmpBias']
    labelTrCmpBias = expBias['labelTrainCmpBias']
    featTeCmpSinBias = expBias['featTestCmpSinBias']
    numOfExpLbl = inputData['numOfExpLbl']
    numOfExpCmp = inputData['numOfExpCmp']
    scoreLblListTest = [None] * numOfExpLbl
    if lossFunc == 'LogLog':
        beta, const = Log_Log(featTrLblBias,
                              labelTrLblBias,
                              featTrCmpBias,
                              labelTrCmpBias,
                              absWeight=alpha,
                              lamda=lamda)
    elif lossFunc == 'LogSVM':
        beta, const = Log_SVM(featTrLblBias,
                              labelTrLblBias,
                              featTrCmpBias,
                              labelTrCmpBias,
                              absWeight=alpha,
                              lamda=lamda)
    elif lossFunc == 'SVMLog':
        beta, const = SVM_Log(featTrLblBias,
                              labelTrLblBias,
                              featTrCmpBias,
                              labelTrCmpBias,
                              absWeight=alpha,
                              lamda=lamda)
    elif lossFunc == 'SVMSVM':
        beta, const = SVM_SVM(featTrLblBias,
                              labelTrLblBias,
                              featTrCmpBias,
                              labelTrCmpBias,
                              absWeight=alpha,
                              lamda=lamda)
    elif lossFunc == 'Boost':
        NAbs, _ = featTrLblBias.shape
        NCmp, _ = featTrCmpBias.shape
        # Fit a gradient-boosted regressor on the absolute-label training data
        # (GradientBoostingRegressor is assumed imported from sklearn.ensemble).
        est = GradientBoostingRegressor(n_estimators=100,
                                        learning_rate=0.1,
                                        max_depth=5,
                                        random_state=1,
                                        loss='ls').fit(featTrLblBias,
                                                       labelTrLblBias)
    else:
        sys.exit(
            'Please choose the correct loss function from one of {LogLog, LogSVM, SVMLog, SVMSVM, Boost}'
        )
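    # Note (sketch assumption): a completed Boost branch would score with
    # est.predict(featTeLblListBias[expLbl]) and est.predict(featTeCmpSinBias)
    # instead of the linear products below, since beta and const are only
    # produced by the four linear solvers.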
    for expLbl in range(numOfExpLbl):
        scoreLblListTest[expLbl] = np.dot(featTeLblListBias[expLbl].copy(),
                                          np.array(beta)) + const
    scoreCmpTestSin = np.dot(featTeCmpSinBias, np.array(beta)) + const
    beta = np.array(beta).T

    return beta, const, scoreLblListTest, scoreCmpTestSin
def TrTeExpertModel(lossFunc, inputData, alpha, lamda, cmpExpOrder):
    # Train and test each expert's data.
    # inputData is a dictionary that contains the following keys:
    # featSin is an N by d matrix; N is the number of consensus labels or labels from one expert.
    # featCmp is an M by d matrix; M is the number of comparisons for a single expert.
    # labelRSD is an (N,) array containing +1 or -1.
    # labelAbs is an (N, numOfExpLbl) array containing +1 or -1; N is the number of samples for a single expert and numOfExpLbl is the number of experts who labeled the N images.
    # labelCmp is an (M, numOfExpCmp) array containing +1 or -1; M is the number of comparisons for a single expert and numOfExpCmp is the number of experts who labeled the comparison data.
    # indexLblTrain is a list of indices in [0, N-1] selecting the training label data for one expert or RSD; indexLblTest is the corresponding test split.
    # indexCmpTrain is a list of indices in [0, M-1] selecting the training comparison data for one expert.
    data = dataPrepare(inputData)
    expUnique = data.ExpUnique()
    featTrLblUniqueList = expUnique['featTrainLblUniqueList']
    labelTrLblUniqueList = expUnique['labelTrainLblUniqueList']
    featTeLblUniqueList = expUnique['featTestLblUniqueList']
    featTrCmpUniqueList = expUnique['featTrainCmpUniqueList']
    labelTrCmpUniqueList = expUnique['labelTrainCmpUnique']
    featTeCmpUniqueList = expUnique['featTestCmpUniqueList']
    numOfExpLbl = inputData['numOfExpLbl']
    numOfExpCmp = inputData['numOfExpCmp']
    betaList = [None] * numOfExpLbl
    constList = [None] * numOfExpLbl
    scoreLblListTest = [None] * numOfExpLbl
    scoreCmpListTest = [None] * numOfExpLbl
    if alpha != 0.0:
        for exp in range(numOfExpLbl):
            if exp not in cmpExpOrder:
                # Experts with no comparison data get only the absolute-label
                # part of the loss, so LogLog/LogSVM reduce to Logistic and
                # SVMLog/SVMSVM reduce to SVM.
                if lossFunc in ('LogLog', 'LogSVM'):
                    beta, const = Logistic(featTrLblUniqueList[exp],
                                           labelTrLblUniqueList[exp],
                                           lamda,
                                           alpha=alpha)
                elif lossFunc in ('SVMLog', 'SVMSVM'):
                    beta, const = SVM(featTrLblUniqueList[exp],
                                      labelTrLblUniqueList[exp],
                                      lamda,
                                      alpha=alpha)
                else:
                    sys.exit(
                        'Please choose the correct loss function from one of {LogLog, LogSVM, SVMLog, SVMSVM}'
                    )
                betaList[exp] = np.array(beta).T
                constList[exp] = const
                scoreLblListTest[exp] = np.dot(featTeLblUniqueList[exp].copy(),
                                               np.array(beta)) + const
            elif exp in cmpExpOrder:
                if lossFunc == 'LogLog':
                    beta, const = Log_Log(featTrLblUniqueList[exp],
                                          labelTrLblUniqueList[exp],
                                          featTrCmpUniqueList[exp],
                                          labelTrCmpUniqueList[exp],
                                          absWeight=alpha,
                                          lamda=lamda)
                elif lossFunc == 'LogSVM':
                    beta, const = Log_SVM(featTrLblUniqueList[exp],
                                          labelTrLblUniqueList[exp],
                                          featTrCmpUniqueList[exp],
                                          labelTrCmpUniqueList[exp],
                                          absWeight=alpha,
                                          lamda=lamda)
                elif lossFunc == 'SVMLog':
                    beta, const = SVM_Log(featTrLblUniqueList[exp],
                                          labelTrLblUniqueList[exp],
                                          featTrCmpUniqueList[exp],
                                          labelTrCmpUniqueList[exp],
                                          absWeight=alpha,
                                          lamda=lamda)
                elif lossFunc == 'SVMSVM':
                    beta, const = SVM_SVM(featTrLblUniqueList[exp],
                                          labelTrLblUniqueList[exp],
                                          featTrCmpUniqueList[exp],
                                          labelTrCmpUniqueList[exp],
                                          absWeight=alpha,
                                          lamda=lamda)
                else:
                    sys.exit(
                        'Please choose the correct loss function from one of {LogLog, LogSVM, SVMLog, SVMSVM}'
                    )
                scoreLblListTest[exp] = np.dot(featTeLblUniqueList[exp].copy(),
                                               np.array(beta)) + const
                scoreCmpListTest[exp] = np.dot(featTeCmpUniqueList[exp].copy(),
                                               np.array(beta)) + const
                betaList[exp] = np.array(beta).T
                constList[exp] = const
            else:
                sys.exit('The expert order is wrong.')
    else:
        for exp in cmpExpOrder:
            if lossFunc == 'LogLog':
                beta, const = Log_Log(featTrLblUniqueList[exp],
                                      labelTrLblUniqueList[exp],
                                      featTrCmpUniqueList[exp],
                                      labelTrCmpUniqueList[exp],
                                      absWeight=alpha,
                                      lamda=lamda)
            elif lossFunc == 'LogSVM':
                beta, const = Log_SVM(featTrLblUniqueList[exp],
                                      labelTrLblUniqueList[exp],
                                      featTrCmpUniqueList[exp],
                                      labelTrCmpUniqueList[exp],
                                      absWeight=alpha,
                                      lamda=lamda)
            elif lossFunc == 'SVMLog':
                beta, const = SVM_Log(featTrLblUniqueList[exp],
                                      labelTrLblUniqueList[exp],
                                      featTrCmpUniqueList[exp],
                                      labelTrCmpUniqueList[exp],
                                      absWeight=alpha,
                                      lamda=lamda)
            elif lossFunc == 'SVMSVM':
                beta, const = SVM_SVM(featTrLblUniqueList[exp],
                                      labelTrLblUniqueList[exp],
                                      featTrCmpUniqueList[exp],
                                      labelTrCmpUniqueList[exp],
                                      absWeight=alpha,
                                      lamda=lamda)
            else:
                sys.exit(
                    'Please choose the correct loss function from one of {LogLog, LogSVM, SVMLog, SVMSVM}'
                )
            scoreLblListTest[exp] = np.dot(featTeLblUniqueList[exp].copy(),
                                           np.array(beta)) + const
            scoreCmpListTest[exp] = np.dot(featTeCmpUniqueList[exp].copy(),
                                           np.array(beta)) + const
            betaList[exp] = np.array(beta).T
            constList[exp] = const

    return betaList, constList, scoreLblListTest, scoreCmpListTest
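
# A hedged evaluation sketch for the per-expert outputs above: given matching
# held-out labels (labelTeLblList is a hypothetical name, not defined in this
# module), per-expert test AUC could be computed as:
#   from sklearn import metrics
#   aucLbl = [metrics.roc_auc_score(labelTeLblList[e], scoreLblListTest[e])
#             for e in cmpExpOrder]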
# Plus and PreP have the same number of training samples
XtrainRSD = np.concatenate((XtrainRSDL, XtrainRSDC), axis=0)
YtrainRSDPlus = np.append(YtrainRSDLPlus, YtrainRSDC)
YtrainRSDPreP = np.append(YtrainRSDLPreP, YtrainRSDC)

# 1 Plus: predict RSD, training with Exp13 bias
alphaExp132RSDPlus = 1.0
lambdaExp132RSDPlus = 0.1
betaExp132RSDPlus, constExp132RSDPlus = Log_Log(XtrainExp13L, YtrainExp13LPlus,
                                                XtrainExp13C, YtrainExp13C,
                                                alphaExp132RSDPlus,
                                                lambdaExp132RSDPlus)
betaExp132RSDPlus = np.array(betaExp132RSDPlus[0:d])
# featurePlot(betaExp132RSDPlus,nameBase+'featureSelectionNormalizedRSDPlus.pdf', 'Predicting RSD Label Plus vs Not Plus',featsName)

# 2 PreP: predict RSD, training with Exp13 bias
alphaExp132RSDPreP = 1.0
lambdaExp132RSDPreP = 0.3
betaExp132RSDPreP, constExp132RSDPreP = Log_SVM(XtrainExp13L, YtrainExp13LPreP,
                                                XtrainExp13C, YtrainExp13C,
                                                alphaExp132RSDPreP,
                                                lambdaExp132RSDPreP)
betaExp132RSDPreP = np.array(betaExp132RSDPreP[0:d])
# featurePlot(betaExp132RSDPreP,nameBase+'featureSelectionNormalizedRSDPreP.pdf', 'Predicting RSD Label Not Normal vs Normal',featsName)

outputDict = {
    'RSDPlus': [betaExp132RSDPlus, constExp132RSDPlus],
    'RSDPreP': [betaExp132RSDPreP, constExp132RSDPreP]
}
savemat(dataType + 'Beta.mat', outputDict)
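
# The saved coefficients can be reloaded with scipy.io.loadmat, the standard
# counterpart of savemat; note that savemat stores the Python lists above as
# MATLAB cell arrays, so entries come back as nested object arrays (a sketch;
# exact unpacking depends on the array shapes):
#   from scipy.io import loadmat
#   params = loadmat(dataType + 'Beta.mat')
#   betaPlus, constPlus = params['RSDPlus'][0]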
def LogSVMTrTeExpUnique(inputData,
                        alpha,
                        lamda,
                        cmpExpOrder,
                        penaltyType='l1',
                        numMaxIters=10000):
    # Train and test each expert's data.
    # inputData is a dictionary that contains the following keys:
    # featSin is an N by d matrix; N is the number of consensus labels or labels from one expert.
    # featCmp is an M by d matrix; M is the number of comparisons for a single expert.
    # labelRSD is an (N,) array containing +1 or -1.
    # labelAbs is an (N, numOfExpLbl) array containing +1 or -1; N is the number of samples for a single expert and numOfExpLbl is the number of experts who labeled the N images.
    # labelCmp is an (M, numOfExpCmp) array containing +1 or -1; M is the number of comparisons for a single expert and numOfExpCmp is the number of experts who labeled the comparison data.
    # indexLblTrain is a list of indices in [0, N-1] selecting the training label data for one expert or RSD; indexLblTest is the corresponding test split.
    # indexCmpTrain is a list of indices in [0, M-1] selecting the training comparison data for one expert.
    data = dataPrepare(inputData)
    expUnique = data.ExpUnique()
    featTrLblUniqueList = expUnique['featTrainLblUniqueList']
    labelTrLblUniqueList = expUnique['labelTrainLblUniqueList']
    featTeLblUniqueList = expUnique['featTestLblUniqueList']
    featTrCmpUniqueList = expUnique['featTrainCmpUniqueList']
    labelTrCmpUniqueList = expUnique['labelTrainCmpUnique']
    featTeCmpUniqueList = expUnique['featTestCmpUniqueList']
    numOfExpLbl = inputData['numOfExpLbl']
    numOfExpCmp = inputData['numOfExpCmp']
    betaList = [None] * numOfExpLbl
    constList = [None] * numOfExpLbl
    scoreLblListTest = [None] * numOfExpLbl
    scoreCmpListTest = [None] * numOfExpLbl
    if penaltyType == 'l1':
        if alpha != 0.0:
            for exp in range(numOfExpLbl):
                if exp not in cmpExpOrder:
                    beta, const = Logistic(featTrLblUniqueList[exp],
                                           labelTrLblUniqueList[exp],
                                           alpha=alpha,
                                           lamda=lamda)
                    betaList[exp] = np.array(beta).T
                    constList[exp] = const
                    scoreLblListTest[exp] = np.dot(
                        featTeLblUniqueList[exp].copy(),
                        np.array(beta)) + const
                elif exp in cmpExpOrder:
                    beta, const = Log_SVM(featTrLblUniqueList[exp],
                                          labelTrLblUniqueList[exp],
                                          featTrCmpUniqueList[exp],
                                          labelTrCmpUniqueList[exp],
                                          absWeight=alpha,
                                          lamda=lamda)
                    scoreLblListTest[exp] = np.dot(
                        featTeLblUniqueList[exp].copy(),
                        np.array(beta)) + const
                    scoreCmpListTest[exp] = np.dot(
                        featTeCmpUniqueList[exp].copy(),
                        np.array(beta)) + const
                    betaList[exp] = np.array(beta).T
                    constList[exp] = const
                else:
                    sys.exit('The expert order is wrong.')
        else:
            for exp in cmpExpOrder:
                beta, const = Log_SVM(featTrLblUniqueList[exp],
                                      labelTrLblUniqueList[exp],
                                      featTrCmpUniqueList[exp],
                                      labelTrCmpUniqueList[exp],
                                      absWeight=alpha,
                                      lamda=lamda)
                scoreLblListTest[exp] = np.dot(featTeLblUniqueList[exp].copy(),
                                               np.array(beta)) + const
                scoreCmpListTest[exp] = np.dot(featTeCmpUniqueList[exp].copy(),
                                               np.array(beta)) + const
                betaList[exp] = np.array(beta).T
                constList[exp] = const
    else:
        sys.exit("The penalty type must be 'l1'")

    return betaList, constList, scoreLblListTest, scoreCmpListTest