# Code example #1
0
def computeErrorIndependently():
    """Recompute MAE statistics for the humidity sensors (RVs 50-99) from
    previously saved evidence and prediction CSVs, without re-running
    inference.

    For each observation rate this writes one per-trial MAE file plus the
    across-trial mean and standard-error summaries into the errors folder.
    """
    # The temperature group would be range(0, 50); here only humidity.
    rvSet = range(50, 100)
    horizon = utils.properties.timeSpan
    window = utils.properties.tWin
    net_topology = utils.properties.dbn_topology
    max_trials = utils.properties.numTrials
    _, testset = DataProvider.provide_data()
    error_model = MultivariateDiscreteKalmanFilter()
    model_name = utils.properties.prediction_model
    for obsrate in utils.properties.obsrateList:
        base = utils.properties.outputDirPath + str(obsrate)
        evidencepath = base + '/evidences/'
        predictionpath = base + '/predictions/'
        errorpath = base + '/errors/'
        # With no observations every trial is identical, so one suffices.
        trial_count = 1 if obsrate == 0.0 else max_trials
        errResults = np.empty(shape=(trial_count, horizon, 3))
        for trial in range(trial_count):
            evidMat = np.loadtxt(
                evidencepath + '{}_activeInf_model={}_T={}_trial={}_obsrate={}.csv'.format(
                    'evidMat', model_name, horizon, trial, obsrate),
                delimiter=',').astype(np.bool_)
            predResults = np.loadtxt(
                predictionpath + 'mean/{}_activeInf_model={}_T={}_trial={}_obsRate={}.csv'.format(
                    'predResults', model_name, horizon, trial, obsrate),
                delimiter=',')
            for t in range(horizon):
                # Three MAE variants, distinguished only by type_ (0, 1, 2).
                for err_type in (0, 1, 2):
                    errResults[trial, t, err_type] = error_model.compute_mean_absolute_error(
                        testset[rvSet, t], predResults[rvSet, t], type_=err_type,
                        evidence_mat=evidMat[rvSet, t])
            np.savetxt(
                errorpath + '{}_activeInfo_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.format(
                    'mae_humid', model_name, net_topology, window, horizon, obsrate, trial),
                errResults[trial], delimiter=',')
        np.savetxt(
            errorpath + '{}_activeInf_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.format(
                'meanMAE_humid', model_name, net_topology, window, horizon, obsrate, 'mean'),
            np.mean(errResults, axis=0), delimiter=',')
        np.savetxt(
            errorpath + '{}_activeInf_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.format(
                'stderrMAE_humid', model_name, net_topology, window, horizon, obsrate, 'mean'),
            standard_error(errResults, axis=0), delimiter=',')
# Code example #2
0
def testActiveInferenceGaussianDBNParallel():
    start = time()
    print 'Process started at:', datetime.datetime.fromtimestamp(start).strftime('%H:%M, %m/%d/%Y')
    tWin = utils.properties.tWin
    topology = utils.properties.dbn_topology
    T = utils.properties.timeSpan
    numTrials = utils.properties.numTrials

    trainset, testset = DataProvider.provide_data()

    if 'gp' == utils.properties.prediction_model:
        prediction_model = GaussianProcessLocal()
    elif 'dgbn' == utils.properties.prediction_model:
        prediction_model = GaussianDBN()
    elif 'kf' == utils.properties.prediction_model:
        prediction_model = MultivariateDiscreteKalmanFilter()
    elif 'lc-linear' == utils.properties.prediction_model:
        prediction_model = LinearChain(regressionMethod='linear')
    elif 'lc-ridge' == utils.properties.prediction_model:
        prediction_model = LinearChain(regressionMethod='ridge')
    elif 'lc-lasso' == utils.properties.prediction_model:
        prediction_model = LinearChain(regressionMethod='lasso')
    else:
        raise ValueError('Unrecognized prediction model name')
    print 'Prediction model selected: ', prediction_model.__class__
    prediction_model.fit(trainset, topology=topology)
    print 'Prediction model was trained.'
    return
    Y_test_allT = np.vectorize(lambda x: x.true_label)(testset)
    parameterList = list()
    sampleSize = utils.properties.mh_sampleSize
    burnInCount = utils.properties.mh_burnInCount
    for obsrate in utils.properties.obsrateList:
        obsCount = int(obsrate * prediction_model.rvCount)
        evidencepath = utils.properties.outputDirPath + str(obsrate) + '/evidences/'
        if not os.path.exists(evidencepath):
            os.makedirs(evidencepath)
        meanPredictionPath = utils.properties.outputDirPath + str(obsrate) + '/predictions/mean/'
        if not os.path.exists(meanPredictionPath):
            os.makedirs(meanPredictionPath)
        varPredictionPath = utils.properties.outputDirPath + str(obsrate) + '/predictions/var/'
        if not os.path.exists(varPredictionPath):
            os.makedirs(varPredictionPath)
        errorpath = utils.properties.outputDirPath + str(obsrate) + '/errors/'
        if not os.path.exists(errorpath):
            os.makedirs(errorpath)
        selection_strategy_name = utils.properties.selectionStrategy
        if 0.0 == obsrate:
            trial = 0
            parameterList.append({'trial': trial, 'prediction_model': prediction_model,
                                  'selection_strategy_name': selection_strategy_name, 'T': T, 'tWin': tWin,
                                  'testset': testset, 'Y_test_allT': Y_test_allT, 'sampleSize': sampleSize,
                                  'burnInCount': burnInCount, 'topology': topology, 'obsrate': obsrate,
                                  'obsCount': obsCount, 'evidencepath': evidencepath,
                                  'meanPredictionPath': meanPredictionPath, 'varPredictionPath': varPredictionPath,
                                  'errorpath': errorpath})
        else:
            for trial in range(0, numTrials):
                parameterList.append({'trial': trial, 'prediction_model': prediction_model,
                                      'selection_strategy_name': selection_strategy_name, 'T': T, 'tWin': tWin,
                                      'testset': testset, 'Y_test_allT': Y_test_allT, 'sampleSize': sampleSize,
                                      'burnInCount': burnInCount, 'topology': topology, 'obsrate': obsrate,
                                      'obsCount': obsCount, 'evidencepath': evidencepath,
                                      'meanPredictionPath': meanPredictionPath, 'varPredictionPath': varPredictionPath,
                                      'errorpath': errorpath})

    print 'Tasks for parallel computation were created.'
    pool = mp.Pool(processes=utils.properties.numParallelThreads)
    print 'Tasks in parallel are being started.'
    pool.map(trialFuncStar, parameterList)
    # for params in parameterList:
    #     trialFuncStar(params)

    for obsrate in utils.properties.obsrateList:
        errorpath = utils.properties.outputDirPath + str(obsrate) + '/errors/'
        if 0.0 == obsrate:
            trial = 0
            errResults = np.loadtxt(errorpath +
                                    'mae_activeInfo_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.
                                    format(utils.properties.prediction_model, topology, tWin, T, obsrate,
                                                          trial), delimiter=',')
            np.savetxt(errorpath +
                       'meanMAE_activeInf_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.
                       format(utils.properties.prediction_model, topology, tWin, T, obsrate, 'mean'),
                       errResults, delimiter=',')
            np.savetxt(errorpath +
                       'stderrMAE_activeInf_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.
                       format(utils.properties.prediction_model, topology, tWin, T, obsrate, 'mean'),
                       np.zeros(shape=errResults.shape), delimiter=',')
        else:
            errResults = np.empty(shape=(numTrials, T, 6))
            for trial in range(numTrials):
                errResults[trial] = np.loadtxt(errorpath + ('mae_activeInfo_model={}_topology={}_window={}_' +
                                               'T={}_obsRate={}_trial={}.csv').
                                               format(utils.properties.prediction_model, topology,tWin,T,obsrate,
                                               trial), delimiter=',')
            np.savetxt(errorpath +
                       'meanMAE_activeInf_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.
                       format(utils.properties.prediction_model, topology,tWin,T,obsrate, 'mean'),
                       np.mean(errResults,axis=0), delimiter=',')
            np.savetxt(errorpath +
                       'stderrMAE_activeInf_model={}_topology={}_window={}_T={}_obsRate={}_trial={}.csv'.
                       format(utils.properties.prediction_model, topology,tWin,T,obsrate, 'mean'),
                       standard_error(errResults, axis=0), delimiter=',')
    print 'End of process, duration: {} secs'.format(time() - start)