Example #1
# along with Flame. If not, see <http://www.gnu.org/licenses/>.

import os
import shutil
import pathlib
import sys
import codecs
import string
import re

from flame.util import utils, get_logger

# if the number of models is higher, try to run in multithread
MAX_MODELS_SINGLE_CPU = 4

LOG = get_logger(__name__)

def get_ensemble_input(task, model_names, model_versions, infile):
    '''
    Manage the collection of input data from a list of models
    '''

    num_models = len(model_names)

    # when there are multiple external sources it is more convenient to
    # parallelize the models than to run internal tasks in parallel
    parallel = (not utils.isSingleThread()) and (num_models > MAX_MODELS_SINGLE_CPU)
    
    # disables internal parallelism
    # if parallel:
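
# NOTE: the example is truncated here. Below is a minimal sketch of how the
# parallel branch might dispatch one worker per model over a process pool,
# assuming a hypothetical run_model() helper; this is not Flame's actual
# implementation.

from multiprocessing import Pool

def run_model(name, version, infile):
    # stand-in for a single model run; the real Flame call is not shown
    return f'{name} v{version} <- {infile}'

def run_ensemble(model_names, model_versions, infile, parallel):
    jobs = [(name, ver, infile) for name, ver in zip(model_names, model_versions)]
    if parallel:
        # one worker per model, capped at the number of available CPUs
        with Pool(processes=min(len(jobs), os.cpu_count() or 1)) as pool:
            results = pool.starmap(run_model, jobs)
    else:
        results = [run_model(*job) for job in jobs]
    return results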
Example #2
from nonconformist.base import ClassifierAdapter, RegressorAdapter
from nonconformist.acp import AggregatedCp, BootstrapSampler
from nonconformist.icp import IcpClassifier, IcpRegressor
from nonconformist.nc import (AbsErrorErrFunc, ClassifierNc, MarginErrFunc,
                              RegressorNc, RegressorNormalizer)
from sklearn.neighbors import KNeighborsRegressor

from flame.stats.base_model import BaseEstimator
from flame.stats.model_validation import getCrossVal, CF_QuanVal
from flame.stats.scale import scale, center
from flame.util import get_logger

log = get_logger(__name__)
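
# The imports above pull in nonconformist's aggregated conformal prediction
# stack. For orientation only, here is a minimal sketch of how these pieces
# typically compose for a regressor, following nonconformist's documented
# API; Flame's actual wiring inside BaseEstimator may differ.

import numpy as np
from sklearn.ensemble import RandomForestRegressor

def conformal_regressor_sketch(X, y):
    # wrap the sklearn model, attach an absolute-error nonconformity
    # function, and aggregate ICPs trained on bootstrap resamples
    adapter = RegressorAdapter(RandomForestRegressor(n_estimators=100))
    acp = AggregatedCp(IcpRegressor(RegressorNc(adapter, AbsErrorErrFunc())),
                       BootstrapSampler())
    acp.fit(X, y)
    # one [low, high] interval per query row at 80% confidence
    return acp.predict(X[:5], significance=0.2)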


class RF(BaseEstimator):
    def __init__(self, X, Y, parameters):
        super(RF, self).__init__(X, Y, parameters)

        # estimator settings and the search grid used when tuning
        self.estimator_parameters = parameters['RF_parameters']
        self.tune = parameters['tune']
        self.tune_parameters = parameters['RF_optimize']

        if self.quantitative:
            # regression: class_weight only applies to classification,
            # so drop it from the tuning grid if present
            self.name = "RF-R"
            self.tune_parameters.pop("class_weight", None)
        else:
            self.name = "RF-C"
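
# A hypothetical instantiation of the class above. The keys 'RF_parameters',
# 'tune' and 'RF_optimize' appear in the snippet; the 'quantitative' key is
# an assumption about how BaseEstimator sets self.quantitative, so treat
# this as illustrative only.

import numpy as np

params = {
    'RF_parameters': {'n_estimators': 200},
    'tune': False,
    'RF_optimize': {'n_estimators': [50, 200],
                    'class_weight': [None, 'balanced']},
    'quantitative': True,   # assumed key, see note above
}

X, Y = np.random.rand(100, 10), np.random.rand(100)
model = RF(X, Y, params)    # model.name would be 'RF-R' for regression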