Example #1
    def __init__(self, deepnet, api=None):
        """The Deepnet constructor can be given as first argument:
            - a deepnet structure
            - a deepnet id
            - a path to a JSON file containing a deepnet structure

        """
        self.resource_id = None
        self.regression = False
        self.network = None
        self.networks = None
        self.input_fields = []
        self.class_names = []
        self.preprocess = []
        self.optimizer = None
        self.missing_numerics = False
        self.api = get_api_connection(api)
        self.resource_id, deepnet = get_resource_dict( \
            deepnet, "deepnet", api=self.api)

        if 'object' in deepnet and isinstance(deepnet['object'], dict):
            deepnet = deepnet['object']
        self.input_fields = deepnet['input_fields']
        if 'deepnet' in deepnet and isinstance(deepnet['deepnet'], dict):
            status = get_status(deepnet)
            objective_field = deepnet['objective_fields']
            deepnet = deepnet['deepnet']
            if 'code' in status and status['code'] == FINISHED:
                self.fields = deepnet['fields']
                missing_tokens = deepnet.get('missing_tokens')
                ModelFields.__init__(
                    self, self.fields,
                    objective_id=extract_objective(objective_field),
                    terms=True, categories=True, missing_tokens=missing_tokens)

                self.regression = \
                    self.fields[self.objective_id]['optype'] == NUMERIC
                if not self.regression:
                    self.class_names = [category for category, _ in \
                        self.fields[self.objective_id][ \
                        'summary']['categories']]
                    self.class_names.sort()
                    # order matters
                    self.objective_categories = [category for \
                        category, _ in self.fields[self.objective_id][ \
                       "summary"]["categories"]]

                self.missing_numerics = deepnet.get('missing_numerics', False)
                if 'network' in deepnet:
                    network = deepnet['network']
                    self.network = network
                    self.networks = network.get('networks', [])
                    self.preprocess = network.get('preprocess')
                    self.optimizer = network.get('optimizer', {})
            else:
                raise Exception("The deepnet isn't finished yet")
        else:
            raise Exception("Cannot create the Deepnet instance. Could not"
                            " find the 'deepnet' key in the resource:\n\n%s" %
                            deepnet)
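
As the docstring says, the first argument may be a deepnet structure, a deepnet id, or a path to a JSON file holding the structure. A minimal usage sketch (the resource id below is a made-up placeholder):

    from bigml.deepnet import Deepnet

    # any of the three documented forms works as the first argument
    local_deepnet = Deepnet("deepnet/5afde4a8bf24f9158c000000")
    # or: Deepnet("./deepnet.json") with a stored JSON structure

    # once built, the local object scores inputs offline
    prediction = local_deepnet.predict({"petal length": 4.2})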
Example #2
    def __init__(self, model, api=None):

        self.api = get_api_connection(api)
        resource_id, model = extract_id(model, api)
        resource_type = get_resource_type(resource_id)
        kwargs = {"api": self.api}
        local_model = COMPONENT_CLASSES[resource_type](model, **kwargs)
        self.__class__.__bases__ = local_model.__class__.__bases__
        for attr, value in local_model.__dict__.items():
            setattr(self, attr, value)
        self.local_model = local_model
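
This wrapper inspects the resource type, builds the matching local class from COMPONENT_CLASSES, and then grafts that object's base classes and attributes onto itself, so it answers attribute lookups and isinstance checks as the concrete class would. A self-contained sketch of the same dynamic-delegation trick (all names here are invented for illustration):

    class BaseA:
        def kind(self):
            return "A"

    class BaseB:
        def kind(self):
            return "B"

    class Concrete(BaseB):
        def __init__(self):
            self.value = 42

    class Facade(BaseA):
        def __init__(self):
            inner = Concrete()
            # rebind the facade's bases and copy the attributes; note that
            # this mutates the Facade class itself, as in the original code
            self.__class__.__bases__ = inner.__class__.__bases__
            for attr, value in inner.__dict__.items():
                setattr(self, attr, value)
            self.local_model = inner

    facade = Facade()
    print(facade.kind(), facade.value)  # -> B 42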
Example #3
    def __init__(self, anomaly, api=None, cache_get=None):

        if use_cache(cache_get):
            # using a cache to store the Anomaly attributes
            self.__dict__ = load(get_anomaly_id(anomaly), cache_get)
            return

        self.resource_id = None
        self.sample_size = None
        self.input_fields = None
        self.default_numeric_value = None
        self.mean_depth = None
        self.expected_mean_depth = None
        self.iforest = None
        self.id_fields = []
        api = get_api_connection(api)
        self.resource_id, anomaly = get_resource_dict(
            anomaly, "anomaly", api=api)

        if 'object' in anomaly and isinstance(anomaly['object'], dict):
            anomaly = anomaly['object']
            self.sample_size = anomaly.get('sample_size')
            self.input_fields = anomaly.get('input_fields')
            self.default_numeric_value = anomaly.get('default_numeric_value')
            self.id_fields = anomaly.get('id_fields', [])

        if 'model' in anomaly and isinstance(anomaly['model'], dict):
            ModelFields.__init__(
                self, anomaly['model'].get('fields'),
                missing_tokens=anomaly['model'].get('missing_tokens'))

            self.mean_depth = anomaly['model'].get('mean_depth')
            self.normalization_factor = anomaly['model'].get(
                'normalization_factor')
            self.nodes_mean_depth = anomaly['model'].get(
                'nodes_mean_depth')
            status = get_status(anomaly)
            if 'code' in status and status['code'] == FINISHED:
                self.expected_mean_depth = None
                if self.mean_depth is None or self.sample_size is None:
                    raise Exception("The anomaly data is not complete. "
                                    "Score will not be available")
                self.norm = self.normalization_factor if \
                    self.normalization_factor is not None else \
                    self.norm_factor()
                iforest = anomaly['model'].get('trees', [])
                if iforest:
                    self.iforest = [
                        build_tree([anomaly_tree['root']])
                        for anomaly_tree in iforest]
                self.top_anomalies = anomaly['model']['top_anomalies']
            else:
                raise Exception("The anomaly isn't finished yet")
Example #4
    def __init__(self, anomaly, api=None):

        self.resource_id = None
        self.sample_size = None
        self.input_fields = None
        self.mean_depth = None
        self.expected_mean_depth = None
        self.iforest = None
        self.top_anomalies = None
        self.id_fields = []
        self.api = get_api_connection(api)
        self.resource_id, anomaly = get_resource_dict( \
            anomaly, "anomaly", api=self.api)

        if 'object' in anomaly and isinstance(anomaly['object'], dict):
            anomaly = anomaly['object']
            self.sample_size = anomaly.get('sample_size')
            self.input_fields = anomaly.get('input_fields')
            self.id_fields = anomaly.get('id_fields', [])
        if 'model' in anomaly and isinstance(anomaly['model'], dict):
            ModelFields.__init__( \
                self, anomaly['model'].get('fields'), \
                missing_tokens=anomaly['model'].get('missing_tokens'))
            if ('top_anomalies' in anomaly['model']
                    and isinstance(anomaly['model']['top_anomalies'], list)):
                self.mean_depth = anomaly['model'].get('mean_depth')
                status = get_status(anomaly)
                if 'code' in status and status['code'] == FINISHED:
                    self.expected_mean_depth = None
                    if self.mean_depth is None or self.sample_size is None:
                        raise Exception("The anomaly data is not complete. "
                                        "Score will"
                                        " not be available")
                    else:
                        default_depth = self.mean_depth if \
                            self.sample_size == 1 else (2 * (DEPTH_FACTOR + \
                            math.log(self.sample_size - 1) - \
                            (float(self.sample_size - 1) / self.sample_size)))
                        self.expected_mean_depth = min(self.mean_depth,
                                                       default_depth)
                    iforest = anomaly['model'].get('trees', [])
                    if iforest:
                        self.iforest = [
                            AnomalyTree(anomaly_tree['root'], self.fields)
                            for anomaly_tree in iforest
                        ]
                    self.top_anomalies = anomaly['model']['top_anomalies']
                else:
                    raise Exception("The anomaly isn't finished yet")
            else:
                raise Exception("Cannot create the Anomaly instance. Could not"
                                " find the 'top_anomalies' key in the"
                                " resource:\n\n%s" % anomaly['model'].keys())
Example #5
    def __init__(self, execution, api=None):

        self.resource_id = None
        self.outputs = None
        self.output_types = None
        self.output_resources = None
        self.result = None
        self.status = None
        self.source_location = None
        self.error = None
        self.error_message = None
        self.error_location = None
        self.call_stack = None
        self.api = get_api_connection(api)

        try:
            self.resource_id, execution = get_resource_dict( \
                execution, "execution", self.api)
        except ValueError as resource:
            try:
                execution = json.loads(str(resource))
                self.resource_id = execution["resource"]
            except ValueError:
                raise ValueError("The execution resource was faulty: \n%s" % \
                    resource)

        if 'object' in execution and isinstance(execution['object'], dict):
            execution = execution['object']
            self.status = execution["status"]
            self.error = self.status.get("error")
            if self.error is not None:
                self.error_message = self.status.get("message")
                self.error_location = self.status.get("source_location")
                self.call_stack = self.status.get("call_stack")
            else:
                self.source_location = self.status.get("source_location")
                if 'execution' in execution and \
                        isinstance(execution['execution'], dict):
                    execution = execution.get('execution')
                    self.result = execution.get("result")
                    self.outputs = dict((output[0], output[1]) \
                        for output in execution.get("outputs"))
                    self.output_types = dict((output[0], output[2]) \
                        for output in execution.get("outputs"))
                    self.output_resources = dict((res["variable"], res["id"]) \
                        for res in execution.get("output_resources"))
                    self.execution = execution
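
Once built, the object exposes the script results directly; which attributes are populated depends on whether the execution failed. A usage sketch (placeholder id):

    from bigml.execution import Execution

    execution = Execution("execution/5afde4a8bf24f9158c000000")
    if execution.error is None:
        print(execution.result)            # value returned by the script
        print(execution.outputs)           # {output_name: output_value}
        print(execution.output_resources)  # {variable: resource_id}
    else:
        print(execution.error_message, execution.error_location)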
Example #6
    def __init__(self, linear_regression, api=None):

        self.resource_id = None
        self.input_fields = []
        self.term_forms = {}
        self.tag_clouds = {}
        self.term_analysis = {}
        self.items = {}
        self.item_analysis = {}
        self.categories = {}
        self.coefficients = []
        self.dataset_field_types = {}
        self.field_codings = {}
        self.bias = None
        self.xtx_inverse = []
        self.mean_squared_error = None
        self.number_of_parameters = None
        self.number_of_samples = None
        self.api = get_api_connection(api)
        self.resource_id, linear_regression = get_resource_dict( \
            linear_regression, "linearregression", api=self.api)

        if 'object' in linear_regression and \
            isinstance(linear_regression['object'], dict):
            linear_regression = linear_regression['object']
        try:
            self.input_fields = linear_regression.get("input_fields", [])
            self.dataset_field_types = linear_regression.get(
                "dataset_field_types", {})
            self.weight_field = linear_regression.get("weight_field")
            objective_field = linear_regression['objective_fields'] if \
                linear_regression['objective_fields'] else \
                linear_regression['objective_field']
        except KeyError:
            raise ValueError("Failed to find the linear regression expected "
                             "JSON structure. Check your arguments.")
        if 'linear_regression' in linear_regression and \
            isinstance(linear_regression['linear_regression'], dict):
            status = get_status(linear_regression)
            if 'code' in status and status['code'] == FINISHED:
                linear_regression_info = \
                    linear_regression['linear_regression']
                fields = linear_regression_info.get('fields', {})

                if not self.input_fields:
                    self.input_fields = [ \
                        field_id for field_id, _ in
                        sorted(fields.items(),
                               key=lambda x: x[1].get("column_number"))]
                self.coeff_ids = self.input_fields[:]
                self.coefficients = linear_regression_info.get( \
                    'coefficients', [])
                self.bias = linear_regression_info.get('bias', True)
                self.field_codings = linear_regression_info.get( \
                     'field_codings', {})
                self.number_of_parameters = linear_regression_info.get( \
                    "number_of_parameters")
                missing_tokens = linear_regression_info.get("missing_tokens")

                objective_id = extract_objective(objective_field)
                ModelFields.__init__(self,
                                     fields,
                                     objective_id=objective_id,
                                     terms=True,
                                     categories=True,
                                     numerics=True,
                                     missing_tokens=missing_tokens)
                self.field_codings = linear_regression_info.get( \
                  'field_codings', {})
                self.format_field_codings()
                for field_id in self.field_codings:
                    if field_id not in fields and \
                            field_id in self.inverted_fields:
                        self.field_codings.update( \
                            {self.inverted_fields[field_id]: \
                             self.field_codings[field_id]})
                        del self.field_codings[field_id]
                stats = linear_regression_info["stats"]
                if STATS and stats is not None and \
                        stats.get("xtx_inverse") is not None:
                    self.xtx_inverse = stats["xtx_inverse"][:]
                    self.mean_squared_error = stats["mean_squared_error"]
                    self.number_of_samples = stats["number_of_samples"]
                    # to be used in predictions
                    self.t_crit = student_t.interval( \
                        CONFIDENCE,
                        self.number_of_samples - self.number_of_parameters)[1]
                    self.xtx_inverse = list( \
                        np.linalg.inv(np.array(self.xtx_inverse)))

            else:
                raise Exception("The linear regression isn't finished yet")
        else:
            raise Exception("Cannot create the LinearRegression instance."
                            " Could not find the 'linear_regression' key"
                            " in the resource:\n\n%s" % linear_regression)
Example #7
    def __init__(self, time_series, api=None):

        self.resource_id = None
        self.input_fields = []
        self.objective_fields = []
        self.all_numeric_objectives = False
        self.period = 1
        self.ets_models = {}
        self.error = None
        self.damped_trend = None
        self.seasonality = None
        self.trend = None
        self.time_range = {}
        self.field_parameters = {}
        self._forecast = {}
        self.api = get_api_connection(api)

        self.resource_id, time_series = get_resource_dict( \
            time_series, "timeseries", api=self.api)

        if 'object' in time_series and \
            isinstance(time_series['object'], dict):
            time_series = time_series['object']
        try:
            self.input_fields = time_series.get("input_fields", [])
            self._forecast = time_series.get("forecast")
            self.objective_fields = time_series.get("objective_fields", [])
            objective_field = time_series['objective_field'] if \
                time_series.get('objective_field') else \
                time_series['objective_fields']
        except KeyError:
            raise ValueError("Failed to find the time series expected "
                             "JSON structure. Check your arguments.")
        if 'time_series' in time_series and \
            isinstance(time_series['time_series'], dict):
            status = get_status(time_series)
            if 'code' in status and status['code'] == FINISHED:
                time_series_info = time_series['time_series']
                fields = time_series_info.get('fields', {})
                self.fields = fields
                if not self.input_fields:
                    self.input_fields = [ \
                        field_id for field_id, _ in
                        sorted(self.fields.items(),
                               key=lambda x: x[1].get("column_number"))]
                self.all_numeric_objectives = time_series_info.get( \
                    'all_numeric_objectives')
                self.period = time_series_info.get('period', 1)
                self.ets_models = time_series_info.get('ets_models', {})
                self.error = time_series_info.get('error')
                self.damped_trend = time_series_info.get('damped_trend')
                self.seasonality = time_series_info.get('seasonality')
                self.trend = time_series_info.get('trend')
                self.time_range = time_series_info.get('time_range')
                self.field_parameters = time_series_info.get( \
                    'field_parameters', {})

                objective_id = extract_objective(objective_field)
                ModelFields.__init__(self, fields, objective_id=objective_id)
            else:
                raise Exception("The time series isn't finished yet")
        else:
            raise Exception("Cannot create the TimeSeries instance."
                            " Could not find the 'time_series' key"
                            " in the resource:\n\n%s" % time_series)
Example #8
    def __init__(self, topic_model, api=None):

        self.resource_id = None
        self.stemmer = None
        self.seed = None
        self.case_sensitive = False
        self.bigrams = False
        self.ntopics = None
        self.temp = None
        self.phi = None
        self.term_to_index = None
        self.topics = []
        self.api = get_api_connection(api)

        self.resource_id, topic_model = get_resource_dict( \
            topic_model, "topicmodel", api=self.api)

        if 'object' in topic_model and isinstance(topic_model['object'], dict):
            topic_model = topic_model['object']

        if 'topic_model' in topic_model \
                and isinstance(topic_model['topic_model'], dict):
            status = get_status(topic_model)
            if 'code' in status and status['code'] == FINISHED:
                self.input_fields = topic_model['input_fields']
                model = topic_model['topic_model']
                self.topics = model['topics']

                if 'language' in model and model['language'] is not None:
                    lang = model['language']
                    if lang in CODE_TO_NAME:
                        self.stemmer = Stemmer.Stemmer(CODE_TO_NAME[lang])

                self.term_to_index = {self.stem(term): index for index, term
                                      in enumerate(model['termset'])}

                self.seed = abs(model['hashed_seed'])
                self.case_sensitive = model['case_sensitive']
                self.bigrams = model['bigrams']

                self.ntopics = len(model['term_topic_assignments'][0])

                self.alpha = model['alpha']
                self.ktimesalpha = self.ntopics * self.alpha

                self.temp = [0] * self.ntopics

                assignments = model['term_topic_assignments']
                beta = model['beta']
                nterms = len(self.term_to_index)

                sums = [sum(n[index] for n in assignments) for index
                        in range(self.ntopics)]

                self.phi = [[0 for _ in range(nterms)]
                            for _ in range(self.ntopics)]

                for k in range(self.ntopics):
                    norm = sums[k] + nterms * beta
                    for w in range(nterms):
                        self.phi[k][w] = (assignments[w][k] + beta) / norm

                missing_tokens = model.get("missing_tokens")
                ModelFields.__init__(self, model['fields'],
                                     missing_tokens=missing_tokens)
            else:
                raise Exception("The topic model isn't finished yet")
        else:
            raise Exception("Cannot create the topic model instance. Could not"
                            " find the 'topic_model' key in the"
                            " resource:\n\n%s" % topic_model)
Example #9
    def __init__(self, fusion, api=None, max_models=None):

        self.resource_id = None
        self.model_ids = None
        self.objective_id = None
        self.distribution = None
        self.models_splits = []
        self.cache_get = None
        self.regression = False
        self.fields = None
        self.class_names = None
        self.importance = {}
        self.api = get_api_connection(api)

        self.resource_id, fusion = get_resource_dict( \
            fusion, "fusion", api=self.api)

        if 'object' in fusion:
            fusion = fusion.get('object', {})
        self.model_ids, self.weights = get_models_weight( \
            fusion['models'])
        model_types = [get_resource_type(model) for model in self.model_ids]

        for model_type in model_types:
            if model_type not in LOCAL_SUPERVISED:
                raise ValueError("Resource type %s is not an allowed"
                                 " supervised model type." % model_type)
        self.importance = fusion.get('importance', [])
        self.missing_numerics = fusion.get('missing_numerics', True)
        if fusion.get('fusion'):
            self.fields = fusion.get( \
                'fusion', {}).get("fields")
            self.objective_id = fusion.get("objective_field")
        self.input_fields = fusion.get("input_fields")

        number_of_models = len(self.model_ids)

        # Downloading the model information to cache it
        if self.api.storage is not None:
            for model_id in self.model_ids:
                if get_resource_type(model_id) == "fusion":
                    Fusion(model_id, api=self.api)
                else:
                    SupervisedModel(model_id, api=self.api)

        if max_models is None:
            self.models_splits = [self.model_ids]
        else:
            self.models_splits = [self.model_ids[index:(index + max_models)]
                                  for index
                                  in range(0, number_of_models, max_models)]

        if self.fields:
            summary = self.fields[self.objective_id]['summary']
            if 'bins' in summary:
                distribution = summary['bins']
            elif 'counts' in summary:
                distribution = summary['counts']
            elif 'categories' in summary:
                distribution = summary['categories']
            else:
                distribution = []
            self.distribution = distribution

        self.regression = \
            self.fields[self.objective_id].get('optype') == 'numeric'

        if not self.regression:
            objective_field = self.fields[self.objective_id]
            categories = objective_field['summary']['categories']
            classes = [category[0] for category in categories]
            self.class_names = sorted(classes)
            self.objective_categories = [category for \
                category, _ in self.fields[self.objective_id][ \
               "summary"]["categories"]]

        ModelFields.__init__( \
            self, self.fields,
            objective_id=self.objective_id)
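
A usage sketch (placeholder id). max_models only bounds how many component models are loaded per batch, not which ones contribute:

    from bigml.fusion import Fusion

    local_fusion = Fusion("fusion/5afde4a8bf24f9158c000000")
    # the fusion combines its component models' predictions
    print(local_fusion.predict({"petal length": 4.2}))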
Example #10
    def __init__(self, model, api=None, fields=None):

        if check_model_structure(model):
            self.resource_id = model['resource']
        else:
            # If only the model id is provided, the short version of the model
            # resource is used to build a basic summary of the model
            self.api = get_api_connection(api)
            self.resource_id = get_model_id(model)
            if self.resource_id is None:
                raise Exception(
                    self.api.error_message(model,
                                           resource_type='model',
                                           method='get'))
            if fields is not None and isinstance(fields, dict):
                query_string = EXCLUDE_FIELDS
            else:
                query_string = ONLY_MODEL
            model = retrieve_resource(self.api,
                                      self.resource_id,
                                      query_string=query_string)
            # Stored copies of the model structure might lack some necessary
            # keys
            if not check_model_structure(model):
                model = self.api.get_model(self.resource_id,
                                           query_string=query_string)

        if 'object' in model and isinstance(model['object'], dict):
            model = model['object']

        if 'model' in model and isinstance(model['model'], dict):
            status = get_status(model)
            if 'code' in status and status['code'] == FINISHED:
                if (fields is None and ('model_fields' in model['model']
                                        or 'fields' in model['model'])):
                    fields = model['model'].get(
                        'model_fields', model['model'].get('fields', []))
                    # model_fields doesn't contain the datetime fields
                    fields.update(
                        datetime_fields(model['model'].get('fields', {})))
                    # pagination or exclusion might cause a field not to
                    # be in available fields dict
                    if not all(key in model['model']['fields']
                               for key in fields.keys()):
                        raise Exception("Some fields are missing"
                                        " to generate a local model."
                                        " Please, provide a model with"
                                        " the complete list of fields.")
                    for field in fields:
                        field_info = model['model']['fields'][field]
                        if 'summary' in field_info:
                            fields[field]['summary'] = field_info['summary']
                        fields[field]['name'] = field_info['name']
                objective_field = model['objective_fields']
                missing_tokens = model['model'].get('missing_tokens')

                ModelFields.__init__(
                    self,
                    fields,
                    objective_id=extract_objective(objective_field),
                    missing_tokens=missing_tokens)
                self.description = model['description']
                self.field_importance = model['model'].get('importance', None)
                if self.field_importance:
                    self.field_importance = [
                        element for element in self.field_importance
                        if element[0] in fields
                    ]
                self.locale = model.get('locale', DEFAULT_LOCALE)

            else:
                raise Exception("The model isn't finished yet")
        else:
            raise Exception("Cannot create the BaseModel instance. Could not"
                            " find the 'model' key in the resource:\n\n%s" %
                            model)
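
When only an id is given, a trimmed copy of the resource is downloaded; passing an already-known fields dict switches the query string from ONLY_MODEL to EXCLUDE_FIELDS so the potentially large fields section is not fetched twice. A hedged sketch (placeholder id; in practice you would normally build the Model subclass rather than BaseModel directly):

    from bigml.basemodel import BaseModel

    base = BaseModel("model/5afde4a8bf24f9158c000000")
    # reuse the fields we already have to skip downloading them again
    base_again = BaseModel("model/5afde4a8bf24f9158c000000",
                           fields=base.fields)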
Example #11
    def __init__(self, ensemble, api=None, max_models=None, cache_get=None):

        self.models_splits = []
        self.multi_model = None
        self.api = get_api_connection(api)
        self.fields = None
        self.class_names = None
        if use_cache(cache_get):
            # using a cache to store the model attributes
            self.__dict__ = load(get_ensemble_id(ensemble), cache_get)
            self.api = get_api_connection(api)
            if len(self.models_splits) == 1:
                # retrieve the models from a cache get function
                try:
                    models = [
                        Model(model_id, cache_get=cache_get)
                        for model_id in self.models_splits[0]
                    ]
                except Exception as exc:
                    raise Exception('Error while calling the user-given'
                                    ' function %s: %s' %
                                    (cache_get.__name__, str(exc)))
                self.multi_model = MultiModel(models,
                                              self.api,
                                              fields=self.fields,
                                              class_names=self.class_names,
                                              cache_get=cache_get)
            return

        self.resource_id = None
        self.objective_id = None
        self.distributions = None
        self.distribution = None
        self.boosting = None
        self.boosting_offsets = None
        self.cache_get = None
        self.regression = False
        self.importance = {}
        query_string = ONLY_MODEL
        no_check_fields = False
        self.input_fields = []
        if isinstance(ensemble, list):
            if all([isinstance(model, Model) for model in ensemble]):
                models = ensemble
                self.model_ids = [
                    local_model.resource_id for local_model in models
                ]
            else:
                try:
                    models = [get_model_id(model) for model in ensemble]
                    self.model_ids = models
                except ValueError as exc:
                    raise ValueError('Failed to verify the list of models.'
                                     ' Check your model id values: %s' %
                                     str(exc))

        else:
            ensemble = self.get_ensemble_resource(ensemble)
            self.resource_id = get_ensemble_id(ensemble)
            if not check_local_but_fields(ensemble):
                # avoid checking fields because of old ensembles
                ensemble = retrieve_resource(self.api,
                                             self.resource_id,
                                             no_check_fields=True)

            if ensemble['object'].get('type') == BOOSTING:
                self.boosting = ensemble['object'].get('boosting')
            models = ensemble['object']['models']
            self.distributions = ensemble['object'].get('distributions', [])
            self.importance = ensemble['object'].get('importance', [])
            self.model_ids = models
            # new ensembles have the fields structure
            if ensemble['object'].get('ensemble'):
                self.fields = ensemble['object'].get( \
                    'ensemble', {}).get("fields")
                self.objective_id = ensemble['object'].get("objective_field")
                query_string = EXCLUDE_FIELDS
                no_check_fields = True
            self.input_fields = ensemble['object'].get('input_fields')

        number_of_models = len(models)
        if max_models is None:
            self.models_splits = [models]
        else:
            self.models_splits = [
                models[index:(index + max_models)]
                for index in range(0, number_of_models, max_models)
            ]
        if len(self.models_splits) == 1:
            if not isinstance(models[0], Model):
                if use_cache(cache_get):
                    # retrieve the models from a cache get function
                    try:
                        models = [
                            Model(model_id, cache_get=cache_get)
                            for model_id in self.models_splits[0]
                        ]
                        self.cache_get = cache_get
                    except Exception as exc:
                        raise Exception('Error while calling the user-given'
                                        ' function %s: %s' %
                                        (cache_get.__name__, str(exc)))
                else:
                    models = [retrieve_resource( \
                        self.api,
                        model_id,
                        query_string=query_string,
                        no_check_fields=no_check_fields)
                              for model_id in self.models_splits[0]]
            model = models[0]

        else:
            # only retrieving first model
            self.cache_get = cache_get
            if not isinstance(models[0], Model):
                if use_cache(cache_get):
                    # retrieve the models from a cache get function
                    try:
                        model = Model(self.models_splits[0][0],
                                      cache_get=cache_get)
                        self.cache_get = cache_get
                    except Exception as exc:
                        raise Exception('Error while calling the user-given'
                                        ' function %s: %s' %
                                        (cache_get.__name__, str(exc)))
                else:
                    model = retrieve_resource( \
                        self.api,
                        self.models_splits[0][0],
                        query_string=query_string,
                        no_check_fields=no_check_fields)

                models = [model]

        if self.distributions is None:
            try:
                self.distributions = []
                for model in models:
                    self.distributions.append(
                        {'training': model.root_distribution})
            except AttributeError:
                self.distributions = [
                    model['object']['model']['distribution']
                    for model in models
                ]

        if self.boosting is None:
            self._add_models_attrs(model, max_models)

        if self.fields is None:
            self.fields, self.objective_id = self.all_model_fields(
                max_models=max_models)

        if self.fields:
            add_distribution(self)
        self.regression = \
            self.fields[self.objective_id].get('optype') == NUMERIC
        if self.boosting:
            self.boosting_offsets = ensemble['object'].get('initial_offset',
                                                           0) \
                if self.regression else dict(ensemble['object'].get( \
                    'initial_offsets', []))
        if not self.regression:
            try:
                objective_field = self.fields[self.objective_id]
                categories = objective_field['summary']['categories']
                classes = [category[0] for category in categories]
            except (AttributeError, KeyError):
                classes = set()
                for distribution in self.distributions:
                    for category in distribution['training']['categories']:
                        classes.add(category[0])

            self.class_names = sorted(classes)
            self.objective_categories = [category for \
                category, _ in self.fields[self.objective_id][ \
               "summary"]["categories"]]

        ModelFields.__init__( \
            self, self.fields,
            objective_id=self.objective_id)

        if len(self.models_splits) == 1:
            self.multi_model = MultiModel(models,
                                          self.api,
                                          fields=self.fields,
                                          class_names=self.class_names)
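
A usage sketch (placeholder id). max_models caps how many trees are held in memory at once; the ensemble is then evaluated split by split:

    from bigml.ensemble import Ensemble

    local_ensemble = Ensemble("ensemble/5afde4a8bf24f9158c000000",
                              max_models=10)
    print(local_ensemble.predict({"petal length": 4.2}))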
Example #12
    def __init__(self, model, api=None, fields=None, cache_get=None):
        """The Model constructor can be given as first argument:
            - a model structure
            - a model id
            - a path to a JSON file containing a model structure

        """

        if use_cache(cache_get):
            # using a cache to store the model attributes
            self.__dict__ = load(get_model_id(model), cache_get)
            return

        self.resource_id = None
        self.ids_map = {}
        self.terms = {}
        self.regression = False
        self.boosting = None
        self.class_names = None
        api = get_api_connection(api)
        # retrieving model information from the given structure, id or file
        self.resource_id, model = get_resource_dict( \
            model, "model", api=api, no_check_fields=fields is not None)

        if 'object' in model and isinstance(model['object'], dict):
            model = model['object']

        if 'model' in model and isinstance(model['model'], dict):
            status = get_status(model)
            if 'code' in status and status['code'] == FINISHED:
                # fill boosting info before creating modelfields
                if model.get("boosted_ensemble"):
                    self.boosting = model.get('boosting', False)
                if self.boosting == {}:
                    self.boosting = False

                self.input_fields = model["input_fields"]
                BaseModel.__init__(self, model, api=api, fields=fields)

                root = model['model']['root']
                self.weighted = "weighted_objective_summary" in root

                if self.boosting:
                    # build boosted tree
                    self.tree = b.build_boosting_tree( \
                        model['model']['root'])
                elif self.regression:
                    self.root_distribution = \
                        model['model']['distribution']['training']
                    # build regression tree
                    self.tree = r.build_regression_tree(root, \
                        distribution=self.root_distribution, \
                        weighted=self.weighted)
                else:
                    # build classification tree
                    self.root_distribution = \
                        model['model']['distribution']['training']
                    self.laplacian_term = laplacian_term( \
                        extract_distribution(self.root_distribution)[1],
                        self.weighted)
                    self.tree = c.build_classification_tree( \
                        model['model']['root'], \
                        distribution=self.root_distribution, \
                        weighted=self.weighted)
                    self.class_names = sorted( \
                        [category[0] for category in \
                        self.root_distribution["categories"]])
                    self.objective_categories = [category for \
                        category, _ in self.fields[self.objective_id][ \
                       "summary"]["categories"]]

                if self.boosting:
                    self.tree_type = BOOSTING
                    self.offsets = b.OFFSETS
                elif self.regression:
                    self.tree_type = REGRESSION
                    self.offsets = r.OFFSETS[str(self.weighted)]
                else:
                    self.tree_type = CLASSIFICATION
                    self.offsets = c.OFFSETS[str(self.weighted)]

            else:
                raise Exception("Cannot create the Model instance."
                                " Only correctly finished models can be"
                                " used. The model status is currently:"
                                " %s\n" % STATUSES[status['code']])
        else:
            raise Exception("Cannot create the Model instance. Could not"
                            " find the 'model' key in the resource:"
                            "\n\n%s" % model)
Example #13
    def __init__(self, pca, api=None, cache_get=None):

        if use_cache(cache_get):
            # using a cache to store the model attributes
            self.__dict__ = load(get_pca_id(pca), cache_get)
            return

        self.resource_id = None
        self.input_fields = []
        self.default_numeric_value = None
        self.term_forms = {}
        self.tag_clouds = {}
        self.dataset_field_types = {}
        self.term_analysis = {}
        self.categories = {}
        self.categories_probabilities = {}
        self.items = {}
        self.fields = {}
        self.item_analysis = {}
        self.standardize = None
        self.famd_j = 1
        api = get_api_connection(api)

        self.resource_id, pca = get_resource_dict( \
            pca, "pca", api=api)

        if 'object' in pca and \
            isinstance(pca['object'], dict):
            pca = pca['object']
        try:
            self.input_fields = pca.get("input_fields", [])
            self.default_numeric_value = pca.get("default_numeric_value")
            self.dataset_field_types = pca.get("dataset_field_types", {})
            self.famd_j = 1 if (self.dataset_field_types['categorical'] != \
                self.dataset_field_types['total']) else \
                self.dataset_field_types['categorical']

        except KeyError:
            raise ValueError("Failed to find the pca expected "
                             "JSON structure. Check your arguments.")
        if 'pca' in pca and \
            isinstance(pca['pca'], dict):
            status = get_status(pca)
            if 'code' in status and status['code'] == FINISHED:
                pca_info = pca['pca']
                fields = pca_info.get('fields', {})
                self.fields = fields
                if not self.input_fields:
                    self.input_fields = [ \
                        field_id for field_id, _ in
                        sorted(list(self.fields.items()),
                               key=lambda x: x[1].get("column_number"))]
                missing_tokens = pca_info.get("missing_tokens")
                ModelFields.__init__(self,
                                     fields,
                                     objective_id=None,
                                     terms=True,
                                     categories=True,
                                     numerics=False,
                                     missing_tokens=missing_tokens)

                for field_id in self.categories:
                    field = self.fields[field_id]
                    probabilities = [probability for _, probability in \
                                     field["summary"]["categories"]]
                    if field["summary"].get("missing_count", 0) > 0:
                        probabilities.append(field["summary"]["missing_count"])
                    total = float(sum(probabilities))
                    if total > 0:
                        probabilities = [probability / total for probability \
                            in probabilities]
                    self.categories_probabilities[field_id] = probabilities
                self.components = pca_info.get('components')
                self.eigenvectors = pca_info.get('eigenvectors')
                self.cumulative_variance = pca_info.get('cumulative_variance')
                self.text_stats = pca_info.get('text_stats')
                self.standardized = pca_info.get('standardized')
                self.variance = pca_info.get('variance')

            else:
                raise Exception("The pca isn't finished yet")
        else:
            raise Exception("Cannot create the PCA instance."
                            " Could not find the 'pca' key"
                            " in the resource:\n\n%s" % pca)
Example #14
    def __init__(self, ensemble, model_fns_dir, api=None):

        self.resource_id = None
        # to be deprecated
        self.ensemble_id = None
        self.objective_id = None
        self.distributions = None
        self.distribution = None
        self.models_splits = []
        self.multi_model = None
        self.boosting = None
        self.boosting_offsets = None
        self.regression = False
        self.fields = None
        self.class_names = None
        self.importance = {}
        self.predict_functions = []
        self.api = get_api_connection(api)

        ensemble = self.get_ensemble_resource(ensemble)
        self.resource_id = get_ensemble_id(ensemble)
        self.ensemble_id = self.resource_id

        if not check_local_info(ensemble):
            # avoid checking fields because of old ensembles
            ensemble = retrieve_resource(self.api,
                                         self.resource_id,
                                         no_check_fields=True)
        if ensemble['object'].get('type') == BOOSTING:
            self.boosting = ensemble['object'].get('boosting')
        models = ensemble['object']['models']
        self.distributions = ensemble['object'].get('distributions', [])
        self.importance = ensemble['object'].get('importance', [])
        self.model_ids = models
        # new ensembles have the fields structure
        if ensemble['object'].get('ensemble'):
            self.fields = ensemble['object'].get( \
                'ensemble', {}).get("fields")
            self.objective_id = ensemble['object'].get("objective_field")
            self.input_fields = ensemble['object'].get("input_fields")

        if model_fns_dir:
            self.get_model_fns(model_fns_dir)
        else:
            raise ValueError("The EnsemblePredictor object expects as"
                             " argument the directory where the models"
                             " predict functions are stored. To generate "
                             " them, please check the 'bigmler export'"
                             " command.")

        if self.fields:
            add_distribution(self)

        self.regression = \
            self.fields[self.objective_id].get('optype') == NUMERIC
        if self.boosting:
            self.boosting_offsets = ensemble['object'].get('initial_offset',
                                                           0) \
                if self.regression else dict(ensemble['object'].get( \
                    'initial_offsets', []))

        if not self.regression and self.boosting is None:
            try:
                objective_field = self.fields[self.objective_id]
                categories = objective_field['summary']['categories']
                classes = [category[0] for category in categories]
            except (AttributeError, KeyError):
                classes = set()
                for distribution in self.distributions:
                    for category in distribution['training']['categories']:
                        classes.add(category[0])

            self.class_names = sorted(classes)