def get_resource_dict(resource, resource_type, api=None):
    """Extracts the resource JSON info as a dict from the first argument of
       the local object constructors, which can be:
        - the path to a file that contains the JSON
        - the ID of the resource
        - the resource dict itself
    """
    if api is None:
        api = BigML(storage=STORAGE)
    get_id = ID_GETTERS[resource_type]
    resource_id = None
    # the string can be a path to a JSON file
    if isinstance(resource, basestring):
        try:
            with open(resource) as resource_file:
                resource = json.load(resource_file)
            resource_id = get_id(resource)
            if resource_id is None:
                raise ValueError("The JSON file does not seem"
                                 " to contain a valid BigML %s"
                                 " representation." % resource_type)
        except IOError:
            # if it is not a path, it can be a model id
            resource_id = get_id(resource)
            if resource_id is None:
                if resource.find("%s/" % resource_type) > -1:
                    raise Exception(
                        api.error_message(resource,
                                          resource_type=resource_type,
                                          method="get"))
                else:
                    raise IOError("Failed to open the expected JSON file"
                                  " at %s." % resource)
        except ValueError:
            raise ValueError("Failed to interpret %s."
                             " JSON file expected." % resource)

    # checks whether the information needed for local predictions is in
    # the first argument
    if isinstance(resource, dict) and \
            not check_model_fields(resource):
        # if the fields used by the model are not
        # available, use only ID to retrieve it again
        resource = get_id(resource)
        resource_id = resource

    if not (isinstance(resource, dict) and 'resource' in resource and
            resource['resource'] is not None):
        query_string = ONLY_MODEL
        resource = retrieve_resource(api, resource_id,
                                     query_string=query_string)
    else:
        resource_id = get_id(resource)

    return resource_id, resource
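
# Hedged usage sketch for `get_resource_dict`: the resource ID and storage
# path below are hypothetical placeholders, and a live call assumes valid
# BigML credentials or a local storage copy of the resource JSON.
def _example_get_resource_dict():
    # from a resource ID (resolved via local storage or the API)
    resource_id, resource = get_resource_dict(
        "model/5af06df94e17277501000010", "model")
    # from a path to a JSON dump of the same resource
    resource_id, resource = get_resource_dict(
        "./storage/model_5af06df94e17277501000010", "model")
    return resource_id, resource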
def check_local_info(model):
    """Whether the information in `model` is enough to use it locally
    """
    try:
        return check_local_but_fields(model) and \
            check_model_fields(model)
    except Exception:
        return False
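
# Hedged sketch: `check_local_info` can gate whether a stored resource dict
# is complete enough for offline use. `stored_model` is a hypothetical dict
# loaded from a local JSON dump.
def _example_check_local_info(stored_model):
    if check_local_info(stored_model):
        return stored_model  # safe to build a local predictor from it
    return None              # incomplete: re-download it via the API instead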
def retrieve_resource(api, resource_id, query_string=ONLY_MODEL,
                      no_check_fields=False):
    """Retrieves resource info either from a local repo or
       from the remote server
    """
    if api.storage is not None:
        try:
            stored_resource = "%s%s%s" % (api.storage,
                                          os.sep,
                                          resource_id.replace("/", "_"))
            with open(stored_resource) as resource_file:
                resource = json.loads(resource_file.read())
            # we check that the stored resource has enough fields information
            # for local predictions to work. Otherwise we should retrieve it.
            if no_check_fields or check_model_fields(resource):
                return resource
        except ValueError:
            raise ValueError("The file %s contains no JSON" % stored_resource)
        except IOError:
            pass
    api_getter = api.getters[get_resource_type(resource_id)]
    resource = check_resource(resource_id, api_getter, query_string)
    return resource
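
# Hedged usage sketch for `retrieve_resource`: the ID is a hypothetical
# placeholder. When `api` was created with a `storage` directory, the local
# JSON copy is tried first; otherwise the resource is fetched remotely.
def _example_retrieve_resource(api):
    return retrieve_resource(api, "model/5af06df94e17277501000010",
                             query_string=ONLY_MODEL)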
def __init__(self, time_series, api=None):

    self.resource_id = None
    self.input_fields = []
    self.objective_fields = []
    self.all_numeric_objectives = False
    self.period = 1
    self.ets_models = {}
    self.error = None
    self.damped_trend = None
    self.seasonality = None
    self.trend = None
    self.time_range = {}
    self.field_parameters = {}
    self._forecast = []

    # checks whether the information needed for local predictions is in
    # the first argument
    if isinstance(time_series, dict) and \
            not check_model_fields(time_series):
        # if the fields used by the time series are not
        # available, use only ID to retrieve it again
        time_series = get_time_series_id( \
            time_series)
        self.resource_id = time_series

    if not (isinstance(time_series, dict) and 'resource' in time_series and
            time_series['resource'] is not None):
        if api is None:
            api = BigML(storage=STORAGE)
        self.resource_id = get_time_series_id(time_series)
        if self.resource_id is None:
            raise Exception(
                api.error_message(time_series,
                                  resource_type='time_series',
                                  method='get'))
        query_string = ONLY_MODEL
        time_series = retrieve_resource(
            api, self.resource_id, query_string=query_string)
    else:
        self.resource_id = get_time_series_id(time_series)

    if 'object' in time_series and \
            isinstance(time_series['object'], dict):
        time_series = time_series['object']
    try:
        self.input_fields = time_series.get("input_fields", [])
        self._forecast = time_series.get("forecast")
        self.objective_fields = time_series.get(
            "objective_fields", [])
        objective_field = time_series['objective_field'] if \
            time_series.get('objective_field') else \
            time_series['objective_fields']
    except KeyError:
        raise ValueError("Failed to find the time series expected "
                         "JSON structure. Check your arguments.")
    if 'time_series' in time_series and \
            isinstance(time_series['time_series'], dict):
        status = get_status(time_series)
        if 'code' in status and status['code'] == FINISHED:
            time_series_info = time_series['time_series']
            fields = time_series_info.get('fields', {})
            self.fields = fields
            if not self.input_fields:
                self.input_fields = [ \
                    field_id for field_id, _ in
                    sorted(self.fields.items(),
                           key=lambda x: x[1].get("column_number"))]
            self.all_numeric_objectives = time_series_info.get( \
                'all_numeric_objectives')
            self.period = time_series_info.get('period', 1)
            self.ets_models = time_series_info.get('ets_models', {})
            self.error = time_series_info.get('error')
            self.damped_trend = time_series_info.get('damped_trend')
            self.seasonality = time_series_info.get('seasonality')
            self.trend = time_series_info.get('trend')
            self.time_range = time_series_info.get('time_range')
            self.field_parameters = time_series_info.get( \
                'field_parameters', {})

            objective_id = extract_objective(objective_field)
            ModelFields.__init__(
                self, fields,
                objective_id=objective_id)
        else:
            raise Exception("The time series isn't finished yet")
    else:
        raise Exception("Cannot create the TimeSeries instance."
                        " Could not find the 'time_series' key"
                        " in the resource:\n\n%s" %
                        time_series)
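
# Hedged usage sketch, assuming this constructor belongs to the local
# `TimeSeries` class of the bindings. The ID is a hypothetical placeholder;
# instantiating from an ID reads the resource from storage or downloads it.
def _example_time_series():
    local_time_series = TimeSeries("timeseries/5af06df94e17277501000010")
    # forecasts can then be produced locally, e.g. (field ID is assumed):
    # local_time_series.forecast({"000001": {"horizon": 10}})
    return local_time_series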
def __init__(self, logistic_regression, api=None):

    self.resource_id = None
    self.input_fields = []
    self.term_forms = {}
    self.tag_clouds = {}
    self.term_analysis = {}
    self.items = {}
    self.item_analysis = {}
    self.categories = {}
    self.coefficients = {}
    self.dataset_field_types = {}
    self.field_codings = {}
    self.numeric_fields = {}
    self.bias = None
    self.missing_numerics = None
    self.c = None
    self.eps = None
    self.lr_normalize = None
    self.balance_fields = None
    self.regularization = None
    old_coefficients = False

    # checks whether the information needed for local predictions is in
    # the first argument
    if isinstance(logistic_regression, dict) and \
            not check_model_fields(logistic_regression):
        # if the fields used by the logistic regression are not
        # available, use only ID to retrieve it again
        logistic_regression = get_logistic_regression_id( \
            logistic_regression)
        self.resource_id = logistic_regression

    if not (isinstance(logistic_regression, dict) and
            'resource' in logistic_regression and
            logistic_regression['resource'] is not None):
        if api is None:
            api = BigML(storage=STORAGE)
        self.resource_id = get_logistic_regression_id(logistic_regression)
        if self.resource_id is None:
            raise Exception(
                api.error_message(logistic_regression,
                                  resource_type='logistic_regression',
                                  method='get'))
        query_string = ONLY_MODEL
        logistic_regression = retrieve_resource(
            api, self.resource_id, query_string=query_string)
    else:
        self.resource_id = get_logistic_regression_id(logistic_regression)

    if 'object' in logistic_regression and \
            isinstance(logistic_regression['object'], dict):
        logistic_regression = logistic_regression['object']
    try:
        self.input_fields = logistic_regression.get("input_fields", [])
        self.dataset_field_types = logistic_regression.get(
            "dataset_field_types", {})
        objective_field = logistic_regression['objective_fields'] if \
            logistic_regression['objective_fields'] else \
            logistic_regression['objective_field']
    except KeyError:
        raise ValueError("Failed to find the logistic regression expected "
                         "JSON structure. Check your arguments.")
    if 'logistic_regression' in logistic_regression and \
            isinstance(logistic_regression['logistic_regression'], dict):
        status = get_status(logistic_regression)
        if 'code' in status and status['code'] == FINISHED:
            logistic_regression_info = logistic_regression[ \
                'logistic_regression']
            fields = logistic_regression_info.get('fields', {})

            if not self.input_fields:
                self.input_fields = [ \
                    field_id for field_id, _ in
                    sorted(fields.items(),
                           key=lambda x: x[1].get("column_number"))]
            self.coefficients.update(logistic_regression_info.get( \
                'coefficients', []))
            if not isinstance(self.coefficients.values()[0][0], list):
                old_coefficients = True
            self.bias = logistic_regression_info.get('bias', True)
            self.c = logistic_regression_info.get('c')
            self.eps = logistic_regression_info.get('eps')
            self.lr_normalize = logistic_regression_info.get('normalize')
            self.balance_fields = logistic_regression_info.get( \
                'balance_fields')
            self.regularization = logistic_regression_info.get( \
                'regularization')
            self.field_codings = logistic_regression_info.get( \
                'field_codings', {})
            # old models have no such attribute, so we set it to False in
            # this case
            self.missing_numerics = logistic_regression_info.get( \
                'missing_numerics', False)
            objective_id = extract_objective(objective_field)
            for field_id, field in fields.items():
                if field['optype'] == 'text':
                    self.term_forms[field_id] = {}
                    self.term_forms[field_id].update(
                        field['summary']['term_forms'])
                    self.tag_clouds[field_id] = []
                    self.tag_clouds[field_id] = [tag for [tag, _] in field[
                        'summary']['tag_cloud']]
                    self.term_analysis[field_id] = {}
                    self.term_analysis[field_id].update(
                        field['term_analysis'])
                if field['optype'] == 'items':
                    self.items[field_id] = []
                    self.items[field_id] = [item for item, _ in \
                        field['summary']['items']]
                    self.item_analysis[field_id] = {}
                    self.item_analysis[field_id].update(
                        field['item_analysis'])
                if field['optype'] == 'categorical':
                    self.categories[field_id] = [category for \
                        [category, _] in field['summary']['categories']]
                if self.missing_numerics and field['optype'] == 'numeric':
                    self.numeric_fields[field_id] = True
            ModelFields.__init__(
                self, fields,
                objective_id=objective_id)
            self.field_codings = logistic_regression_info.get( \
                'field_codings', {})
            self.format_field_codings()
            # iterate over a snapshot: the loop both adds and deletes keys
            for field_id in list(self.field_codings):
                if field_id not in fields and \
                        field_id in self.inverted_fields:
                    self.field_codings.update( \
                        {self.inverted_fields[field_id]: \
                         self.field_codings[field_id]})
                    del self.field_codings[field_id]
            if old_coefficients:
                self.map_coefficients()
        else:
            raise Exception("The logistic regression isn't finished yet")
    else:
        raise Exception("Cannot create the LogisticRegression instance."
                        " Could not find the 'logistic_regression' key"
                        " in the resource:\n\n%s" %
                        logistic_regression)
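
# Hedged usage sketch, assuming this constructor belongs to the local
# `LogisticRegression` class. The ID is a hypothetical placeholder; an ID,
# a resource dict, or a path to its JSON dump is accepted.
def _example_logistic_regression():
    local_lr = LogisticRegression(
        "logisticregression/5af06df94e17277501000010")
    # predictions are then computed locally, e.g. (field names assumed):
    # local_lr.predict({"petal length": 2, "petal width": 0.5})
    return local_lr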
def __init__(self, anomaly, api=None):

    self.resource_id = None
    self.sample_size = None
    self.input_fields = None
    self.mean_depth = None
    self.expected_mean_depth = None
    self.iforest = None
    self.top_anomalies = None
    self.id_fields = []

    # checks whether the information needed for local predictions is in
    # the first argument
    if isinstance(anomaly, dict) and \
            not check_model_fields(anomaly):
        # if the fields used by the anomaly detector are not
        # available, use only ID to retrieve it again
        anomaly = get_anomaly_id(anomaly)
        self.resource_id = anomaly

    if not (isinstance(anomaly, dict) and 'resource' in anomaly and
            anomaly['resource'] is not None):
        if api is None:
            api = BigML(storage=STORAGE)
        self.resource_id = get_anomaly_id(anomaly)
        if self.resource_id is None:
            raise Exception(api.error_message(anomaly,
                                              resource_type='anomaly',
                                              method='get'))
        query_string = ONLY_MODEL
        anomaly = retrieve_resource(api, self.resource_id,
                                    query_string=query_string)
    else:
        self.resource_id = get_anomaly_id(anomaly)

    if 'object' in anomaly and isinstance(anomaly['object'], dict):
        anomaly = anomaly['object']
        self.sample_size = anomaly.get('sample_size')
        self.input_fields = anomaly.get('input_fields')
        self.id_fields = anomaly.get('id_fields', [])
    if 'model' in anomaly and isinstance(anomaly['model'], dict):
        ModelFields.__init__(self, anomaly['model'].get('fields'))
        if ('top_anomalies' in anomaly['model'] and
                isinstance(anomaly['model']['top_anomalies'], list)):
            self.mean_depth = anomaly['model'].get('mean_depth')
            status = get_status(anomaly)
            if 'code' in status and status['code'] == FINISHED:
                self.expected_mean_depth = None
                if self.mean_depth is None or self.sample_size is None:
                    raise Exception("The anomaly data is not complete. "
                                    "Score will"
                                    " not be available")
                else:
                    default_depth = (
                        2 * (DEPTH_FACTOR + \
                        math.log(self.sample_size - 1) - \
                        (float(self.sample_size - 1) / self.sample_size)))
                    self.expected_mean_depth = min(self.mean_depth,
                                                   default_depth)
                iforest = anomaly['model'].get('trees', [])
                if iforest:
                    self.iforest = [
                        AnomalyTree(anomaly_tree['root'], self.fields)
                        for anomaly_tree in iforest]
                self.top_anomalies = anomaly['model']['top_anomalies']
            else:
                raise Exception("The anomaly isn't finished yet")
        else:
            raise Exception("Cannot create the Anomaly instance. Could not"
                            " find the 'top_anomalies' key in the"
                            " resource:\n\n%s" % anomaly['model'].keys())
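
# Hedged usage sketch, assuming this constructor belongs to the local
# `Anomaly` class. The ID is a hypothetical placeholder.
def _example_anomaly():
    local_anomaly = Anomaly("anomaly/5af06df94e17277501000010")
    # scores are then computed locally from the stored iforest, e.g.
    # (field name assumed):
    # local_anomaly.anomaly_score({"src_bytes": 350})
    return local_anomaly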
def __init__(self, cluster, api=None):

    self.resource_id = None
    self.centroids = None
    self.cluster_global = None
    self.total_ss = None
    self.within_ss = None
    self.between_ss = None
    self.ratio_ss = None
    self.critical_value = None
    self.default_numeric_value = None
    self.k = None
    self.summary_fields = []
    self.scales = {}
    self.term_forms = {}
    self.tag_clouds = {}
    self.term_analysis = {}
    self.item_analysis = {}
    self.items = {}
    self.datasets = {}
    self.api = api

    # checks whether the information needed for local predictions is in
    # the first argument
    if isinstance(cluster, dict) and \
            not check_model_fields(cluster):
        # if the fields used by the cluster are not
        # available, use only ID to retrieve it again
        cluster = get_cluster_id(cluster)
        self.resource_id = cluster

    if not (isinstance(cluster, dict) and 'resource' in cluster and
            cluster['resource'] is not None):
        if api is None:
            api = BigML(storage=STORAGE)
            self.api = api
        self.resource_id = get_cluster_id(cluster)
        if self.resource_id is None:
            raise Exception(
                api.error_message(cluster,
                                  resource_type='cluster',
                                  method='get'))
        query_string = ONLY_MODEL
        cluster = retrieve_resource(api, self.resource_id,
                                    query_string=query_string)
    else:
        self.resource_id = get_cluster_id(cluster)

    if 'object' in cluster and isinstance(cluster['object'], dict):
        cluster = cluster['object']

    if 'clusters' in cluster and isinstance(cluster['clusters'], dict):
        status = get_status(cluster)
        if 'code' in status and status['code'] == FINISHED:
            self.default_numeric_value = cluster.get( \
                "default_numeric_value")
            self.summary_fields = cluster.get("summary_fields", [])
            self.datasets = cluster.get("cluster_datasets", {})
            the_clusters = cluster['clusters']
            cluster_global = the_clusters.get('global')
            clusters = the_clusters['clusters']
            self.centroids = [Centroid(centroid) for centroid in clusters]
            self.cluster_global = cluster_global
            if cluster_global:
                self.cluster_global = Centroid(cluster_global)
                # "global" has no "name" and "count", so we set them
                self.cluster_global.name = GLOBAL_CLUSTER_LABEL
                self.cluster_global.count = \
                    self.cluster_global.distance['population']
            self.total_ss = the_clusters.get('total_ss')
            self.within_ss = the_clusters.get('within_ss')
            if not self.within_ss:
                self.within_ss = sum(centroid.distance['sum_squares'] for
                                     centroid in self.centroids)
            self.between_ss = the_clusters.get('between_ss')
            self.ratio_ss = the_clusters.get('ratio_ss')
            self.critical_value = cluster.get('critical_value', None)
            self.k = cluster.get('k')
            self.scales.update(cluster['scales'])
            self.term_forms = {}
            self.tag_clouds = {}
            self.term_analysis = {}
            fields = cluster['clusters']['fields']
            summary_fields = cluster['summary_fields']
            for field_id in summary_fields:
                try:
                    del fields[field_id]
                except KeyError:
                    # clusters retrieved from API will only contain
                    # model fields
                    pass
            for field_id, field in fields.items():
                if field['optype'] == 'text':
                    self.term_forms[field_id] = {}
                    self.term_forms[field_id].update(
                        field['summary']['term_forms'])
                    self.tag_clouds[field_id] = {}
                    self.tag_clouds[field_id].update(
                        field['summary']['tag_cloud'])
                    self.term_analysis[field_id] = {}
                    self.term_analysis[field_id].update(
                        field['term_analysis'])
                if field['optype'] == 'items':
                    self.items[field_id] = {}
                    self.items[field_id].update(
                        dict(field['summary']['items']))
                    self.item_analysis[field_id] = {}
                    self.item_analysis[field_id].update(
                        field['item_analysis'])
            ModelFields.__init__(self, fields)
            if not all(
                    [field_id in self.fields for
                     field_id in self.scales]):
                raise Exception("Some fields are missing"
                                " to generate a local cluster."
                                " Please, provide a cluster with"
                                " the complete list of fields.")
        else:
            raise Exception("The cluster isn't finished yet")
    else:
        raise Exception("Cannot create the Cluster instance. Could not"
                        " find the 'clusters' key in the resource:\n\n%s" %
                        cluster)
def __init__(self, model, api=None, fields=None):
    """The Model constructor can be given as first argument:
        - a model structure
        - a model id
        - a path to a JSON file containing a model structure
    """
    self.resource_id = None
    self.ids_map = {}
    self.terms = {}
    self.regression = False
    self.boosting = None
    self.class_names = None
    if not hasattr(self, 'tree_class'):
        self.tree_class = Tree
    # the string can be a path to a JSON file
    if isinstance(model, basestring):
        try:
            with open(model) as model_file:
                model = json.load(model_file)
            self.resource_id = get_model_id(model)
            if self.resource_id is None:
                raise ValueError("The JSON file does not seem"
                                 " to contain a valid BigML model"
                                 " representation.")
        except IOError:
            # if it is not a path, it can be a model id
            self.resource_id = get_model_id(model)
            if self.resource_id is None:
                if model.find('model/') > -1:
                    raise Exception(
                        api.error_message(model,
                                          resource_type='model',
                                          method='get'))
                else:
                    raise IOError("Failed to open the expected JSON file"
                                  " at %s" % model)
        except ValueError:
            raise ValueError("Failed to interpret %s."
                             " JSON file expected." % model)

    # checks whether the information needed for local predictions is in
    # the first argument
    if isinstance(model, dict) and \
            not fields and \
            not check_model_fields(model):
        # if the fields used by the model are not
        # available, use only ID to retrieve it again
        model = get_model_id(model)
        self.resource_id = model

    if not (isinstance(model, dict) and 'resource' in model and
            model['resource'] is not None):
        if api is None:
            api = BigML(storage=STORAGE)
        if fields is not None and isinstance(fields, dict):
            query_string = EXCLUDE_FIELDS
        else:
            query_string = ONLY_MODEL
        model = retrieve_resource(api, self.resource_id,
                                  query_string=query_string)
    else:
        self.resource_id = get_model_id(model)
    BaseModel.__init__(self, model, api=api, fields=fields)

    if 'object' in model and isinstance(model['object'], dict):
        model = model['object']

    if 'model' in model and isinstance(model['model'], dict):
        status = get_status(model)
        if 'code' in status and status['code'] == FINISHED:
            # boosting models are to be handled using the BoostedTree
            # class
            if model.get("boosted_ensemble"):
                self.boosting = model.get('boosting', False)
            if self.boosting == {}:
                self.boosting = False
            self.regression = \
                not self.boosting and \
                self.fields[self.objective_id]['optype'] == 'numeric' \
                or (self.boosting and \
                self.boosting.get("objective_class") is None)
            if self.boosting:
                self.tree = BoostedTree(
                    model['model']['root'],
                    self.fields,
                    objective_field=self.objective_id)
            else:
                distribution = model['model']['distribution']['training']
                # will store global information in the tree: regression and
                # max_bins number
                tree_info = {'max_bins': 0}
                self.tree = self.tree_class(
                    model['model']['root'],
                    self.fields,
                    objective_field=self.objective_id,
                    root_distribution=distribution,
                    parent_id=None,
                    ids_map=self.ids_map,
                    tree_info=tree_info)
                self.tree.regression = tree_info['regression']
                if self.tree.regression:
                    try:
                        import numpy
                        import scipy
                        self._max_bins = tree_info['max_bins']
                        self.regression_ready = True
                    except ImportError:
                        self.regression_ready = False
                else:
                    root_dist = self.tree.distribution
                    self.class_names = sorted([category[0]
                                               for category in root_dist])
        else:
            raise Exception("The model isn't finished yet")
    else:
        raise Exception("Cannot create the Model instance. Could not"
                        " find the 'model' key in the resource:\n\n%s" %
                        model)
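
# Hedged usage sketch, assuming this constructor belongs to the local
# `Model` class. The ID is a hypothetical placeholder; passing a `fields`
# dict avoids re-downloading the field structure.
def _example_model():
    local_model = Model("model/5af06df94e17277501000010")
    # predictions then run locally without contacting the server, e.g.
    # (field names assumed):
    # local_model.predict({"petal length": 2.45, "sepal length": 5})
    return local_model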
def __init__(self, model, api=None):
    """The Model constructor can be given as first argument:
        - a model structure
        - a model id
        - a path to a JSON file containing a model structure
    """
    self.resource_id = None
    self.ids_map = {}
    self.terms = {}
    # the string can be a path to a JSON file
    if isinstance(model, basestring):
        try:
            with open(model) as model_file:
                model = json.load(model_file)
            self.resource_id = get_model_id(model)
            if self.resource_id is None:
                raise ValueError("The JSON file does not seem"
                                 " to contain a valid BigML model"
                                 " representation.")
        except IOError:
            # if it is not a path, it can be a model id
            self.resource_id = get_model_id(model)
            if self.resource_id is None:
                if model.find('model/') > -1:
                    raise Exception(
                        api.error_message(model,
                                          resource_type='model',
                                          method='get'))
                else:
                    raise IOError("Failed to open the expected JSON file"
                                  " at %s" % model)
        except ValueError:
            raise ValueError("Failed to interpret %s."
                             " JSON file expected." % model)

    # checks whether the information needed for local predictions is in
    # the first argument
    if isinstance(model, dict) and \
            not check_model_fields(model):
        # if the fields used by the model are not
        # available, use only ID to retrieve it again
        model = get_model_id(model)
        self.resource_id = model

    if not (isinstance(model, dict) and 'resource' in model and
            model['resource'] is not None):
        if api is None:
            api = BigML(storage=STORAGE)
        query_string = ONLY_MODEL
        model = retrieve_resource(api, self.resource_id,
                                  query_string=query_string)
    else:
        self.resource_id = get_model_id(model)
    BaseModel.__init__(self, model, api=api)

    if 'object' in model and isinstance(model['object'], dict):
        model = model['object']

    if 'model' in model and isinstance(model['model'], dict):
        status = get_status(model)
        if 'code' in status and status['code'] == FINISHED:
            distribution = model['model']['distribution']['training']
            # will store global information in the tree: regression and
            # max_bins number
            tree_info = {'max_bins': 0}
            self.tree = Tree(
                model['model']['root'],
                self.fields,
                objective_field=self.objective_id,
                root_distribution=distribution,
                parent_id=None,
                ids_map=self.ids_map,
                tree_info=tree_info)
            self.tree.regression = tree_info['regression']
            if self.tree.regression:
                self._max_bins = tree_info['max_bins']
        else:
            raise Exception("The model isn't finished yet")
    else:
        raise Exception("Cannot create the Model instance. Could not"
                        " find the 'model' key in the resource:\n\n%s" %
                        model)
    if self.tree.regression:
        try:
            import numpy
            import scipy
            self.regression_ready = True
        except ImportError:
            self.regression_ready = False
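
# Hedged sketch: this older constructor only sets `regression_ready` when
# numpy and scipy import cleanly, so callers may want to check the flag
# before predicting with a regression model. `local_model` is assumed to be
# an instance built by the constructor above.
def _example_check_regression_ready(local_model):
    if local_model.tree.regression and not local_model.regression_ready:
        raise ImportError("numpy and scipy are needed for predictions"
                          " with regression models")
    return True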