def reify_dataset(self, resource_id):
    """Extracts the REST API arguments from the dataset JSON structure

    """
    child = self.get_resource(resource_id)
    origin, parent_id = u.get_origin_info(child)
    parent = self.get_resource(parent_id)
    opts = {"create": {}, "update": {}, "get": {}}

    # datasets can be generated as the second step of a cluster or a
    # batch prediction, batch centroid or batch anomaly score
    grandparent = parent
    if origin in ['origin_batch_resource', 'cluster']:
        if origin == "cluster":
            opts['create'].update({"centroid": child['centroid']})
        grandparents = u.get_origin_info(parent)
        # batch resources have two parents, choose the dataset
        if origin == "origin_batch_resource" and \
                isinstance(grandparents, list):
            for gp_origin, grandparent in grandparents:
                if gp_origin == "dataset":
                    break
        else:
            _, grandparent = grandparents
        grandparent = self.get_resource(grandparent)

    # options common to all model types
    call = "update" if origin == "origin_batch_resource" else "create"
    u.common_dataset_opts(child, grandparent, opts, call=call)

    # update options
    dataset_defaults = DEFAULTS["dataset"].get("update", {})
    for attribute, default_value in dataset_defaults.items():
        opts["update"].update(
            u.default_setting(child, attribute, *default_value))

    # name, exclude automatic naming alternatives
    autonames = [u'']
    u.non_automatic_name(child, opts, autonames=autonames)

    # objective field
    resource_fields = Fields(
        {'resource': child['resource'], 'object': child})
    objective_id = child['objective_field']['id']
    preferred_fields = resource_fields.preferred_fields()
    # if there are no preferred fields, use the fields structure
    if len(preferred_fields.keys()) == 0:
        preferred_fields = resource_fields.fields
    max_column = sorted([field['column_number']
                         for _, field in preferred_fields.items()
                         if field['optype'] != "text"],
                        reverse=True)[0]
    objective_column = resource_fields.fields[
        objective_id]['column_number']
    if objective_column != max_column:
        opts['create'].update({"objective_field": {"id": objective_id}})

    if origin != "origin_batch_resource":
        # resize
        if (child['size'] != grandparent['size'] and
                get_resource_type(parent) == 'source'):
            opts['create'].update({"size": child['size']})

        # generated fields
        if child.get('new_fields', None):
            new_fields = child['new_fields']
            for new_field in new_fields:
                new_field['field'] = new_field['generator']
                del new_field['generator']
            opts['create'].update({"new_fields": new_fields})

    u.range_opts(child, grandparent, opts)

    # for datasets generated by batch predictions, batch centroids or
    # batch anomaly scores, attributes cannot be set at creation time,
    # so we must update the resource instead
    suffix = None
    if origin == "origin_batch_resource":
        opts["update"].update(opts["create"])
        opts["create"] = {}
        suffix = "['object']['output_dataset_resource']"

    calls = u.build_calls(resource_id, [parent_id], opts, suffix=suffix)
    self.add(resource_id, calls)
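
# A minimal, self-contained sketch of the grandparent lookup above. It
# assumes, as the code suggests, that u.get_origin_info returns either a
# single (origin, resource_id) pair or a list of such pairs for batch
# resources; the helper name and the sample ids below are hypothetical,
# for illustration only.
def _pick_dataset_parent(grandparents):
    """Returns the dataset parent id when several parents are listed."""
    if isinstance(grandparents, list):
        # batch resources have two parents: prefer the dataset one
        for gp_origin, grandparent in grandparents:
            if gp_origin == "dataset":
                return grandparent
        return grandparent  # fall back to the last pair, as the loop does
    _, grandparent = grandparents  # a single (origin, id) pair
    return grandparent

assert _pick_dataset_parent(
    [("batchprediction", "batchprediction/a1b2"),
     ("dataset", "dataset/c3d4")]) == "dataset/c3d4"
assert _pick_dataset_parent(("dataset", "dataset/e5f6")) == "dataset/e5f6"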

def reify_dataset(self, resource_id):
    """Extracts the REST API arguments from the dataset JSON structure

    """
    child = self.get_resource(resource_id)
    origin, parent_id = u.get_origin_info(child)
    parent = self.get_resource(parent_id)
    opts = {"create": {}, "update": {}}

    # datasets can be generated as the second step of a cluster or a
    # batch prediction, batch centroid or batch anomaly score
    if origin in ["origin_batch_resource", "cluster"]:
        if origin == "cluster":
            opts["create"].update({"centroid": child["centroid"]})
        _, grandparent = u.get_origin_info(parent)
        grandparent = self.get_resource(grandparent)
    else:
        grandparent = parent

    # options common to all model types
    u.common_dataset_opts(child, grandparent, opts)

    # update options
    dataset_defaults = DEFAULTS["dataset"].get("update", {})
    dataset_defaults.update(COMMON_DEFAULTS.get("update", {}))
    for attribute, default_value in dataset_defaults.items():
        opts["update"].update(
            u.default_setting(child, attribute, *default_value))

    # name, exclude automatic naming alternatives
    autonames = [u""]
    suffixes = [u"filtered", u"sampled", u"dataset", u"extended",
                u"- batchprediction", u"- batchanomalyscore",
                u"- batchcentroid", u"- merged"]
    autonames.extend(
        [u"%s %s" % (grandparent.get("name", ""), suffix)
         for suffix in suffixes])
    autonames.append(
        u"%s's dataset" % ".".join(parent["name"].split(".")[0:-1]))
    autonames.append(
        u"%s' dataset" % ".".join(parent["name"].split(".")[0:-1]))
    autonames.append(
        u"Cluster %s - %s" % (int(child.get("centroid", "0"), base=16),
                              parent["name"]))
    autonames.append(
        u"Dataset from %s model - segment" % parent["name"])
    u.non_automatic_name(child, opts, autonames=autonames)

    # objective field
    resource_fields = Fields(
        {"resource": child["resource"], "object": child})
    objective_id = child["objective_field"]["id"]
    preferred_fields = resource_fields.preferred_fields()
    max_column = sorted([field["column_number"]
                         for _, field in preferred_fields.items()],
                        reverse=True)[0]
    objective_column = resource_fields.fields[
        objective_id]["column_number"]
    if objective_column != max_column:
        opts["create"].update({"objective_field": {"id": objective_id}})

    # resize
    if (child["size"] != grandparent["size"] and
            get_resource_type(parent) == "source"):
        opts["create"].update({"size": child["size"]})

    # generated fields
    if child.get("new_fields", None):
        new_fields = child["new_fields"]
        for new_field in new_fields:
            new_field["field"] = new_field["generator"]
            del new_field["generator"]
        opts["create"].update({"new_fields": new_fields})

    u.range_opts(child, grandparent, opts)

    calls = u.build_calls(resource_id, [parent_id], opts)
    self.add(resource_id, calls)
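
# A self-contained, hypothetical check of the automatic-name patterns
# built above (the "iris.csv" parent name and the "0a" centroid id are
# made-up sample values, not from the original code). The possessive
# forms strip the file extension from the parent name, and centroid ids
# are parsed as hexadecimal for the "Cluster N - parent" pattern.
parent_name = "iris.csv"
stem = ".".join(parent_name.split(".")[0:-1])
assert u"%s's dataset" % stem == u"iris's dataset"
assert u"%s' dataset" % stem == u"iris' dataset"
assert u"Cluster %s - %s" % (int("0a", base=16), parent_name) == \
    u"Cluster 10 - iris.csv"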