def cluster_create(context, values):
    """Insert a new Cluster row together with its node groups.

    A copy of ``values`` is taken so the caller's dict is untouched; the
    "node_groups" entry (default ``[]``) is stored as NodeGroup rows
    linked to the new cluster.

    :param context: request context.
    :param values: cluster fields, optionally including "node_groups".
    :returns: the cluster re-read via ``cluster_get``.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation for
        either the cluster or one of its node groups.
    """
    values = values.copy()
    ng_specs = values.pop("node_groups", [])
    cluster = m.Cluster()
    cluster.update(values)
    session = get_session()
    with session.begin():
        try:
            cluster.save(session=session)
        except db_exc.DBDuplicateEntry as dup:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for Cluster: %s") % dup.columns)
        try:
            for spec in ng_specs:
                ng_row = m.NodeGroup()
                # Link the row to the parent before applying the caller's
                # fields so an explicit "cluster_id" in spec would win.
                ng_row.update({"cluster_id": cluster.id})
                ng_row.update(spec)
                ng_row.save(session=session)
        except db_exc.DBDuplicateEntry as dup:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for NodeGroup: %s") % dup.columns)
    return cluster_get(context, cluster.id)
def cluster_template_create(context, values):
    """Create a ClusterTemplate and its TemplatesRelation rows.

    :param context: request context.
    :param values: template fields; an optional "node_groups" list is
        stored as TemplatesRelation rows tied to the new template.
    :returns: the created template, re-read via ``cluster_template_get``.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    values = values.copy()
    cluster_template = m.ClusterTemplate()
    # Fix: pop with a None default so a payload without "node_groups" no
    # longer raises KeyError (matches cluster_create's tolerant behavior);
    # `or []` also maps an explicit None value to an empty list as before.
    node_groups = values.pop("node_groups", None) or []
    cluster_template.update(values)
    session = get_session()
    with session.begin():
        try:
            cluster_template.save(session=session)
        except db_exc.DBDuplicateEntry as e:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for ClusterTemplate: %s") % e.columns)
        try:
            for ng in node_groups:
                node_group = m.TemplatesRelation()
                # Set the foreign key first, then overlay the caller's
                # node-group fields.
                node_group.update({"cluster_template_id": cluster_template.id})
                node_group.update(ng)
                node_group.save(session=session)
        except db_exc.DBDuplicateEntry as e:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for TemplatesRelation: %s") % e.columns)
    return cluster_template_get(context, cluster_template.id)
def node_group_template_update(context, values, ignore_default=False):
    """Apply ``values`` to an existing NodeGroupTemplate.

    Refuses to modify default templates (unless ``ignore_default``) and
    templates embedded in a cluster template that has live clusters.

    :raises ex.NotFoundException: if ``values['id']`` matches no template.
    :raises ex.UpdateFailedException: for default or in-use templates.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))
            if ngt.is_default and not ignore_default:
                raise ex.UpdateFailedException(
                    ngt_id,
                    _("NodeGroupTemplate id '%s' can not be updated. "
                      "It is a default template."))
            # Reject the update while any cluster still uses a cluster
            # template that embeds this node group template.
            for rel in ngt.templates_relations:
                if rel.cluster_template.clusters:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster."))
            ngt.update(values)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % dup.columns)
    return ngt
def data_source_update(context, values):
    """Update a DataSource unless a PENDING job execution is using it.

    :raises ex.NotFoundException: if ``values['id']`` matches no source.
    :raises ex.UpdateFailedException: if a PENDING job references the id.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            data_source = _data_source_get(context, session, ds_id)
            if not data_source:
                raise ex.NotFoundException(
                    ds_id, _("DataSource id '%s' not found"))
            # A data source referenced by a queued (PENDING) execution must
            # stay frozen until that job has run.
            for job in job_execution_get_all(context):
                if job.info["status"] != "PENDING":
                    continue
                if job.data_source_urls and ds_id in job.data_source_urls:
                    raise ex.UpdateFailedException(
                        _("DataSource is used in a "
                          "PENDING Job and can not be updated."))
            data_source.update(values)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % dup.columns)
    return data_source
def job_create(context, values):
    """Create a Job with its mains, libs and interface attached.

    "mains", "libs" and "interface" are popped from ``values`` (defaulting
    to empty lists) and attached through the ``_append_*`` helpers.

    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    mains = values.pop("mains", [])
    libs = values.pop("libs", [])
    interface = values.pop("interface", [])
    session = get_session()
    try:
        with session.begin():
            job = m.Job()
            job.update(values)
            # mains/libs/interface are lazy relationship attributes; assign
            # empty lists inside the session so they are initialized even
            # when nothing is appended below.
            job.mains, job.libs, job.interface = [], [], []
            _append_job_binaries(context, session, mains, job.mains)
            _append_job_binaries(context, session, libs, job.libs)
            _append_interface(context, interface, job.interface)
            session.add(job)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Job: %s") % dup.columns)
    return job
def cluster_template_create(context, values):
    """Create a ClusterTemplate plus TemplatesRelation rows.

    Uses ``session.add``/``flush`` so the template id is available before
    the relation rows are built, and reports any duplicate-entry failure
    with the offending object and columns.

    :param context: request context.
    :param values: template fields; an optional "node_groups" list is
        stored as TemplatesRelation rows tied to the new template.
    :returns: the created template, re-read via ``cluster_template_get``.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    values = values.copy()
    cluster_template = m.ClusterTemplate()
    # Fix: pop with a None default so a payload without "node_groups" no
    # longer raises KeyError; `or []` also maps an explicit None value to
    # an empty list as before.
    node_groups = values.pop("node_groups", None) or []
    cluster_template.update(values)
    session = get_session()
    try:
        with session.begin():
            session.add(cluster_template)
            # Flush now so cluster_template.id is assigned before the
            # relation rows reference it.
            session.flush(objects=[cluster_template])
            for ng in node_groups:
                node_group = m.TemplatesRelation()
                node_group.update({"cluster_template_id": cluster_template.id})
                node_group.update(ng)
                session.add(node_group)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for object %(object)s. Failed on columns: "
              "%(columns)s") % {"object": e.value, "columns": e.columns})
    return cluster_template_get(context, cluster_template.id)
def job_binary_internal_create(context, values):
    """Returns a JobBinaryInternal that does not contain a data field

    The data column uses deferred loading.

    :param context: request context.
    :param values: binary fields; must contain "data" (bytes-like).
    :raises ex.DataTooBigException: if the payload exceeds
        CONF.job_binary_max_KB.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    # Fix: copy first. The original wrote "datasize" straight into the
    # caller's dict, leaking an implementation detail — the other create
    # functions in this module copy ``values`` before mutating it.
    values = values.copy()
    values["datasize"] = len(values["data"])
    datasize_KB = values["datasize"] / 1024.0
    if datasize_KB > CONF.job_binary_max_KB:
        raise ex.DataTooBigException(
            round(datasize_KB, 1), CONF.job_binary_max_KB,
            _("Size of internal binary (%(size)sKB) is greater "
              "than the maximum (%(maximum)sKB)"))
    job_binary_int = m.JobBinaryInternal()
    job_binary_int.update(values)
    session = get_session()
    with session.begin():
        try:
            job_binary_int.save(session=session)
        except db_exc.DBDuplicateEntry as e:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for JobBinaryInternal: %s") % e.columns)
    return job_binary_internal_get(context, job_binary_int.id)
def node_group_template_update(context, values, ignore_prot_on_def=False):
    """Update a node group template and propagate the change.

    Looks up the template by ``values['id']``, applies ``values`` to it,
    then rewrites every cluster template that references it so the embedded
    node-group copies stay in sync (via ``cluster_template_update``).

    :param context: request context used for tenant-scoped access.
    :param values: fields to update; must contain the template 'id'.
    :param ignore_prot_on_def: when True, skip the protected-resource check
        for default templates.
    :returns: the updated NodeGroupTemplate model.
    :raises ex.NotFoundException: if no template with the given id exists.
    :raises ex.UpdateFailedException: if the template is referenced by an
        existing cluster.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))
            validate.check_tenant_for_update(context, ngt)
            # Default templates bypass the protection check only when the
            # caller explicitly asks for it via ignore_prot_on_def.
            if not (ngt.is_default and ignore_prot_on_def):
                validate.check_protected_from_update(ngt, values)
            # Check to see that the node group template to be updated is not
            # in use by an existing cluster.
            for template_relationship in ngt.templates_relations:
                if len(template_relationship.cluster_template.clusters) > 0:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster."))
            ngt.update(values)
            # Here we update any cluster templates that reference the
            # updated node group template
            for template_relationship in ngt.templates_relations:
                ct_id = template_relationship.cluster_template_id
                ct = cluster_template_get(
                    context, template_relationship.cluster_template_id)
                node_groups = ct.node_groups
                ct_node_groups = []
                for ng in node_groups:
                    # Need to fill in all node groups, not just
                    # the modified group
                    ng_to_add = ng
                    if ng.node_group_template_id == ngt_id:
                        # use the updated node group template
                        ng_to_add = ngt
                    ng_to_add = ng_to_add.to_dict()
                    # Keep the per-cluster-template count and the template
                    # linkage from the original node group row.
                    ng_to_add.update(
                        {"count": ng["count"],
                         "node_group_template_id": ng.node_group_template_id})
                    # Drop fields that must be regenerated when the node
                    # group rows are re-created by cluster_template_update.
                    ng_to_add.pop("updated_at", None)
                    ng_to_add.pop("created_at", None)
                    ng_to_add.pop("id", None)
                    ct_node_groups.append(ng_to_add)
                ct_update = {"id": ct_id,
                             "node_groups": ct_node_groups}
                cluster_template_update(context, ct_update,
                                        ignore_prot_on_def)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)
    return ngt
def cluster_template_update(context, values, ignore_default=False):
    """Update a ClusterTemplate, optionally replacing its node groups.

    If ``values`` contains a "node_groups" key, the existing
    TemplatesRelation rows are deleted and re-created from that list
    (``None`` means "replace with nothing"); otherwise the current node
    groups are kept.

    :param context: request context used for tenant-scoped access.
    :param values: fields to update; must contain the template 'id'.
    :param ignore_default: when True, allow updating a default template.
    :returns: the updated template, re-read via ``cluster_template_get``.
    :raises ex.NotFoundException: if no template with the given id exists.
    :raises ex.UpdateFailedException: for default templates or templates
        referenced by at least one cluster.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    explicit_node_groups = "node_groups" in values
    if explicit_node_groups:
        node_groups = values.pop("node_groups")
        if node_groups is None:
            node_groups = []
    session = get_session()
    cluster_template_id = values['id']
    try:
        with session.begin():
            cluster_template = (_cluster_template_get(
                context, session, cluster_template_id))
            if not cluster_template:
                raise ex.NotFoundException(
                    cluster_template_id,
                    _("Cluster Template id '%s' not found!"))
            elif not ignore_default and cluster_template.is_default:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("ClusterTemplate id '%s' can not be updated. "
                      "It is a default template."))
            validate.check_tenant_for_update(context, cluster_template)
            validate.check_protected_from_update(cluster_template, values)
            if len(cluster_template.clusters) > 0:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("Cluster Template id '%s' can not be updated. "
                      "It is referenced by at least one cluster."))
            cluster_template.update(values)
            # The flush here will cause a duplicate entry exception if
            # unique constraints are violated, before we go ahead and delete
            # the node group templates
            session.flush(objects=[cluster_template])
            # If node_groups has not been specified, then we are
            # keeping the old ones so don't delete!
            if explicit_node_groups:
                model_query(m.TemplatesRelation, context,
                            session=session).filter_by(
                    cluster_template_id=cluster_template_id).delete()
                for ng in node_groups:
                    node_group = m.TemplatesRelation()
                    node_group.update(ng)
                    node_group.update(
                        {"cluster_template_id": cluster_template_id})
                    session.add(node_group)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for ClusterTemplate: %s") % e.columns)
    return cluster_template_get(context, cluster_template_id)
def node_group_template_create(context, values):
    """Create a NodeGroupTemplate from ``values`` and return it.

    :param context: request context (unused here; kept for API symmetry).
    :param values: template fields to store.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    node_group_template = m.NodeGroupTemplate()
    node_group_template.update(values)
    try:
        node_group_template.save()
    except db_exc.DBDuplicateEntry as e:
        # Fix: wrap the message in _() so it is translatable, matching the
        # error-handling style used by the other functions in this module.
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)
    return node_group_template
def data_source_create(context, values):
    """Create a DataSource from ``values`` and return it.

    :param context: request context (unused here; kept for API symmetry).
    :param values: data-source fields to store.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    data_source = m.DataSource()
    data_source.update(values)
    try:
        data_source.save()
    except db_exc.DBDuplicateEntry as e:
        # Fix: wrap the message in _() so it is translatable, matching the
        # error-handling style used by the other functions in this module.
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)
    return data_source
def job_execution_create(context, values):
    """Persist a new JobExecution built from ``values``.

    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    execution = m.JobExecution()
    execution.update(values)
    session = get_session()
    with session.begin():
        try:
            execution.save(session=session)
        except db_exc.DBDuplicateEntry as dup:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for JobExecution: %s") % dup.columns)
    return execution
def data_source_create(context, values):
    """Create and persist a DataSource from ``values``.

    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    ds = m.DataSource()
    ds.update(values)
    with session.begin():
        try:
            ds.save(session=session)
        except db_exc.DBDuplicateEntry as dup:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for DataSource: %s") % dup.columns)
    return ds
def node_group_template_create(context, values):
    """Create and persist a NodeGroupTemplate from ``values``.

    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    template = m.NodeGroupTemplate()
    template.update(values)
    with session.begin():
        try:
            template.save(session=session)
        except db_exc.DBDuplicateEntry as dup:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for NodeGroupTemplate: %s") % dup.columns)
    return template
def cluster_template_update(context, values, ignore_default=False):
    """Update a ClusterTemplate, replacing its node groups wholesale.

    The "node_groups" entry (default ``[]``) always replaces the existing
    TemplatesRelation rows — passing no node groups deletes them all.

    :param context: request context.
    :param values: fields to update; must contain the template 'id'.
    :param ignore_default: when True, allow updating a default template.
    :returns: the updated ClusterTemplate model.
    :raises ex.NotFoundException: if no template with the given id exists.
    :raises ex.UpdateFailedException: for default templates or templates
        referenced by at least one cluster.
    :raises ex.DBDuplicateEntry: if another template already has the
        requested name.
    """
    node_groups = values.pop("node_groups", [])
    session = get_session()
    with session.begin():
        cluster_template_id = values['id']
        cluster_template = (_cluster_template_get(
            context, session, cluster_template_id))
        if not cluster_template:
            raise ex.NotFoundException(
                cluster_template_id,
                _("Cluster Template id '%s' not found!"))
        elif not ignore_default and cluster_template.is_default:
            raise ex.UpdateFailedException(
                cluster_template_id,
                _("ClusterTemplate id '%s' can not be updated. "
                  "It is a default template."))
        # Manual uniqueness pre-check on the name; allows a template to
        # keep (re-submit) its own current name.
        name = values.get('name')
        if name:
            same_name_tmpls = model_query(
                m.ClusterTemplate, context).filter_by(
                name=name).all()
            if (len(same_name_tmpls) > 0 and
                    same_name_tmpls[0].id != cluster_template_id):
                raise ex.DBDuplicateEntry(
                    _("Cluster Template can not be updated. "
                      "Another cluster template with name %s already exists.")
                    % name)
        if len(cluster_template.clusters) > 0:
            raise ex.UpdateFailedException(
                cluster_template_id,
                _("Cluster Template id '%s' can not be updated. "
                  "It is referenced by at least one cluster."))
        cluster_template.update(values)
        # Replace all existing node-group relations with the new list.
        # NOTE(review): this model_query call does not pass session=session
        # like the saves below do — presumably it uses the same scoped
        # session internally; confirm against model_query's definition.
        model_query(m.TemplatesRelation, context).filter_by(
            cluster_template_id=cluster_template_id).delete()
        for ng in node_groups:
            node_group = m.TemplatesRelation()
            node_group.update(ng)
            node_group.update({"cluster_template_id": cluster_template_id})
            node_group.save(session=session)
    return cluster_template
def job_binary_create(context, values):
    """Returns a JobBinary that does not contain a data field

    The data column uses deferred loading.

    :param context: request context (unused here; kept for API symmetry).
    :param values: job-binary fields to store.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    job_binary = m.JobBinary()
    job_binary.update(values)
    try:
        job_binary.save()
    except db_exc.DBDuplicateEntry as e:
        # Fix: wrap the message in _() so it is translatable, matching the
        # error-handling style used by the other functions in this module.
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinary: %s") % e.columns)
    return job_binary
def cluster_update(context, cluster_id, values):
    """Apply ``values`` to the cluster identified by ``cluster_id``.

    :raises ex.NotFoundException: if the cluster does not exist.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    try:
        with session.begin():
            cluster = _cluster_get(context, session, cluster_id)
            if cluster is None:
                raise ex.NotFoundException(
                    cluster_id, _("Cluster id '%s' not found!"))
            cluster.update(values)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Cluster: %s") % dup.columns)
    return cluster
def job_binary_create(context, values):
    """Create a JobBinary and return it without its data field.

    The data column uses deferred loading, so the returned object does
    not carry the binary payload.

    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    binary = m.JobBinary()
    binary.update(values)
    with session.begin():
        try:
            binary.save(session=session)
        except db_exc.DBDuplicateEntry as dup:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for JobBinary: %s") % dup.columns)
    return binary
def job_update(context, job_id, values):
    """Update an existing Job after tenant and protection validation.

    :raises ex.NotFoundException: if ``job_id`` matches no job.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    try:
        with session.begin():
            job = _job_get(context, session, job_id)
            if not job:
                raise ex.NotFoundException(
                    job_id, _("Job id '%s' not found!"))
            validate.check_tenant_for_update(context, job)
            validate.check_protected_from_update(job, values)
            job.update(values)
            session.add(job)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Job: %s") % dup.columns)
    return job
def job_execution_create(context, values):
    """Create a JobExecution, merging the parent Job's interface into it.

    The 'interface' entry popped from ``values`` supplies per-execution
    argument values merged against the parent job's declared interface.

    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    iface_values = values.pop('interface', {})
    execution = m.JobExecution()
    execution.update(values)
    try:
        with session.begin():
            # Initialize the lazy 'interface' relationship within the
            # session before it is (possibly) populated by the merge.
            execution.interface = []
            parent_job = _job_get(context, session, execution.job_id)
            if parent_job.interface:
                _merge_execution_interface(execution, parent_job,
                                           iface_values)
            session.add(execution)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobExecution: %s") % dup.columns)
    return execution
def data_source_update(context, values):
    """Update a DataSource after tenant and protection checks.

    :raises ex.NotFoundException: if ``values['id']`` matches no source.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            ds = _data_source_get(context, session, ds_id)
            if not ds:
                raise ex.NotFoundException(
                    ds_id, _("DataSource id '%s' not found"))
            validate.check_tenant_for_update(context, ds)
            validate.check_protected_from_update(ds, values)
            ds.update(values)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % dup.columns)
    return ds
def node_group_template_update(context, values, ignore_default=False):
    """Update a NodeGroupTemplate identified by ``values['id']``.

    Enforces name uniqueness manually and refuses to modify default
    templates (unless ``ignore_default``) or templates embedded in a
    cluster template that has live clusters.

    :param context: request context.
    :param values: fields to update; must contain the template 'id'.
    :param ignore_default: when True, allow updating a default template.
    :returns: the updated NodeGroupTemplate model.
    :raises ex.NotFoundException: if no template with the given id exists.
    :raises ex.UpdateFailedException: for default or in-use templates.
    :raises ex.DBDuplicateEntry: if another template already has the
        requested name.
    """
    session = get_session()
    with session.begin():
        ngt_id = values['id']
        node_group_template = (
            _node_group_template_get(context, session, ngt_id))
        if not node_group_template:
            raise ex.NotFoundException(
                ngt_id, _("NodeGroupTemplate id '%s' not found"))
        elif not ignore_default and node_group_template.is_default:
            raise ex.UpdateFailedException(
                ngt_id,
                _("NodeGroupTemplate id '%s' can not be updated. "
                  "It is a default template."))
        # Manual uniqueness pre-check; skipped when the name is unchanged.
        name = values.get('name')
        if name and name != node_group_template.name:
            same_name_tmpls = model_query(
                m.NodeGroupTemplate, context).filter_by(name=name).all()
            if (len(same_name_tmpls) > 0 and
                    same_name_tmpls[0].id != ngt_id):
                raise ex.DBDuplicateEntry(
                    _("Node Group Template can not be updated. "
                      "Another node group template with name %s "
                      "already exists.") % name)
        # Check to see that the node group template to be updated is not in
        # use by an existing cluster.
        for template_relationship in node_group_template.templates_relations:
            if len(template_relationship.cluster_template.clusters) > 0:
                raise ex.UpdateFailedException(
                    ngt_id,
                    _("NodeGroupTemplate id '%s' can not be updated. "
                      "It is referenced by an existing cluster."))
        node_group_template.update(values)
    return node_group_template
def job_binary_internal_update(context, job_binary_internal_id, values):
    """Update a JobBinaryInternal and return the updated row.

    :raises ex.NotFoundException: if the id matches no internal binary.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    session = get_session()
    try:
        with session.begin():
            binary = _job_binary_internal_get(
                context, session, job_binary_internal_id)
            if not binary:
                raise ex.NotFoundException(
                    job_binary_internal_id,
                    _("JobBinaryInternal id '%s' not found!"))
            validate.check_tenant_for_update(context, binary)
            validate.check_protected_from_update(binary, values)
            binary.update(values)
    except db_exc.DBDuplicateEntry as dup:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinaryInternal: %s") % dup.columns)
    return binary
def job_binary_update(context, values):
    """Returns a JobBinary updated with the provided values.

    Rejects url changes on internal-db binaries and any update to a binary
    that is referenced by a PENDING job execution.

    :param context: request context.
    :param values: fields to update; must contain the binary "id".
    :raises ex.NotFoundException: if no binary with the given id exists.
    :raises ex.UpdateFailedException: for internal-db url changes or
        binaries in use by a PENDING job.
    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    jb_id = values["id"]
    session = get_session()
    try:
        with session.begin():
            jb = _job_binary_get(context, session, jb_id)
            if not jb:
                raise ex.NotFoundException(
                    jb_id, _("JobBinary id '%s' not found"))
            validate.check_tenant_for_update(context, jb)
            validate.check_protected_from_update(jb, values)
            # We do not want to update the url for internal binaries
            new_url = values.get("url", None)
            if new_url and "internal-db://" in jb["url"]:
                if jb["url"] != new_url:
                    raise ex.UpdateFailedException(
                        jb_id,
                        _("The url for JobBinary Id '%s' can not "
                          "be updated because it is an internal-db url."))
            # A binary referenced by a queued (PENDING) job must stay
            # frozen until that job has run.
            jobs = job_execution_get_all(context)
            pending_jobs = [job for job in jobs if
                            job.info["status"] == "PENDING"]
            if len(pending_jobs) > 0:
                for job in pending_jobs:
                    if _check_job_binary_referenced(
                            context, session, jb_id, job.job_id):
                        raise ex.UpdateFailedException(
                            jb_id,
                            _("JobBinary Id '%s' is used in a PENDING job "
                              "and can not be updated."))
            jb.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinary: %s") % e.columns)
    return jb
def job_create(context, values):
    """Create a Job and attach its mains and libs binaries.

    "mains" and "libs" are popped from ``values`` (defaulting to empty
    lists) and attached through ``_append_job_binaries``.

    :raises ex.DBDuplicateEntry: on a unique-constraint violation.
    """
    mains = values.pop("mains", [])
    libs = values.pop("libs", [])
    session = get_session()
    with session.begin():
        job = m.Job()
        job.update(values)
        # mains and libs are lazy relationship attributes; initialize them
        # inside the session even when the input lists are empty.
        job.mains = []
        job.libs = []
        try:
            _append_job_binaries(context, session, mains, job.mains)
            _append_job_binaries(context, session, libs, job.libs)
            job.save(session=session)
        except db_exc.DBDuplicateEntry as dup:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for Job: %s") % dup.columns)
    return job