Ejemplo n.º 1
0
Archivo: api.py Proyecto: uladz/sahara
def node_group_template_update(context, values, ignore_default=False):
    """Update a node group template with *values* (keyed by 'id').

    :raises NotFoundException: no template with the given id exists
    :raises UpdateFailedException: the template is a default template
        (unless ignore_default) or is referenced by an existing cluster
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))
            if ngt.is_default and not ignore_default:
                raise ex.UpdateFailedException(
                    ngt_id,
                    _("NodeGroupTemplate id '%s' can not be updated. "
                      "It is a default template."))

            validate.check_tenant_for_update(context, ngt)
            validate.check_protected_from_update(ngt, values)

            # Refuse the update while any cluster template referencing
            # this node group template has live clusters.
            referenced = any(
                rel.cluster_template.clusters
                for rel in ngt.templates_relations)
            if referenced:
                raise ex.UpdateFailedException(
                    ngt_id,
                    _("NodeGroupTemplate id '%s' can not be updated. "
                      "It is referenced by an existing cluster."))

            ngt.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return ngt
Ejemplo n.º 2
0
def check_cluster_update(cluster_id, data, **kwargs):
    """Validate an update request against an existing cluster."""
    cluster = api.get_cluster(cluster_id)

    is_verification_op = verification_base.validate_verification_ops(
        cluster, data)
    acl.check_tenant_for_update(context.current(), cluster)

    # Verification-only updates bypass the protected-resource check.
    if not is_verification_op:
        acl.check_protected_from_update(cluster, data)
Ejemplo n.º 3
0
Archivo: api.py Proyecto: crobby/sahara
def data_source_update(context, values):
    """Update a data source; refuse while a PENDING job references it.

    :raises NotFoundException: no data source with the given id exists
    :raises UpdateFailedException: a PENDING job execution uses it
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            data_source = _data_source_get(context, session, ds_id)
            if not data_source:
                raise ex.NotFoundException(
                    ds_id, _("DataSource id '%s' not found"))

            validate.check_tenant_for_update(context, data_source)
            validate.check_protected_from_update(data_source, values)

            # A data source referenced by a job that has not started
            # yet must stay unchanged.
            for job in job_execution_get_all(context):
                if job.info["status"] != "PENDING":
                    continue
                if job.data_source_urls and ds_id in job.data_source_urls:
                    raise ex.UpdateFailedException(
                        _("DataSource is used in a "
                          "PENDING Job and can not be updated."))

            data_source.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)

    return data_source
Ejemplo n.º 4
0
def check_cluster_update(cluster_id, data, **kwargs):
    """Run ACL checks for a cluster update request."""
    cluster = api.get_cluster(cluster_id)

    skip_protection = verification_base.validate_verification_ops(
        cluster, data)
    acl.check_tenant_for_update(context.current(), cluster)

    # Protection rules do not apply to verification operations.
    if not skip_protection:
        acl.check_protected_from_update(cluster, data)
Ejemplo n.º 5
0
Archivo: api.py Proyecto: uladz/sahara
def data_source_update(context, values):
    """Apply *values* (keyed by 'id') to an existing data source.

    The update is rejected while the data source is referenced by a
    job execution that is still PENDING.
    """
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            data_source = _data_source_get(context, session, ds_id)
            if data_source is None:
                raise ex.NotFoundException(
                    ds_id, _("DataSource id '%s' not found"))

            validate.check_tenant_for_update(context, data_source)
            validate.check_protected_from_update(data_source, values)

            # Only jobs that have not started yet block the update.
            pending = (job for job in job_execution_get_all(context)
                       if job.info["status"] == "PENDING")
            for job in pending:
                if job.data_source_urls and ds_id in job.data_source_urls:
                    raise ex.UpdateFailedException(
                        _("DataSource is used in a "
                          "PENDING Job and can not be updated."))

            data_source.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)

    return data_source
Ejemplo n.º 6
0
Archivo: api.py Proyecto: crobby/sahara
def node_group_template_update(context, values, ignore_default=False):
    """Update the node group template identified by values['id'].

    Default templates are immutable unless *ignore_default* is set, and
    templates in use by a cluster may not be changed at all.
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if ngt is None:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))
            if ngt.is_default and not ignore_default:
                raise ex.UpdateFailedException(
                    ngt_id,
                    _("NodeGroupTemplate id '%s' can not be updated. "
                      "It is a default template."))

            validate.check_tenant_for_update(context, ngt)
            validate.check_protected_from_update(ngt, values)

            # Reject the update while any referencing cluster template
            # has clusters built from it.
            for rel in ngt.templates_relations:
                if rel.cluster_template.clusters:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster."))

            ngt.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return ngt
Ejemplo n.º 7
0
def node_group_template_update(context, values, ignore_prot_on_def=False):
    """Update a node group template and refresh referencing templates.

    Applies *values* (keyed by 'id') to the node group template, then
    rewrites every cluster template that references it so that the
    stored node groups pick up the updated template contents.

    :param ignore_prot_on_def: skip the protected-resource check when
        the template is a default template
    :raises NotFoundException: the template does not exist
    :raises UpdateFailedException: a cluster currently uses the template
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))

            validate.check_tenant_for_update(context, ngt)
            # Default templates bypass the protection check only when
            # the caller explicitly asks for it.
            if not (ngt.is_default and ignore_prot_on_def):
                validate.check_protected_from_update(ngt, values)

            # Check to see that the node group template to be updated is not in
            # use by an existing cluster.
            for template_relationship in ngt.templates_relations:
                if len(template_relationship.cluster_template.clusters) > 0:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster."))

            ngt.update(values)

            # Here we update any cluster templates that reference the
            # updated node group template
            for template_relationship in ngt.templates_relations:
                ct_id = template_relationship.cluster_template_id
                ct = cluster_template_get(
                    context, template_relationship.cluster_template_id)
                node_groups = ct.node_groups
                ct_node_groups = []
                for ng in node_groups:
                    # Need to fill in all node groups, not just
                    # the modified group
                    ng_to_add = ng
                    if ng.node_group_template_id == ngt_id:
                        # use the updated node group template
                        ng_to_add = ngt
                    ng_to_add = ng_to_add.to_dict()
                    # Keep the per-relation fields; they belong to the
                    # cluster template, not the node group template.
                    ng_to_add.update({
                        "count":
                        ng["count"],
                        "node_group_template_id":
                        ng.node_group_template_id
                    })
                    # Drop fields that cluster_template_update regenerates.
                    ng_to_add.pop("updated_at", None)
                    ng_to_add.pop("created_at", None)
                    ng_to_add.pop("id", None)
                    ct_node_groups.append(ng_to_add)
                ct_update = {"id": ct_id, "node_groups": ct_node_groups}
                cluster_template_update(context, ct_update, ignore_prot_on_def)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return ngt
Ejemplo n.º 8
0
Archivo: api.py Proyecto: crobby/sahara
def cluster_template_update(context, values, ignore_default=False):
    """Update a cluster template with *values* (keyed by 'id').

    When *values* contains a "node_groups" key the stored node groups
    are replaced wholesale (None means remove all); otherwise the
    existing node groups are kept untouched.

    :raises NotFoundException: the template does not exist
    :raises UpdateFailedException: the template is a default template
        (unless ignore_default) or is referenced by a cluster
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    explicit_node_groups = "node_groups" in values
    if explicit_node_groups:
        node_groups = values.pop("node_groups")
        if node_groups is None:
            node_groups = []

    session = get_session()
    cluster_template_id = values['id']
    try:
        with session.begin():
            cluster_template = (_cluster_template_get(
                context, session, cluster_template_id))
            if not cluster_template:
                raise ex.NotFoundException(
                    cluster_template_id,
                    _("Cluster Template id '%s' not found!"))

            elif not ignore_default and cluster_template.is_default:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("ClusterTemplate id '%s' can not be updated. "
                      "It is a default template.")
                )

            validate.check_tenant_for_update(context, cluster_template)
            validate.check_protected_from_update(cluster_template, values)

            if len(cluster_template.clusters) > 0:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("Cluster Template id '%s' can not be updated. "
                      "It is referenced by at least one cluster.")
                )
            cluster_template.update(values)
            # The flush here will cause a duplicate entry exception if
            # unique constraints are violated, before we go ahead and delete
            # the node group templates
            session.flush(objects=[cluster_template])

            # If node_groups has not been specified, then we are
            # keeping the old ones so don't delete!
            if explicit_node_groups:
                model_query(m.TemplatesRelation,
                            context, session=session).filter_by(
                    cluster_template_id=cluster_template_id).delete()

                for ng in node_groups:
                    node_group = m.TemplatesRelation()
                    node_group.update(ng)
                    node_group.update({"cluster_template_id":
                                       cluster_template_id})
                    session.add(node_group)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for ClusterTemplate: %s") % e.columns)

    # Re-read so the returned object includes the rebuilt node groups.
    return cluster_template_get(context, cluster_template_id)
Ejemplo n.º 9
0
def check_cluster_scaling(data, cluster_id, **kwargs):
    """Validate a scaling request against an existing cluster.

    Raises NotFoundException when the cluster does not exist and
    InvalidReferenceException when the cluster, its engine, its plugin
    or its status does not permit scaling.
    """
    ctx = context.current()
    cluster = api.get_cluster(id=cluster_id)

    if cluster is None:
        raise ex.NotFoundException(
            {'id': cluster_id}, _('Object with %s not found'))

    b.check_plugin_labels(cluster.plugin_name, cluster.hadoop_version)

    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)

    # Engine the cluster was originally built with (pre-Juno clusters
    # carry no sahara_info at all).
    if cluster.sahara_info:
        cluster_engine = cluster.sahara_info.get('infrastructure_engine')
    else:
        cluster_engine = None

    current_engine = service_api.OPS.get_engine_type_and_version()
    if not (cluster_engine or current_engine.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": current_engine})

    if cluster.sahara_info and cluster_engine != current_engine:
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {"old_engine": cluster.sahara_info.get('infrastructure_engine'),
             "new_engine": current_engine})

    def _implements(method):
        # Short helper so the plugin capability check reads clearly.
        return plugin_base.PLUGINS.is_plugin_implements(
            cluster.plugin_name, method)

    if not (_implements('scale_cluster') and
            _implements('decommission_nodes')):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if cluster.user_keypair_id:
        b.check_keypair_exists(cluster.user_keypair_id)

    if cluster.default_image_id:
        b.check_image_registered(cluster.default_image_id)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
Ejemplo n.º 10
0
def check_cluster_scaling(data, cluster_id, **kwargs):
    """Validate that the cluster identified by *cluster_id* may be scaled.

    Checks, in order: cluster existence, plugin labels, tenant and
    protection ACLs, infrastructure-engine compatibility, plugin scaling
    support, cluster status, keypair/image validity and finally the
    resize/add node group payloads themselves.

    :raises NotFoundException: the cluster does not exist
    :raises InvalidReferenceException: scaling is not permitted
    """
    ctx = context.current()
    cluster = api.get_cluster(id=cluster_id)

    if cluster is None:
        raise ex.NotFoundException(
            {'id': cluster_id}, _('Object with %s not found'))

    b.check_plugin_labels(
        cluster.plugin_name, cluster.hadoop_version)

    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)

    # Engine the cluster was built with; pre-Juno clusters have none.
    cluster_engine = cluster.sahara_info.get(
        'infrastructure_engine') if cluster.sahara_info else None

    engine_type_and_version = service_api.OPS.get_engine_type_and_version()
    if (not cluster_engine and
            not engine_type_and_version.startswith('direct')):
        raise ex.InvalidReferenceException(
            _("Cluster created before Juno release "
              "can't be scaled with %(engine)s engine") %
            {"engine": engine_type_and_version})

    if (cluster.sahara_info and
            cluster_engine != engine_type_and_version):
        raise ex.InvalidReferenceException(
            _("Cluster created with %(old_engine)s infrastructure engine "
              "can't be scaled with %(new_engine)s engine") %
            {"old_engine": cluster.sahara_info.get('infrastructure_engine'),
             "new_engine": engine_type_and_version})

    # Scaling requires both growing and shrinking support in the plugin.
    if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'scale_cluster') and (
            plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                     'decommission_nodes'))):
        raise ex.InvalidReferenceException(
            _("Requested plugin '%s' doesn't support cluster scaling feature")
            % cluster.plugin_name)

    if cluster.status != c_u.CLUSTER_STATUS_ACTIVE:
        raise ex.InvalidReferenceException(
            _("Cluster cannot be scaled not in 'Active' status. "
              "Cluster status: %s") % cluster.status)

    if cluster.user_keypair_id:
        b.check_keypair_exists(cluster.user_keypair_id)

    if cluster.default_image_id:
        b.check_image_registered(cluster.default_image_id)

    if data.get("resize_node_groups"):
        b.check_resize(cluster, data['resize_node_groups'])

    if data.get("add_node_groups"):
        b.check_add_node_groups(cluster, data['add_node_groups'])
        b.check_cluster_hostnames_lengths(cluster.name,
                                          data['add_node_groups'])
Ejemplo n.º 11
0
Archivo: api.py Proyecto: uladz/sahara
def cluster_template_update(context, values, ignore_default=False):
    """Update an existing cluster template from *values* (keyed by 'id').

    A "node_groups" entry in *values*, when present, fully replaces the
    stored node groups (None is treated as an empty list); when absent,
    the current node groups are preserved.

    :raises NotFoundException: the template does not exist
    :raises UpdateFailedException: default template (unless
        ignore_default) or template still referenced by a cluster
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    explicit_node_groups = "node_groups" in values
    if explicit_node_groups:
        node_groups = values.pop("node_groups")
        if node_groups is None:
            node_groups = []

    session = get_session()
    cluster_template_id = values['id']
    try:
        with session.begin():
            cluster_template = (_cluster_template_get(
                context, session, cluster_template_id))
            if not cluster_template:
                raise ex.NotFoundException(
                    cluster_template_id,
                    _("Cluster Template id '%s' not found!"))

            elif not ignore_default and cluster_template.is_default:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("ClusterTemplate id '%s' can not be updated. "
                      "It is a default template.")
                )

            validate.check_tenant_for_update(context, cluster_template)
            validate.check_protected_from_update(cluster_template, values)

            if len(cluster_template.clusters) > 0:
                raise ex.UpdateFailedException(
                    cluster_template_id,
                    _("Cluster Template id '%s' can not be updated. "
                      "It is referenced by at least one cluster.")
                )
            cluster_template.update(values)
            # The flush here will cause a duplicate entry exception if
            # unique constraints are violated, before we go ahead and delete
            # the node group templates
            session.flush(objects=[cluster_template])

            # If node_groups has not been specified, then we are
            # keeping the old ones so don't delete!
            if explicit_node_groups:
                model_query(m.TemplatesRelation,
                            context, session=session).filter_by(
                    cluster_template_id=cluster_template_id).delete()

                for ng in node_groups:
                    node_group = m.TemplatesRelation()
                    node_group.update(ng)
                    node_group.update({"cluster_template_id":
                                       cluster_template_id})
                    session.add(node_group)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for ClusterTemplate: %s") % e.columns)

    # Re-fetch so the result reflects the rebuilt node group relations.
    return cluster_template_get(context, cluster_template_id)
Ejemplo n.º 12
0
def node_group_template_update(context, values, ignore_prot_on_def=False):
    """Update a node group template and cascade to cluster templates.

    After applying *values* (keyed by 'id'), every cluster template
    referencing this node group template is rewritten so its stored
    node groups reflect the updated template.

    :param ignore_prot_on_def: skip the protected-resource check for
        default templates
    :raises NotFoundException: the template does not exist
    :raises UpdateFailedException: a cluster currently uses the template
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))

            validate.check_tenant_for_update(context, ngt)
            # Protection is only waived for default templates when the
            # caller explicitly allows it.
            if not (ngt.is_default and ignore_prot_on_def):
                validate.check_protected_from_update(ngt, values)

            # Check to see that the node group template to be updated is not in
            # use by an existing cluster.
            for template_relationship in ngt.templates_relations:
                if len(template_relationship.cluster_template.clusters) > 0:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster.")
                    )

            ngt.update(values)

            # Here we update any cluster templates that reference the
            # updated node group template
            for template_relationship in ngt.templates_relations:
                ct_id = template_relationship.cluster_template_id
                ct = cluster_template_get(
                    context, template_relationship.cluster_template_id)
                node_groups = ct.node_groups
                ct_node_groups = []
                for ng in node_groups:
                    # Need to fill in all node groups, not just
                    # the modified group
                    ng_to_add = ng
                    if ng.node_group_template_id == ngt_id:
                        # use the updated node group template
                        ng_to_add = ngt
                    ng_to_add = ng_to_add.to_dict()
                    # Preserve relation-owned fields; they do not come
                    # from the node group template itself.
                    ng_to_add.update(
                        {"count": ng["count"],
                         "node_group_template_id": ng.node_group_template_id})
                    # Drop fields that cluster_template_update regenerates.
                    ng_to_add.pop("updated_at", None)
                    ng_to_add.pop("created_at", None)
                    ng_to_add.pop("id", None)
                    ct_node_groups.append(ng_to_add)
                ct_update = {"id": ct_id,
                             "node_groups": ct_node_groups}
                cluster_template_update(context, ct_update, ignore_prot_on_def)

    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)

    return ngt
Ejemplo n.º 13
0
def job_update(context, job_id, values):
    """Update the job identified by *job_id* with *values*.

    :raises NotFoundException: no job with the given id exists
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    session = get_session()
    try:
        with session.begin():
            job = _job_get(context, session, job_id)
            if job is None:
                raise ex.NotFoundException(job_id, _("Job id '%s' not found!"))

            validate.check_tenant_for_update(context, job)
            validate.check_protected_from_update(job, values)

            job.update(values)
            session.add(job)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(_("Duplicate entry for Job: %s") % e.columns)

    return job
Ejemplo n.º 14
0
Archivo: api.py Proyecto: crobby/sahara
def job_execution_update(context, job_execution_id, values):
    """Apply *values* to an existing job execution.

    :raises NotFoundException: no job execution with the given id exists
    """
    session = get_session()

    with session.begin():
        job_ex = _job_execution_get(context, session, job_execution_id)
        if job_ex is None:
            raise ex.NotFoundException(
                job_execution_id, _("JobExecution id '%s' not found!"))

        # Periodic tasks run without a tenant, so only check when the
        # context carries one.
        if context.tenant_id:
            validate.check_tenant_for_update(context, job_ex)
        validate.check_protected_from_update(job_ex, values)

        job_ex.update(values)
        session.add(job_ex)

    return job_ex
Ejemplo n.º 15
0
def data_source_update(context, values):
    """Update the data source identified by values['id'].

    :raises NotFoundException: no data source with the given id exists
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            data_source = _data_source_get(context, session, ds_id)
            if data_source is None:
                raise ex.NotFoundException(
                    ds_id, _("DataSource id '%s' not found"))

            validate.check_tenant_for_update(context, data_source)
            validate.check_protected_from_update(data_source, values)

            data_source.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)

    return data_source
Ejemplo n.º 16
0
def data_source_update(context, values):
    """Update an existing data source from *values* (keyed by 'id')."""
    session = get_session()
    try:
        with session.begin():
            ds_id = values['id']
            ds = _data_source_get(context, session, ds_id)
            if not ds:
                raise ex.NotFoundException(
                    ds_id, _("DataSource id '%s' not found"))

            # Both checks raise on violation; order matches the rest of
            # the update helpers (tenant first, then protection).
            validate.check_tenant_for_update(context, ds)
            validate.check_protected_from_update(ds, values)

            ds.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for DataSource: %s") % e.columns)

    return ds
Ejemplo n.º 17
0
Archivo: api.py Proyecto: crobby/sahara
def cluster_update(context, cluster_id, values):
    """Apply *values* to the cluster identified by *cluster_id*.

    :raises NotFoundException: no cluster with the given id exists
    :raises DBDuplicateEntry: the update violates a unique constraint
    """
    session = get_session()

    try:
        with session.begin():
            cluster = _cluster_get(context, session, cluster_id)
            if not cluster:
                raise ex.NotFoundException(
                    cluster_id, _("Cluster id '%s' not found!"))

            validate.check_tenant_for_update(context, cluster)
            validate.check_protected_from_update(cluster, values)

            cluster.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Cluster: %s") % e.columns)

    return cluster
Ejemplo n.º 18
0
Archivo: api.py Proyecto: crobby/sahara
def job_update(context, job_id, values):
    """Update an existing job record with *values*."""
    session = get_session()
    try:
        with session.begin():
            job = _job_get(context, session, job_id)
            if job is None:
                raise ex.NotFoundException(
                    job_id, _("Job id '%s' not found!"))

            # Tenant ownership first, then protected-resource rules.
            validate.check_tenant_for_update(context, job)
            validate.check_protected_from_update(job, values)

            job.update(values)
            session.add(job)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for Job: %s") % e.columns)

    return job
Ejemplo n.º 19
0
Archivo: api.py Proyecto: crobby/sahara
def job_binary_internal_update(context, job_binary_internal_id, values):
    """Returns a JobBinary updated with the provided values."""
    session = get_session()
    try:
        with session.begin():
            j_b_i = _job_binary_internal_get(
                context, session, job_binary_internal_id)
            if j_b_i is None:
                raise ex.NotFoundException(
                    job_binary_internal_id,
                    _("JobBinaryInternal id '%s' not found!"))

            validate.check_tenant_for_update(context, j_b_i)
            validate.check_protected_from_update(j_b_i, values)

            j_b_i.update(values)
    except db_exc.DBDuplicateEntry as e:
        # Surface unique-constraint violations as the project exception.
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinaryInternal: %s") % e.columns)

    return j_b_i
Ejemplo n.º 20
0
def job_binary_internal_update(context, job_binary_internal_id, values):
    """Returns a JobBinary updated with the provided values."""
    session = get_session()
    try:
        with session.begin():
            internal = _job_binary_internal_get(context, session,
                                                job_binary_internal_id)
            if not internal:
                raise ex.NotFoundException(
                    job_binary_internal_id,
                    _("JobBinaryInternal id '%s' not found!"))

            validate.check_tenant_for_update(context, internal)
            validate.check_protected_from_update(internal, values)

            internal.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinaryInternal: %s") % e.columns)

    return internal
Ejemplo n.º 21
0
def job_binary_update(context, values):
    """Returns a JobBinary updated with the provided values.

    The url of an internal-db binary may not be changed, and a binary
    referenced by a PENDING job may not be updated at all.
    """
    jb_id = values["id"]
    session = get_session()
    try:
        with session.begin():
            jb = _job_binary_get(context, session, jb_id)
            if jb is None:
                raise ex.NotFoundException(jb_id,
                                           _("JobBinary id '%s' not found"))

            validate.check_tenant_for_update(context, jb)
            validate.check_protected_from_update(jb, values)

            # We do not want to update the url for internal binaries
            new_url = values.get("url", None)
            if (new_url and "internal-db://" in jb["url"]
                    and jb["url"] != new_url):
                raise ex.UpdateFailedException(
                    jb_id,
                    _("The url for JobBinary Id '%s' can not "
                      "be updated because it is an internal-db url."))

            # Binaries referenced by not-yet-started jobs are frozen.
            for job in job_execution_get_all(context):
                if job.info["status"] != "PENDING":
                    continue
                if _check_job_binary_referenced(context, session, jb_id,
                                                job.job_id):
                    raise ex.UpdateFailedException(
                        jb_id,
                        _("JobBinary Id '%s' is used in a PENDING job "
                          "and can not be updated."))

            jb.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinary: %s") % e.columns)

    return jb
Ejemplo n.º 22
0
Archivo: api.py Proyecto: crobby/sahara
def job_binary_update(context, values):
    """Returns a JobBinary updated with the provided values.

    Rejects url changes on internal-db binaries and any update while a
    PENDING job references the binary.
    """
    jb_id = values["id"]
    session = get_session()
    try:
        with session.begin():
            jb = _job_binary_get(context, session, jb_id)
            if not jb:
                raise ex.NotFoundException(
                    jb_id, _("JobBinary id '%s' not found"))

            validate.check_tenant_for_update(context, jb)
            validate.check_protected_from_update(jb, values)

            # We do not want to update the url for internal binaries
            new_url = values.get("url", None)
            if new_url and "internal-db://" in jb["url"]:
                if new_url != jb["url"]:
                    raise ex.UpdateFailedException(
                        jb_id,
                        _("The url for JobBinary Id '%s' can not "
                          "be updated because it is an internal-db url."))

            pending_jobs = [job for job in job_execution_get_all(context)
                            if job.info["status"] == "PENDING"]
            for job in pending_jobs:
                if _check_job_binary_referenced(
                        context, session, jb_id, job.job_id):
                    raise ex.UpdateFailedException(
                        jb_id,
                        _("JobBinary Id '%s' is used in a PENDING job "
                          "and can not be updated."))

            jb.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for JobBinary: %s") % e.columns)

    return jb
Ejemplo n.º 23
0
    def test_public_on_protected(self):
        """Updating a protected object must fail unless the update
        clears is_protected, or changes only is_public.
        """
        prot = FakeObject(True, {"cat": 1,
                                 "dog": 2})
        values = {"cat": 3, "dog": 2}

        # Should raise because prot.is_protected is True
        with testtools.ExpectedException(ex.UpdateFailedException):
            acl.check_protected_from_update(prot, values)

        # Should not raise because values turns is_protected off
        values["is_protected"] = False
        acl.check_protected_from_update(prot, values)

        # Should be allowed because is_public is the only thing
        # that is potentially changing
        values = {"cat": 1, "dog": 2, "is_public": True}
        acl.check_protected_from_update(prot, values)

        values["cat"] = 3
        # Should raise because we are trying to change cat, too
        with testtools.ExpectedException(ex.UpdateFailedException):
            acl.check_protected_from_update(prot, values)
Ejemplo n.º 24
0
def check_job_execution_update(job_id, data, **kwargs):
    """Run tenant and protection ACL checks for a job-execution update."""
    ctx = context.current()
    job_execution = conductor.job_execution_get(ctx, job_id)

    acl.check_tenant_for_update(ctx, job_execution)
    acl.check_protected_from_update(job_execution, data)
Ejemplo n.º 25
0
def check_cluster_update(cluster_id, data, **kwargs):
    """Verify the caller may apply *data* to the given cluster."""
    ctx = context.current()
    cluster = api.get_cluster(cluster_id)

    acl.check_tenant_for_update(ctx, cluster)
    acl.check_protected_from_update(cluster, data)
Ejemplo n.º 26
0
def check_job_execution_update(job_execution_id, data, **kwargs):
    """Verify the caller may modify the given job execution."""
    current_ctx = context.current()
    job_execution = conductor.job_execution_get(current_ctx, job_execution_id)

    acl.check_tenant_for_update(current_ctx, job_execution)
    acl.check_protected_from_update(job_execution, data)