def process_create_subnetpool(self, plugin_context, data, result):
    session = plugin_context.session
    value = self._get_value(data, 'subnetpool_extension')
    record = TestSubnetPoolExtension(subnetpool_id=result['id'],
                                     value=value)
    session.add(record)
    result['subnetpool_extension'] = value
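The snippet above assumes a TestSubnetPoolExtension model mapped to an extension table. A minimal sketch of what such a model could look like, assuming a plain SQLAlchemy declarative mapping (the table and column definitions are guesses, not the actual test fixture):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class TestSubnetPoolExtension(Base):
    # Hypothetical layout: one row of extension data per subnet pool.
    __tablename__ = 'test_subnetpool_extensions'
    subnetpool_id = sa.Column(sa.String(36), primary_key=True)
    value = sa.Column(sa.String(255))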
Example #2
def _set_metrics_for_resource(session, r, metrics):
    for name, value in six.iteritems(metrics):
        if isinstance(value, uuid.UUID):
            try:
                update = session.query(Metric).filter(
                    Metric.id == value,
                    (Metric.created_by_user_id
                     == r.created_by_user_id),
                    (Metric.created_by_project_id
                     == r.created_by_project_id),
                ).update({"resource_id": r.id, "name": name})
            except exception.DBDuplicateEntry:
                raise indexer.NamedMetricAlreadyExists(name)
            if update == 0:
                raise indexer.NoSuchMetric(value)
        else:
            ap_name = value['archive_policy_name']
            m = Metric(id=uuid.uuid4(),
                       created_by_user_id=r.created_by_user_id,
                       created_by_project_id=r.created_by_project_id,
                       archive_policy_name=ap_name,
                       name=name,
                       resource_id=r.id)
            session.add(m)
            try:
                session.flush()
            except exception.DBDuplicateEntry:
                raise indexer.NamedMetricAlreadyExists(name)
            except exception.DBReferenceError as e:
                if (e.constraint ==
                        'fk_metric_archive_policy_name_archive_policy_name'):
                    raise indexer.NoSuchArchivePolicy(ap_name)
                raise

    session.expire(r, ['metrics'])
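As the isinstance check above shows, _set_metrics_for_resource accepts two shapes per metric name: a UUID links an existing metric to the resource, while a dict creates a new metric under a named archive policy. A hypothetical input showing both (the metric names and policy name are illustrative):

import uuid

metrics = {
    # Link an existing, unattached metric to the resource by id.
    'cpu.util': uuid.uuid4(),
    # Create a brand-new metric under the 'low' archive policy.
    'disk.iops': {'archive_policy_name': 'low'},
}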
Example #3
def create_volume():
    vol = Volume(name='vmax_vol',
                 fullname='vmax_vol pool 1 ',
                 pool='pool1')
    # Add the volume we've created to the DB and commit.
    session.add(vol)
    session.commit()
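The snippet leaves session and Volume undefined. A minimal setup it could run against, assuming an ordinary SQLAlchemy declarative model (all names and column types here are illustrative):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Volume(Base):
    # Hypothetical mapping for the Volume object used above.
    __tablename__ = 'volumes'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(50))
    fullname = sa.Column(sa.String(100))
    pool = sa.Column(sa.String(50))


engine = sa.create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()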
Example #4
def _add_secret(self, session, project, name, created_at, updated_at):
    s = models.Secret()
    s.name = name
    s.created_at = timeutils.parse_isotime(created_at)
    s.updated_at = timeutils.parse_isotime(updated_at)
    s.project_id = project.id
    session.add(s)
Example #6
def process_create_address_scope(self, plugin_context, data, result):
    session = plugin_context.session
    value = self._get_value(data, 'address_scope_extension')
    record = TestAddressScopeExtension(address_scope_id=result['id'],
                                       value=value)
    session.add(record)
    result['address_scope_extension'] = value
Example #7
def create_user():
    ed_user = User(name='ed', fullname='Ed Jones', password='******')
    ed_user.addresses = [
        Address(email_address='*****@*****.**'),
        Address(email_address='*****@*****.**')
    ]

    # Let's add the user and its addresses we've created to the DB and commit.
    session.add(ed_user)
    session.commit()
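This is the classic SQLAlchemy ORM tutorial pattern: because User.addresses is a relationship with the default save-update cascade, session.add(ed_user) stages the two Address rows along with the user. A sketch of the models the snippet assumes (approximating the tutorial, not copied from it):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(50))
    fullname = sa.Column(sa.String(100))
    password = sa.Column(sa.String(50))
    # The default save-update cascade is what lets session.add(user)
    # stage the related Address objects too.
    addresses = relationship('Address', back_populates='user')


class Address(Base):
    __tablename__ = 'addresses'
    id = sa.Column(sa.Integer, primary_key=True)
    email_address = sa.Column(sa.String(100), nullable=False)
    user_id = sa.Column(sa.Integer, sa.ForeignKey('users.id'))
    user = relationship('User', back_populates='addresses')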
Example #8
def alert_source_create(context, values):
    """Add an alert source configuration."""
    alert_source_ref = models.AlertSource()
    alert_source_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(alert_source_ref)

    return _alert_source_get(context,
                             alert_source_ref['storage_id'],
                             session=session)
Example #9
def pool_create(context, values):
    """Create a pool from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()

    pool_ref = models.Pool()
    pool_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(pool_ref)

    return _pool_get(context, pool_ref['id'], session=session)
Example #10
def disk_create(context, values):
    """Create a disk from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()

    disk_ref = models.Disk()
    disk_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(disk_ref)

    return _disk_get(context, disk_ref['id'], session=session)
Example #11
def storage_create(context, values):
    """Add a storage device from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()

    storage_ref = models.Storage()
    storage_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(storage_ref)

    return _storage_get(context, storage_ref['id'], session=session)
Example #12
def volume_create(context, values):
    """Create a volume."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()

    vol_ref = models.Volume()
    vol_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(vol_ref)

    return _volume_get(context, vol_ref['id'], session=session)
Example #13
def controller_create(context, values):
    """Create a controller from the values dictionary."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()

    controller_ref = models.Controller()
    controller_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(controller_ref)

    return _controller_get(context, controller_ref['id'], session=session)
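Examples #9 through #13 repeat a single oslo.db idiom (Example #16 below follows the same shape keyed on storage_id, and #8 skips the id generation): generate an id when the caller did not supply one, copy the values dict onto a fresh model instance, add it inside a session.begin() block, which commits on exit, and re-read the row through the module's getter. A generic sketch of the shared pattern (create_row and its getter argument are hypothetical; get_session is assumed to be the module's session factory):

from oslo_utils import uuidutils


def create_row(context, model_cls, values, getter):
    """Insert one row for model_cls and read it back with the same session."""
    if not values.get('id'):
        values['id'] = uuidutils.generate_uuid()

    ref = model_cls()
    ref.update(values)  # oslo.db model bases support dict-style update()

    session = get_session()  # assumed: the module's session factory
    with session.begin():    # commits on successful exit
        session.add(ref)

    return getter(context, ref['id'], session=session)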
Example #14
def create_archive_policy(self, archive_policy):
    ap = ArchivePolicy(
        name=archive_policy.name,
        back_window=archive_policy.back_window,
        definition=archive_policy.definition,
        aggregation_methods=list(archive_policy.aggregation_methods),
    )
    session = self.engine_facade.get_session()
    session.add(ap)
    try:
        session.flush()
    except exception.DBDuplicateEntry:
        raise indexer.ArchivePolicyAlreadyExists(archive_policy.name)
    return ap
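Note the flush-and-translate idiom: session.flush() forces the INSERT to run immediately, so a unique-constraint violation surfaces right here as oslo.db's DBDuplicateEntry and can be re-raised as a domain exception. A hypothetical caller then handles the domain error rather than a database error (driver stands for an instance of the indexer class above):

try:
    driver.create_archive_policy(archive_policy)
except indexer.ArchivePolicyAlreadyExists:
    # The duplicate arrives as a domain exception, not a raw DB error.
    print("archive policy %s already exists" % archive_policy.name)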
Example #15
def create_archive_policy_rule(self, name, metric_pattern,
                               archive_policy_name):
    apr = ArchivePolicyRule(
        name=name,
        archive_policy_name=archive_policy_name,
        metric_pattern=metric_pattern
    )
    session = self.engine_facade.get_session()
    session.add(apr)
    try:
        session.flush()
    except exception.DBDuplicateEntry:
        raise indexer.ArchivePolicyRuleAlreadyExists(name)
    return apr
Example #16
def access_info_create(context, values):
    """Create a storage access information."""
    if not values.get('storage_id'):
        values['storage_id'] = uuidutils.generate_uuid()

    access_info_ref = models.AccessInfo()
    access_info_ref.update(values)

    session = get_session()
    with session.begin():
        session.add(access_info_ref)

    return _access_info_get(context,
                            access_info_ref['storage_id'],
                            session=session)
Example #17
def create_metric(self, id, created_by_user_id, created_by_project_id,
                  archive_policy_name,
                  name=None, resource_id=None,
                  details=False):
    m = Metric(id=id,
               created_by_user_id=created_by_user_id,
               created_by_project_id=created_by_project_id,
               archive_policy_name=archive_policy_name,
               name=name,
               resource_id=resource_id)
    session = self.engine_facade.get_session()
    session.add(m)
    session.flush()
    if details:
        # Touch the attribute so the lazy archive_policy relationship
        # loads while the session is still available.
        m.archive_policy
    return m
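Evaluating m.archive_policy purely for its side effect forces the lazily loaded relationship to be fetched while the session can still issue SQL. An equivalent, more explicit variant would eager-load it in the query; a sketch assuming standard SQLAlchemy (metric_id is illustrative):

from sqlalchemy.orm import joinedload

# Fetch the metric and its archive policy in a single query instead of
# relying on a later lazy load.
m = (session.query(Metric)
     .options(joinedload(Metric.archive_policy))
     .filter(Metric.id == metric_id)
     .one())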
Example #18
def save_blob_data_batch(context, blobs, session):
    """Perform batch uploading to database."""
    with session.begin():

        locations = []

        # blobs is a list of tuples (blob_data_id, data)
        for blob_data_id, data in blobs:
            blob_data = models.ArtifactBlobData()
            blob_data.id = blob_data_id
            blob_data.data = data.read()
            session.add(blob_data)
            locations.append("sql://" + blob_data.id)

        session.flush()

    return locations
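The blobs argument is a list of (blob_data_id, data) tuples in which data is any object exposing read(). A hypothetical call with in-memory buffers (context and session are assumed to come from the surrounding code):

import io
import uuid

blobs = [(str(uuid.uuid4()), io.BytesIO(b'first chunk')),
         (str(uuid.uuid4()), io.BytesIO(b'second chunk'))]

locations = save_blob_data_batch(context, blobs, session)
# locations now holds ['sql://<first id>', 'sql://<second id>']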
Example #19
def set_quotas(values, session):
    """Create new quota instances in database"""
    with session.begin():
        for project_id, project_quotas in values.items():

            # reset all project quotas
            session.query(models.ArtifactQuota).filter(
                models.ArtifactQuota.project_id == project_id).delete()

            # generate new quotas
            for quota_name, quota_value in project_quotas.items():
                q = models.ArtifactQuota()
                q.project_id = project_id
                q.quota_name = quota_name
                q.quota_value = quota_value
                session.add(q)

        # save all quotas
        session.flush()
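set_quotas replaces a project's quotas wholesale: the existing rows are deleted and the new ones inserted within a single transaction. A hypothetical values dict (the quota names here are illustrative, not necessarily the real keys):

values = {
    'project-a': {
        'max_artifact_number': 100,
        'max_uploaded_data': 1024 ** 3,  # 1 GiB
    },
}
set_quotas(values, session)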
Example #20
def create_metric(self, id, created_by_user_id, created_by_project_id,
                  archive_policy_name,
                  name=None, resource_id=None):
    m = Metric(id=id,
               created_by_user_id=created_by_user_id,
               created_by_project_id=created_by_project_id,
               archive_policy_name=archive_policy_name,
               name=name,
               resource_id=resource_id)
    session = self.engine_facade.get_session()
    session.add(m)
    try:
        session.flush()
    except exception.DBReferenceError as e:
        if (e.constraint ==
                'fk_metric_archive_policy_name_archive_policy_name'):
            raise indexer.NoSuchArchivePolicy(archive_policy_name)
        raise
    session.expunge_all()
    return m
Example #21
def create_resource(self, resource_type, id,
                    created_by_user_id, created_by_project_id,
                    user_id=None, project_id=None,
                    started_at=None, ended_at=None, metrics=None,
                    **kwargs):
    resource_cls = self._resource_type_to_class(resource_type)
    if (started_at is not None
            and ended_at is not None
            and started_at > ended_at):
        raise ValueError("Start timestamp cannot be after end timestamp")
    r = resource_cls(
        id=id,
        type=resource_type,
        created_by_user_id=created_by_user_id,
        created_by_project_id=created_by_project_id,
        user_id=user_id,
        project_id=project_id,
        started_at=started_at,
        ended_at=ended_at,
        **kwargs)
    session = self.engine_facade.get_session()
    with session.begin():
        session.add(r)
        try:
            session.flush()
        except exception.DBDuplicateEntry:
            raise indexer.ResourceAlreadyExists(id)
        except exception.DBReferenceError as ex:
            raise indexer.ResourceValueError(r.type,
                                             ex.key,
                                             getattr(r, ex.key))
        if metrics is not None:
            self._set_metrics_for_resource(session, r, metrics)

    # NOTE(jd) Force load of metrics :)
    r.metrics

    session.expunge_all()
    return r
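A hypothetical call, using the dict-valued metric shape that _set_metrics_for_resource from Example #2 accepts (indexer_driver, user_id and project_id are illustrative names):

import uuid

r = indexer_driver.create_resource(
    'generic', uuid.uuid4(),
    created_by_user_id=user_id,
    created_by_project_id=project_id,
    metrics={'cpu.util': {'archive_policy_name': 'low'}})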
Example #22
def process_create_port(self, plugin_context, data, result):
    session = plugin_context.session
    value = self._get_value(data, 'port_extension')
    record = TestPortExtension(port_id=result['id'], value=value)
    session.add(record)
    result['port_extension'] = value
Example #23
def update_resource(self, resource_type,
                    resource_id, ended_at=_marker, metrics=_marker,
                    append_metrics=False,
                    **kwargs):

    now = timeutils.utcnow()

    resource_cls = self._resource_type_to_class(resource_type)
    resource_history_cls = self._resource_type_to_class(resource_type,
                                                        "history")
    session = self.engine_facade.get_session()
    with session.begin():
        # NOTE(sileht): We use FOR UPDATE, which is not Galera friendly,
        # but there is no other way to cleanly patch a resource and store
        # its history safely when two concurrent calls are made.
        q = session.query(resource_cls).filter(
            resource_cls.id == resource_id).with_for_update()

        r = q.first()
        if r is None:
            raise indexer.NoSuchResource(resource_id)

        # Build history
        rh = resource_history_cls()
        for col in sqlalchemy.inspect(resource_cls).columns:
            setattr(rh, col.name, getattr(r, col.name))
        rh.revision_end = now
        session.add(rh)

        # Update the resource
        if ended_at is not _marker:
            # NOTE(jd) It would be better to have a check in the DB for
            # that, so we could just run the UPDATE
            if r.started_at is not None and ended_at is not None:
                # Convert to UTC because we store in UTC :(
                ended_at = timeutils.normalize_time(ended_at)
                if r.started_at > ended_at:
                    raise ValueError(
                        "Start timestamp cannot be after end timestamp")
            r.ended_at = ended_at

        r.revision_start = now

        if kwargs:
            for attribute, value in six.iteritems(kwargs):
                if hasattr(r, attribute):
                    setattr(r, attribute, value)
                else:
                    raise indexer.ResourceAttributeError(
                        r.type, attribute)

        if metrics is not _marker:
            if not append_metrics:
                session.query(Metric).filter(
                    Metric.resource_id == resource_id).update(
                        {"resource_id": None})
            self._set_metrics_for_resource(session, r, metrics)

    # NOTE(jd) Force load of metrics; do it outside the session!
    r.metrics

    return r
Example #24
def process_create_network(self, plugin_context, data, result):
    session = plugin_context.session
    value = self._get_value(data, 'network_extension')
    record = TestNetworkExtension(network_id=result['id'], value=value)
    session.add(record)
    result['network_extension'] = value
Example #26
def update_resource(self, resource_type,
                    resource_id, ended_at=_marker, metrics=_marker,
                    append_metrics=False,
                    **kwargs):

    now = utils.utcnow()

    resource_cls = self._resource_type_to_class(resource_type)
    resource_history_cls = self._resource_type_to_class(resource_type,
                                                        "history")
    session = self.engine_facade.get_session()
    try:
        with session.begin():
            # NOTE(sileht): We use FOR UPDATE, which is not Galera
            # friendly, but there is no other way to cleanly patch a
            # resource and store its history safely when two concurrent
            # calls are made.
            q = session.query(resource_cls).filter(
                resource_cls.id == resource_id).with_for_update()

            r = q.first()
            if r is None:
                raise indexer.NoSuchResource(resource_id)

            # Build history
            rh = resource_history_cls()
            for col in sqlalchemy.inspect(resource_cls).columns:
                setattr(rh, col.name, getattr(r, col.name))
            rh.revision_end = now
            session.add(rh)

            # Update the resource
            if ended_at is not _marker:
                # NOTE(jd) MySQL does not honor checks. I hate it.
                engine = self.engine_facade.get_engine()
                if engine.dialect.name == "mysql":
                    if r.started_at is not None and ended_at is not None:
                        if r.started_at > ended_at:
                            raise indexer.ResourceValueError(
                                resource_type, "ended_at", ended_at)
                r.ended_at = ended_at

            r.revision_start = now

            if kwargs:
                for attribute, value in six.iteritems(kwargs):
                    if hasattr(r, attribute):
                        setattr(r, attribute, value)
                    else:
                        raise indexer.ResourceAttributeError(
                            r.type, attribute)

            if metrics is not _marker:
                if not append_metrics:
                    session.query(Metric).filter(
                        Metric.resource_id == resource_id,
                        Metric.status == 'active').update(
                            {"resource_id": None})
                self._set_metrics_for_resource(session, r, metrics)
    except exception.DBConstraintError as e:
        if e.check_name == "ck_started_before_ended":
            raise indexer.ResourceValueError(
                resource_type, "ended_at", ended_at)
        raise

    # NOTE(jd) Force load of metrics; do it outside the session!
    r.metrics

    session.expunge_all()
    return r
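The except clause above depends on a named CHECK constraint on the resource table; on engines that enforce CHECKs, the violation arrives as DBConstraintError, while the MySQL branch earlier compensates for MySQL ignoring them. A sketch of how such a constraint might be declared (the table and column details are assumptions):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Resource(Base):
    # Hypothetical declaration of the named constraint the handler checks.
    __tablename__ = 'resource'
    id = sa.Column(sa.String(36), primary_key=True)
    started_at = sa.Column(sa.DateTime)
    ended_at = sa.Column(sa.DateTime)
    __table_args__ = (
        sa.CheckConstraint('started_at <= ended_at',
                           name='ck_started_before_ended'),
    )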