Example #1
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(
                            _LW('task run outlasted interval by %s sec') %
                            -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
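
A minimal usage sketch of the looping call driven by _inner above, assuming the oslo-incubator loopingcall module used in the later PXE examples (the import path and the _poll body are illustrative):

    from ironic.openstack.common import loopingcall

    def _poll():
        # One iteration of work; raising LoopingCallDone stops the loop and
        # becomes the value returned by wait().
        if deploy_is_finished():  # deploy_is_finished() is a hypothetical helper
            raise loopingcall.LoopingCallDone(retvalue=True)

    timer = loopingcall.FixedIntervalLoopingCall(_poll)
    result = timer.start(interval=1, initial_delay=None).wait()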
Example #2
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue

            if spacing is not None:
                idle_for = min(idle_for, spacing)

            LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
            self._periodic_last_run[task_name] = timeutils.utcnow()

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                              locals())
            time.sleep(0)

        return idle_for
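
A hedged sketch of how a caller might consume the idle_for return value, sleeping between passes (the manager object, context, and loop shape are illustrative, not taken from the examples):

    import time

    def _periodic_loop(manager, context):
        # Run every registered periodic task once, then sleep for however
        # long run_periodic_tasks says the loop may stay idle.
        while True:
            idle_for = manager.run_periodic_tasks(context,
                                                  raise_on_error=False)
            time.sleep(idle_for)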
Example #3
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_LW('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #4
File: pxe.py Project: schatt/ironic
        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                row = db.bm_node_get(context, node['id'])
                if instance['uuid'] != row.get('instance_uuid'):
                    locals['error'] = _("Node associated with another instance"
                                        " while waiting for deploy of %s")
                    raise loopingcall.LoopingCallDone()

                status = row.get('task_state')
                if (status == states.DEPLOYING and locals['started'] is False):
                    LOG.info(
                        _("PXE deploy started for instance %s") %
                        instance['uuid'])
                    locals['started'] = True
                elif status in (states.DEPLOYDONE, states.ACTIVE):
                    LOG.info(
                        _("PXE deploy completed for instance %s") %
                        instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == states.DEPLOYFAIL:
                    locals['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                locals['error'] = _("Baremetal node deleted while waiting "
                                    "for deployment of instance %s")

            if (CONF.pxe_deploy_timeout and timeutils.utcnow() > expiration):
                locals['error'] = _("Timeout reached while waiting for "
                                    "PXE deploy of instance %s")
            if locals['error']:
                raise loopingcall.LoopingCallDone()
Example #5
        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                row = db.bm_node_get(context, node['id'])
                if instance['uuid'] != row.get('instance_uuid'):
                    locals['error'] = _("Node associated with another instance"
                                        " while waiting for deploy of %s")
                    raise loopingcall.LoopingCallDone()

                status = row.get('task_state')
                if (status == states.DEPLOYING
                        and locals['started'] is False):
                    LOG.info(_("PXE deploy started for instance %s")
                                % instance['uuid'])
                    locals['started'] = True
                elif status in (states.DEPLOYDONE,
                                states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s")
                                % instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == states.DEPLOYFAIL:
                    locals['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                locals['error'] = _("Baremetal node deleted while waiting "
                                    "for deployment of instance %s")

            if (CONF.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                locals['error'] = _("Timeout reached while waiting for "
                                     "PXE deploy of instance %s")
            if locals['error']:
                raise loopingcall.LoopingCallDone()
Example #6
    def _add_nodes_filters(self, query, filters):
        if filters is None:
            filters = []

        if 'chassis_uuid' in filters:
            # get_chassis() raises an exception if the chassis is not found
            chassis_obj = self.get_chassis(filters['chassis_uuid'])
            query = query.filter_by(chassis_id=chassis_obj.id)
        if 'associated' in filters:
            if filters['associated']:
                query = query.filter(models.Node.instance_uuid != None)
            else:
                query = query.filter(models.Node.instance_uuid == None)
        if 'reserved' in filters:
            if filters['reserved']:
                query = query.filter(models.Node.reservation != None)
            else:
                query = query.filter(models.Node.reservation == None)
        if 'maintenance' in filters:
            query = query.filter_by(maintenance=filters['maintenance'])
        if 'driver' in filters:
            query = query.filter_by(driver=filters['driver'])
        if 'provision_state' in filters:
            query = query.filter_by(provision_state=filters['provision_state'])
        if 'provisioned_before' in filters:
            limit = timeutils.utcnow() - datetime.timedelta(
                                         seconds=filters['provisioned_before'])
            query = query.filter(models.Node.provision_updated_at < limit)

        return query
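
A sketch of the filter keys this helper recognizes; the dict values and the driver name below are illustrative:

    filters = {
        'associated': True,           # only nodes bound to an instance_uuid
        'reserved': False,            # exclude nodes currently reserved
        'maintenance': False,
        'driver': 'pxe_ipmitool',     # illustrative driver name
        'provisioned_before': 3600,   # seconds since provision_updated_at
    }
    query = self._add_nodes_filters(model_query(models.Node), filters)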
Example #7
    def _add_nodes_filters(self, query, filters):
        if filters is None:
            filters = []

        if 'chassis_uuid' in filters:
            # get_chassis() raises an exception if the chassis is not found
            chassis_obj = self.get_chassis(filters['chassis_uuid'])
            query = query.filter_by(chassis_id=chassis_obj.id)
        if 'associated' in filters:
            if filters['associated']:
                query = query.filter(models.Node.instance_uuid != None)
            else:
                query = query.filter(models.Node.instance_uuid == None)
        if 'reserved' in filters:
            if filters['reserved']:
                query = query.filter(models.Node.reservation != None)
            else:
                query = query.filter(models.Node.reservation == None)
        if 'maintenance' in filters:
            query = query.filter_by(maintenance=filters['maintenance'])
        if 'driver' in filters:
            query = query.filter_by(driver=filters['driver'])
        if 'provision_state' in filters:
            query = query.filter_by(provision_state=filters['provision_state'])
        if 'provisioned_before' in filters:
            limit = timeutils.utcnow() - datetime.timedelta(
                seconds=filters['provisioned_before'])
            query = query.filter(models.Node.provision_updated_at < limit)

        return query
Example #8
        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                node.refresh()
                status = node['provision_state']
                if (status == states.DEPLOYING
                    and local_status['started'] is False):
                    LOG.info(_("PXE deploy started for instance %s")
                                % node['instance_uuid'])
                    local_status['started'] = True
                elif status in (states.DEPLOYDONE,
                                states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s")
                                % node['instance_uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == states.DEPLOYFAIL:
                    local_status['error'] = _("PXE deploy failed for"
                                              " instance %s")
            except exception.NodeNotFound:
                local_status['error'] = _("Baremetal node deleted"
                                          "while waiting for deployment"
                                          " of instance %s")

            if (CONF.pxe.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                local_status['error'] = _("Timeout reached while waiting for "
                                     "PXE deploy of instance %s")
            if local_status['error']:
                raise loopingcall.LoopingCallDone()
Example #9
 def soft_delete(self, synchronize_session='evaluate'):
     return self.update(
         {
             'deleted': literal_column('id'),
             'updated_at': literal_column('updated_at'),
             'deleted_at': timeutils.utcnow()
         },
         synchronize_session=synchronize_session)
Example #10
 def touch_conductor(self, hostname):
     session = get_session()
     with session.begin():
         query = model_query(models.Conductor, session=session).filter_by(hostname=hostname)
         # since we're not changing any other field, manually set updated_at
         count = query.update({"updated_at": timeutils.utcnow()})
         if count == 0:
             raise exception.ConductorNotFound(conductor=hostname)
Example #11
 def touch_conductor(self, hostname):
     session = get_session()
     with session.begin():
         query = model_query(models.Conductor, session=session).\
                     filter_by(hostname=hostname)
         # since we're not changing any other field, manually set updated_at
         count = query.update({'updated_at': timeutils.utcnow()})
         if count == 0:
             raise exception.ConductorNotFound(conductor=hostname)
Example #12
    def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
                 roles=None, remote_address=None, timestamp=None,
                 request_id=None, auth_token=None, overwrite=True,
                 quota_class=None, user_name=None, project_name=None,
                 service_catalog=None, instance_lock_checked=False, **kwargs):
        """Initialize this RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(_('Arguments dropped when creating context: %s') %
                    str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, basestring):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [s for s in service_catalog
                                    if s.get('type') in ('volume',)]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
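
A minimal construction sketch for this context class (all argument values are illustrative):

    ctxt = RequestContext(user_id='fake-user',
                          project_id='fake-project',
                          is_admin=False,
                          roles=['member'],
                          overwrite=False)
    # timestamp defaults to timeutils.utcnow() and request_id is generated
    # automatically when they are omitted.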
Example #13
    def list_active_conductor_drivers(self, interval):
        # TODO(deva): add configurable default 'interval', somewhere higher
        #             up the code. This isn't a db-specific option.
        limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
        result = model_query(models.Conductor).filter(models.Conductor.updated_at >= limit).all()

        driver_set = set()
        for row in result:
            driver_set.update(set(row["drivers"]))
        return list(driver_set)
Example #14
    def deploy(self, task, node):
        """Perform a deployment to a node.

        Given a node with complete metadata, deploy the indicated image
        to the node.

        :param task: a TaskManager instance.
        :param node: the Node to act upon.
        """

        pxe_info = _get_tftp_image_info(node)

        _create_pxe_config(task, node, pxe_info)
        _cache_images(node, pxe_info)

        local_status = {'error': '', 'started': False}

        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                node.refresh()
                status = node['provision_state']
                if (status == states.DEPLOYING
                    and local_status['started'] is False):
                    LOG.info(_("PXE deploy started for instance %s")
                                % node['instance_uuid'])
                    local_status['started'] = True
                elif status in (states.DEPLOYDONE,
                                states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s")
                                % node['instance_uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == states.DEPLOYFAIL:
                    local_status['error'] = _("PXE deploy failed for"
                                              " instance %s")
            except exception.NodeNotFound:
                local_status['error'] = _("Baremetal node deleted"
                                          "while waiting for deployment"
                                          " of instance %s")

            if (CONF.pxe.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                local_status['error'] = _("Timeout reached while waiting for "
                                     "PXE deploy of instance %s")
            if local_status['error']:
                raise loopingcall.LoopingCallDone()

        expiration = timeutils.utcnow() + datetime.timedelta(
                            seconds=CONF.pxe.pxe_deploy_timeout)
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
        timer.start(interval=1).wait()

        if local_status['error']:
            raise exception.InstanceDeployFailure(
                    local_status['error'] % node['instance_uuid'])
Example #15
File: api.py Project: epim/ironic
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:       A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(_("%s not in valid priorities") % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(
        message_id=str(uuid.uuid4()),
        publisher_id=publisher_id,
        event_type=event_type,
        priority=priority,
        payload=payload,
        timestamp=str(timeutils.utcnow()),
    )

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(
                _("Problem '%(e)s' attempting to "
                  "send to notification system. "
                  "Payload=%(payload)s") % dict(e=e, payload=payload))
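
A usage sketch mirroring the message example in the docstring above (the publisher and payload values are illustrative):

    notify(context,
           publisher_id='compute.host1',
           event_type='compute.create_instance',
           priority='INFO',
           payload={'instance_id': 12})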
Example #16
 def register_conductor(self, values):
     try:
         conductor = models.Conductor()
         conductor.update(values)
         # NOTE(deva): ensure updated_at field has a non-null initial value
         if not conductor.get("updated_at"):
             conductor.update({"updated_at": timeutils.utcnow()})
         conductor.save()
         return conductor
     except db_exc.DBDuplicateEntry:
         raise exception.ConductorAlreadyRegistered(conductor=values["hostname"])
Example #17
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:       A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(_('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(
                _("Problem '%(e)s' attempting to "
                  "send to notification system. "
                  "Payload=%(payload)s") % dict(e=e, payload=payload))
Example #18
 def register_conductor(self, values):
     try:
         conductor = models.Conductor()
         conductor.update(values)
         # NOTE(deva): ensure updated_at field has a non-null initial value
         if not conductor.get('updated_at'):
             conductor.update({'updated_at': timeutils.utcnow()})
         conductor.save()
         return conductor
     except db_exc.DBDuplicateEntry:
         raise exception.ConductorAlreadyRegistered(
             conductor=values['hostname'])
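
A hedged registration sketch (the hostname and driver names are illustrative; the dbapi handle follows the db_api.get_instance() pattern used elsewhere in these examples):

    dbapi = db_api.get_instance()
    conductor = dbapi.register_conductor({'hostname': 'conductor-1',
                                          'drivers': ['fake', 'pxe_ipmitool']})
    # updated_at is pre-populated with timeutils.utcnow() so later heartbeat
    # queries (touch_conductor, get_active_driver_dict) see a non-null value.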
Example #19
File: api.py Project: nkaul/ironic
    def get_active_driver_dict(self, interval=None):
        if interval is None:
            interval = CONF.conductor.heartbeat_timeout

        limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
        result = model_query(models.Conductor).filter(models.Conductor.updated_at >= limit).all()

        # build mapping of drivers to the set of hosts which support them
        d2c = collections.defaultdict(set)
        for row in result:
            for driver in row["drivers"]:
                d2c[driver].add(row["hostname"])
        return d2c
Example #20
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted`, if use_soft_delete is True) old
    duplicate rows from the table named `table_name`.

    :param migrate_engine:  SQLAlchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True, rows are marked as `deleted`;
                            if False, rows are removed from the table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
                         "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
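
A hedged sketch of calling this helper from a migration's upgrade() (the table name and unique-constraint column are illustrative):

    def upgrade(migrate_engine):
        # Soft-delete any older rows that collide on the 'uuid' column,
        # keeping only the row with the highest id.
        drop_old_duplicate_entries_from_table(migrate_engine, 'nodes',
                                              True, 'uuid')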
Example #21
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
                                          use_soft_delete, *uc_column_names):
    """Drop all old rows having the same values for columns in uc_columns.

    This method drops (or marks as `deleted`, if use_soft_delete is True) old
    duplicate rows from the table named `table_name`.

    :param migrate_engine:  SQLAlchemy engine
    :param table_name:      Table with duplicates
    :param use_soft_delete: If True, rows are marked as `deleted`;
                            if False, rows are removed from the table
    :param uc_column_names: Unique constraint columns
    """
    meta = MetaData()
    meta.bind = migrate_engine

    table = Table(table_name, meta, autoload=True)
    columns_for_group_by = [table.c[name] for name in uc_column_names]

    columns_for_select = [func.max(table.c.id)]
    columns_for_select.extend(columns_for_group_by)

    duplicated_rows_select = select(columns_for_select,
                                    group_by=columns_for_group_by,
                                    having=func.count(table.c.id) > 1)

    for row in migrate_engine.execute(duplicated_rows_select):
        # NOTE(boris-42): Do not remove row that has the biggest ID.
        delete_condition = table.c.id != row[0]
        is_none = None  # workaround for pyflakes
        delete_condition &= table.c.deleted_at == is_none
        for name in uc_column_names:
            delete_condition &= table.c[name] == row[name]

        rows_to_delete_select = select([table.c.id]).where(delete_condition)
        for row in migrate_engine.execute(rows_to_delete_select).fetchall():
            LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
                       "%(table)s") % dict(id=row[0], table=table_name))

        if use_soft_delete:
            delete_statement = table.update().\
                where(delete_condition).\
                values({
                    'deleted': literal_column('id'),
                    'updated_at': literal_column('updated_at'),
                    'deleted_at': timeutils.utcnow()
                })
        else:
            delete_statement = table.delete().where(delete_condition)
        migrate_engine.execute(delete_statement)
Example #22
    def get_active_driver_dict(self, interval=None):
        if interval is None:
            interval = CONF.conductor.heartbeat_timeout

        limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
        result = model_query(models.Conductor).\
                    filter(models.Conductor.updated_at >= limit).\
                    all()

        # build mapping of drivers to the set of hosts which support them
        d2c = collections.defaultdict(set)
        for row in result:
            for driver in row['drivers']:
                d2c[driver].add(row['hostname'])
        return d2c
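
A sketch of the shape of the returned mapping (the driver and host names are illustrative):

    # Maps each driver name to the set of conductor hostnames supporting it.
    d2c = self.dbapi.get_active_driver_dict(interval=120)
    # e.g. {'pxe_ipmitool': {'conductor-1', 'conductor-2'},
    #       'fake': {'conductor-1'}}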
Example #23
    def activate_node(self, context, node, instance):
        """Wait for PXE deployment to complete."""

        locals = {'error': '', 'started': False}

        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                row = db.bm_node_get(context, node['id'])
                if instance['uuid'] != row.get('instance_uuid'):
                    locals['error'] = _("Node associated with another instance"
                                        " while waiting for deploy of %s")
                    raise loopingcall.LoopingCallDone()

                status = row.get('task_state')
                if (status == states.DEPLOYING
                        and locals['started'] is False):
                    LOG.info(_("PXE deploy started for instance %s")
                                % instance['uuid'])
                    locals['started'] = True
                elif status in (states.DEPLOYDONE,
                                states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s")
                                % instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == states.DEPLOYFAIL:
                    locals['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                locals['error'] = _("Baremetal node deleted while waiting "
                                    "for deployment of instance %s")

            if (CONF.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                locals['error'] = _("Timeout reached while waiting for "
                                     "PXE deploy of instance %s")
            if locals['error']:
                raise loopingcall.LoopingCallDone()

        expiration = timeutils.utcnow() + datetime.timedelta(
                            seconds=CONF.pxe_deploy_timeout)
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
        timer.start(interval=1).wait()

        if locals['error']:
            raise exception.InstanceDeployFailure(
                    locals['error'] % instance['uuid'])
Example #24
File: pxe.py Project: schatt/ironic
    def activate_node(self, context, node, instance):
        """Wait for PXE deployment to complete."""

        locals = {'error': '', 'started': False}

        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                row = db.bm_node_get(context, node['id'])
                if instance['uuid'] != row.get('instance_uuid'):
                    locals['error'] = _("Node associated with another instance"
                                        " while waiting for deploy of %s")
                    raise loopingcall.LoopingCallDone()

                status = row.get('task_state')
                if (status == states.DEPLOYING and locals['started'] is False):
                    LOG.info(
                        _("PXE deploy started for instance %s") %
                        instance['uuid'])
                    locals['started'] = True
                elif status in (states.DEPLOYDONE, states.ACTIVE):
                    LOG.info(
                        _("PXE deploy completed for instance %s") %
                        instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == states.DEPLOYFAIL:
                    locals['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                locals['error'] = _("Baremetal node deleted while waiting "
                                    "for deployment of instance %s")

            if (CONF.pxe_deploy_timeout and timeutils.utcnow() > expiration):
                locals['error'] = _("Timeout reached while waiting for "
                                    "PXE deploy of instance %s")
            if locals['error']:
                raise loopingcall.LoopingCallDone()

        expiration = timeutils.utcnow() + datetime.timedelta(
            seconds=CONF.pxe_deploy_timeout)
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
        timer.start(interval=1).wait()

        if locals['error']:
            raise exception.InstanceDeployFailure(locals['error'] %
                                                  instance['uuid'])
Example #25
File: api.py Project: nkaul/ironic
    def update_node(self, node_id, values):
        session = get_session()
        with session.begin():
            query = model_query(models.Node, session=session)
            query = add_identity_filter(query, node_id)
            try:
                ref = query.with_lockmode("update").one()
            except NoResultFound:
                raise exception.NodeNotFound(node=node_id)

            # Prevent instance_uuid overwriting
            if values.get("instance_uuid") and ref.instance_uuid:
                raise exception.NodeAssociated(node=node_id, instance=ref.instance_uuid)

            if "provision_state" in values:
                values["provision_updated_at"] = timeutils.utcnow()

            ref.update(values)
        return ref
Example #26
    def update_node(self, node_id, values):
        session = get_session()
        with session.begin():
            query = model_query(models.Node, session=session)
            query = add_identity_filter(query, node_id)
            try:
                ref = query.with_lockmode('update').one()
            except NoResultFound:
                raise exception.NodeNotFound(node=node_id)

            # Prevent instance_uuid overwriting
            if values.get("instance_uuid") and ref.instance_uuid:
                raise exception.NodeAssociated(node=node_id,
                                               instance=ref.instance_uuid)

            if 'provision_state' in values:
                values['provision_updated_at'] = timeutils.utcnow()

            ref.update(values)
        return ref
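
A hedged usage sketch (the dbapi handle and target state are illustrative); note that provision_updated_at is stamped automatically whenever provision_state changes:

    dbapi = db_api.get_instance()
    node = dbapi.update_node(node_id,
                             {'provision_state': states.DEPLOYWAIT})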
Example #27
    def decorator(f):
        # Test for old style invocation
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Control if run at all
        f._periodic_task = True
        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
            f._periodic_enabled = False
        else:
            f._periodic_enabled = kwargs.pop('enabled', True)

        # Control frequency
        f._periodic_spacing = kwargs.pop('spacing', 0)
        f._periodic_immediate = kwargs.pop('run_immediately', False)
        if f._periodic_immediate:
            f._periodic_last_run = None
        else:
            f._periodic_last_run = timeutils.utcnow()
        return f
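
A usage sketch of this decorator, assuming the enclosing factory (not shown) is exposed as periodic_task (the task name, body, and spacing are illustrative):

    @periodic_task(spacing=300, run_immediately=True)
    def _check_deploy_timeouts(self, context):
        # Runs roughly every 300 seconds; with run_immediately=True the
        # first run happens as soon as the periodic loop starts, because
        # _periodic_last_run is left as None above.
        pass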
Example #28
    def decorator(f):
        # Test for old style invocation
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Control if run at all
        f._periodic_task = True
        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
            f._periodic_enabled = False
        else:
            f._periodic_enabled = kwargs.pop('enabled', True)

        # Control frequency
        f._periodic_spacing = kwargs.pop('spacing', 0)
        f._periodic_immediate = kwargs.pop('run_immediately', False)
        if f._periodic_immediate:
            f._periodic_last_run = None
        else:
            f._periodic_last_run = timeutils.utcnow()
        return f
Example #29
    def _check_deploy_timeouts(self, context):
        if not CONF.conductor.deploy_callback_timeout:
            return

        filters = {'reserved': False, 'maintenance': False}
        columns = ['uuid', 'driver', 'provision_state', 'provision_updated_at']
        node_list = self.dbapi.get_nodeinfo_list(columns=columns,
                                                 filters=filters)

        for (node_uuid, driver, state, update_time) in node_list:
            mapped_hosts = self.driver_rings[driver].get_hosts(node_uuid)
            if self.host not in mapped_hosts:
                continue

            if state == states.DEPLOYWAIT:
                limit = (timeutils.utcnow() - datetime.timedelta(
                         seconds=CONF.conductor.deploy_callback_timeout))
                if timeutils.normalize_time(update_time) <= limit:
                    try:
                        task = task_manager.TaskManager(context, node_uuid)
                    except (exception.NodeLocked, exception.NodeNotFound):
                        continue

                    node = task.node
                    node.provision_state = states.DEPLOYFAIL
                    node.target_provision_state = states.NOSTATE
                    msg = (_('Timeout reached when waiting callback for '
                             'node %s') % node_uuid)
                    node.last_error = msg
                    LOG.error(msg)
                    node.save(task.context)

                    try:
                        thread = self._spawn_worker(
                                            utils.cleanup_after_timeout, task)
                        thread.link(lambda t: task.release_resources())
                    except exception.NoFreeConductorWorker:
                        task.release_resources()
Example #30
    def _check_deploy_timeouts(self, context):
        if not CONF.conductor.deploy_callback_timeout:
            return

        filters = {'reserved': False, 'maintenance': False}
        columns = ['uuid', 'driver', 'provision_state', 'provision_updated_at']
        node_list = self.dbapi.get_nodeinfo_list(columns=columns,
                                                 filters=filters)

        for (node_uuid, driver, state, update_time) in node_list:
            mapped_hosts = self.driver_rings[driver].get_hosts(node_uuid)
            if self.host not in mapped_hosts:
                continue

            if state == states.DEPLOYWAIT:
                limit = (timeutils.utcnow() - datetime.timedelta(
                    seconds=CONF.conductor.deploy_callback_timeout))
                if timeutils.normalize_time(update_time) <= limit:
                    try:
                        task = task_manager.TaskManager(context, node_uuid)
                    except (exception.NodeLocked, exception.NodeNotFound):
                        continue

                    node = task.node
                    node.provision_state = states.DEPLOYFAIL
                    node.target_provision_state = states.NOSTATE
                    msg = (_('Timeout reached when waiting callback for '
                             'node %s') % node_uuid)
                    node.last_error = msg
                    LOG.error(msg)
                    node.save(task.context)

                    try:
                        thread = self._spawn_worker(
                            utils.cleanup_after_timeout, task)
                        thread.link(lambda t: task.release_resources())
                    except exception.NoFreeConductorWorker:
                        task.release_resources()
Example #31
 def setUp(self):
     super(TestConductorObject, self).setUp()
     self.fake_conductor = utils.get_test_conductor(
         updated_at=timeutils.utcnow())
     self.dbapi = db_api.get_instance()
Example #32
 def setUp(self):
     super(TestConductorObject, self).setUp()
     self.fake_conductor = utils.get_test_conductor(
                                     updated_at=timeutils.utcnow())
     self.dbapi = db_api.get_instance()
Example #33
 def soft_delete(self, session=None):
     """Mark this object as deleted."""
     self.deleted = self.id
     self.deleted_at = timeutils.utcnow()
     self.save(session=session)
Example #34
 def soft_delete(self, session=None):
     """Mark this object as deleted."""
     self.deleted = self.id
     self.deleted_at = timeutils.utcnow()
     self.save(session=session)
Example #35
 def soft_delete(self, synchronize_session='evaluate'):
     return self.update({'deleted': literal_column('id'),
                         'updated_at': literal_column('updated_at'),
                         'deleted_at': timeutils.utcnow()},
                        synchronize_session=synchronize_session)
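
A hedged sketch of invoking the query-level soft delete (the model, session, and filter value are illustrative, and assume model_query returns a query class that carries this method):

    count = model_query(models.Node, session=session).\
        filter_by(uuid=node_uuid).\
        soft_delete(synchronize_session=False)
    # Matching rows get deleted=id and deleted_at=utcnow() instead of being
    # removed from the table.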
Example #36
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 overwrite=True,
                 quota_class=None,
                 user_name=None,
                 project_name=None,
                 service_catalog=None,
                 instance_lock_checked=False,
                 **kwargs):
        """Initialize this RequestContext.

        :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
            indicates deleted records are visible, 'only' indicates that
            *only* deleted records are visible.

        :param overwrite: Set to False to ensure that the greenthread local
            copy of the index is not overwritten.

        :param kwargs: Extra arguments that might be present, but we ignore
            because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(
                _('Arguments dropped when creating context: %s') % str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, basestring):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [
                s for s in service_catalog if s.get('type') in ('volume',)
            ]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
Example #37
class TimestampMixin(object):
    created_at = Column(DateTime, default=lambda: timeutils.utcnow())
    updated_at = Column(DateTime, onupdate=lambda: timeutils.utcnow())
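
A minimal sketch of mixing this into a declarative model (Base, the table name, and the extra columns are illustrative):

    from sqlalchemy import Column, Integer, String

    class Conductor(Base, TimestampMixin):
        __tablename__ = 'conductors'
        id = Column(Integer, primary_key=True)
        hostname = Column(String(255), nullable=False)
        # created_at is stamped on INSERT and updated_at on every UPDATE,
        # both via the lambdas above so utcnow() is evaluated per row.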