def ping(self, context):
     """Ping should be called periodically to update zone status."""
     diff = utils.utcnow() - self.last_zone_db_check
     if diff.seconds >= FLAGS.zone_db_check_interval:
         logging.debug(_("Updating zone cache from db."))
         self.last_zone_db_check = utils.utcnow()
         self._refresh_from_db(context)
     self._poll_zones(context)
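The refresh check above is plain datetime.timedelta arithmetic. Note that timedelta.seconds holds only the sub-day remainder of the difference, so the pattern assumes the gap between pings stays well under a day. A minimal standalone sketch of the same check, using the stdlib directly (FLAGS and the project's utils wrapper are not shown here, so the interval value is a hypothetical stand-in):

import datetime

ZONE_DB_CHECK_INTERVAL = 60  # hypothetical stand-in for FLAGS.zone_db_check_interval

last_zone_db_check = datetime.datetime.utcnow() - datetime.timedelta(seconds=90)
diff = datetime.datetime.utcnow() - last_zone_db_check
if diff.seconds >= ZONE_DB_CHECK_INTERVAL:
    # 90s have passed, so the zone cache would be refreshed from the db
    last_zone_db_check = datetime.datetime.utcnow()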
Example #3
def stub_instance(id, user_id='fake', project_id='fake', host=None,
                  vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0):

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "local_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": utils.utcnow(),
        "launched_at": utils.utcnow(),
        "terminated_at": utils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress}

    return instance
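A hedged usage sketch for the stub above; the assertions only restate what the function builds and do not come from the real test suite:

inst = stub_instance(1, reservation_id='r-abc123', key_name='mykey')
assert inst['display_name'] == 'reservation_r-abc123'   # reservation id wins over the name
assert inst['key_data'] == 'FAKE'                        # any non-empty key_name gets fake key data
assert inst['vm_state'] == vm_states.BUILDING            # default when vm_state is not passed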
Example #4
def notify_usage_exists(instance_ref, current_period=False):
    """ Generates 'exists' notification for an instance for usage auditing
        purposes.

        Generates usage for last completed period, unless 'current_period'
        is True."""
    admin_context = context.get_admin_context()
    begin, end = utils.current_audit_period()
    bw = {}
    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end
    for b in db.bw_usage_get_by_instance(admin_context,
                                         instance_ref['id'],
                                         audit_start):
        bw[b.network_label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
    usage_info = utils.usage_from_instance(instance_ref,
                          audit_period_beginning=str(audit_start),
                          audit_period_ending=str(audit_end),
                          bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host,
                        'compute.instance.exists',
                        notifier_api.INFO,
                        usage_info)
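A worked example of the audit window selection above, with concrete datetimes (the one-month period is an assumption; the real boundaries come from utils.current_audit_period()):

import datetime

begin = datetime.datetime(2011, 2, 1)        # start of the last completed period
end = datetime.datetime(2011, 3, 1)          # end of the last completed period
now = datetime.datetime(2011, 3, 10, 8, 0)   # utils.utcnow() at call time

# current_period=False -> audit the completed window [begin, end)
# current_period=True  -> audit the in-progress window [end, now)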
Example #5
 def log_request_completion(self, response, request, start):
     apireq = request.environ.get('ec2.request', None)
     if apireq:
         controller = apireq.controller
         action = apireq.action
     else:
         controller = None
         action = None
     ctxt = request.environ.get('engine.context', None)
     delta = utils.utcnow() - start
     seconds = delta.seconds
     microseconds = delta.microseconds
     LOG.info(
         "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
         seconds,
         microseconds,
         request.remote_addr,
         request.method,
         "%s%s" % (request.script_name, request.path_info),
         controller,
         action,
         response.status_int,
         request.user_agent,
         request.content_type,
         response.content_type,
         context=ctxt)
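The elapsed time is logged as "<seconds>.<microseconds>s". A small sketch of that arithmetic; note that timedelta.microseconds is not zero-padded here, so for example 4,213 microseconds prints as "0.4213s":

import datetime

start = datetime.datetime.utcnow()
# ... handle the request ...
delta = datetime.datetime.utcnow() - start
print("%s.%ss" % (delta.seconds, delta.microseconds))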
Example #6
 def describe_hosts(self, context, **_kwargs):
     """Returns status info for all nodes. Includes:
         * Hostname
         * Compute (up, down, None)
         * Instance count
         * Volume (up, down, None)
         * Volume Count
     """
     services = db.service_get_all(context, False)
     now = utils.utcnow()
     hosts = []
     rv = []
     for host in [service['host'] for service in services]:
         if not host in hosts:
             hosts.append(host)
     for host in hosts:
         compute = [s for s in services if s['host'] == host \
                                        and s['binary'] == 'engine-compute']
         if compute:
             compute = compute[0]
         instances = db.instance_get_all_by_host(context, host)
         volume = [s for s in services if s['host'] == host \
                                        and s['binary'] == 'engine-volume']
         if volume:
             volume = volume[0]
         volumes = db.volume_get_all_by_host(context, host)
         rv.append(host_dict(host, compute, instances, volume, volumes,
                             now))
     return {'hosts': rv}
Example #7
    def _provision_volume(self, context, vol, vsa_id, availability_zone):

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            'size': vol['size'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': None,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': vol['name'],
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
            }

        size = vol['size']
        host = vol['host']
        name = vol['name']
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context.elevated(), options)
        driver.cast_to_volume_host(context, vol['host'],
                'create_volume', volume_id=volume_ref['id'],
                snapshot_id=None)
Example #8
 def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
              roles=None, remote_address=None, timestamp=None,
              request_id=None, auth_token=None, strategy='noauth'):
     """
     :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
         indicates deleted records are visible, 'only' indicates that
         *only* deleted records are visible.
     """
     self.user_id = user_id
     self.project_id = project_id
     self.roles = roles or []
     self.is_admin = is_admin
     if self.is_admin is None:
         self.is_admin = 'admin' in [x.lower() for x in self.roles]
     self.read_deleted = read_deleted
     self.remote_address = remote_address
     if not timestamp:
         timestamp = utils.utcnow()
     if isinstance(timestamp, basestring):
         timestamp = utils.parse_strtime(timestamp)
     self.timestamp = timestamp
     if not request_id:
         request_id = unicode(uuid.uuid4())
     self.request_id = request_id
     self.auth_token = auth_token
     self.strategy = strategy
     local.store.context = self
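A hedged usage sketch for the constructor above (the class name RequestContext is assumed from the surrounding project; only __init__ is shown here):

ctxt = RequestContext(user_id='fake-user', project_id='fake-project',
                      roles=['Member', 'Admin'])
assert ctxt.is_admin                 # derived from roles since is_admin was not given
assert ctxt.read_deleted == 'no'     # deleted records hidden by default
# timestamp defaults to utils.utcnow(); request_id defaults to a fresh UUID4 string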
Example #9
def notify_usage_exists(instance_ref, current_period=False):
    """ Generates 'exists' notification for an instance for usage auditing
        purposes.

        Generates usage for last completed period, unless 'current_period'
        is True."""
    admin_context = context.get_admin_context()
    begin, end = utils.current_audit_period()
    bw = {}
    if current_period:
        audit_start = end
        audit_end = utils.utcnow()
    else:
        audit_start = begin
        audit_end = end
    for b in db.bw_usage_get_by_instance(admin_context, instance_ref['id'],
                                         audit_start):
        bw[b.network_label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
    usage_info = utils.usage_from_instance(
        instance_ref,
        audit_period_beginning=str(audit_start),
        audit_period_ending=str(audit_end),
        bandwidth=bw)
    notifier_api.notify('compute.%s' % FLAGS.host, 'compute.instance.exists',
                        notifier_api.INFO, usage_info)
Example #11
 def __init__(self,
              user_id,
              project_id,
              is_admin=None,
              read_deleted="no",
              roles=None,
              remote_address=None,
              timestamp=None,
              request_id=None,
              auth_token=None,
              strategy='noauth'):
     """
     :param read_deleted: 'no' indicates deleted records are hidden, 'yes'
         indicates deleted records are visible, 'only' indicates that
         *only* deleted records are visible.
     """
     self.user_id = user_id
     self.project_id = project_id
     self.roles = roles or []
     self.is_admin = is_admin
     if self.is_admin is None:
         self.is_admin = 'admin' in [x.lower() for x in self.roles]
     self.read_deleted = read_deleted
     self.remote_address = remote_address
     if not timestamp:
         timestamp = utils.utcnow()
     if isinstance(timestamp, basestring):
         timestamp = utils.parse_strtime(timestamp)
     self.timestamp = timestamp
     if not request_id:
         request_id = unicode(uuid.uuid4())
     self.request_id = request_id
     self.auth_token = auth_token
     self.strategy = strategy
     local.store.context = self
Example #12
    def _provision_volume(self, context, vol, vsa_id, availability_zone):

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            'size': vol['size'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': None,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': vol['name'],
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
        }

        size = vol['size']
        host = vol['host']
        name = vol['name']
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context.elevated(), options)
        driver.cast_to_volume_host(context,
                                   vol['host'],
                                   'create_volume',
                                   volume_id=volume_ref['id'],
                                   snapshot_id=None)
Example #13
0
 def host_service_caps_stale(self, host, service):
     """Check if host service capabilites are not recent enough."""
     allowed_time_diff = FLAGS.periodic_interval * 3
     caps = self.service_states[host][service]
     if (utils.utcnow() - caps["timestamp"]) <= \
         datetime.timedelta(seconds=allowed_time_diff):
         return False
     return True
 def update_metadata(self, zone_metadata):
     """Update zone metadata after successful communications with
        child zone."""
     self.last_seen = utils.utcnow()
     self.attempt = 0
     self.capabilities = ", ".join(["%s=%s" % (k, v)
                     for k, v in zone_metadata.iteritems() if k != 'name'])
     self.is_active = True
 def update_service_capabilities(self, service_name, host, capabilities):
     """Update the per-service capabilities based on this notification."""
     logging.debug(_("Received %(service_name)s service update from "
             "%(host)s.") % locals())
     service_caps = self.service_states.get(host, {})
     capabilities["timestamp"] = utils.utcnow()  # Reported time
     service_caps[service_name] = capabilities
     self.service_states[host] = service_caps
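The staleness test above reduces to comparing the age of the reported timestamp against three periodic intervals. A standalone sketch with a hypothetical interval value:

import datetime

periodic_interval = 60                              # hypothetical FLAGS.periodic_interval
allowed = datetime.timedelta(seconds=periodic_interval * 3)

reported = datetime.datetime.utcnow() - datetime.timedelta(seconds=200)
stale = (datetime.datetime.utcnow() - reported) > allowed
# 200s elapsed > 180s allowed, so these capabilities count as stale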
Example #17
    def test_get_zone_capabilities_one_stale_service_per_host(self):
        zm = zone_manager.ZoneManager()
        expiry_time = (FLAGS.periodic_interval * 3) + 1

        # Two host services among four become stale
        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
        zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
        zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
        zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
        serv_caps_1 = zm.service_states["host1"]["svc2"]
        serv_caps_1["timestamp"] = utils.utcnow() - \
                               datetime.timedelta(seconds=expiry_time)
        serv_caps_2 = zm.service_states["host2"]["svc1"]
        serv_caps_2["timestamp"] = utils.utcnow() - \
                               datetime.timedelta(seconds=expiry_time)
        caps = zm.get_zone_capabilities(None)
        self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2),
                                     svc2_a=(7, 7), svc2_b=(8, 8)))
Example #18
 def update_service_capabilities(self, service_name, host, capabilities):
     """Update the per-service capabilities based on this notification."""
     logging.debug(
         _("Received %(service_name)s service update from "
           "%(host)s.") % locals())
     service_caps = self.service_states.get(host, {})
     capabilities["timestamp"] = utils.utcnow()  # Reported time
     service_caps[service_name] = capabilities
     self.service_states[host] = service_caps
Example #19
 def update_metadata(self, zone_metadata):
     """Update zone metadata after successful communications with
        child zone."""
     self.last_seen = utils.utcnow()
     self.attempt = 0
     self.capabilities = ", ".join([
         "%s=%s" % (k, v) for k, v in zone_metadata.iteritems()
         if k != 'name'
     ])
     self.is_active = True
Example #20
    def test_get_zone_capabilities_one_stale_service_per_host(self):
        zm = zone_manager.ZoneManager()
        expiry_time = (FLAGS.periodic_interval * 3) + 1

        # Two host services among four become stale
        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
        zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
        zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
        zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
        serv_caps_1 = zm.service_states["host1"]["svc2"]
        serv_caps_1["timestamp"] = utils.utcnow() - \
                               datetime.timedelta(seconds=expiry_time)
        serv_caps_2 = zm.service_states["host2"]["svc1"]
        serv_caps_2["timestamp"] = utils.utcnow() - \
                               datetime.timedelta(seconds=expiry_time)
        caps = zm.get_zone_capabilities(None)
        self.assertEquals(
            caps,
            dict(svc1_a=(1, 1), svc1_b=(2, 2), svc2_a=(7, 7), svc2_b=(8, 8)))
Example #21
    def test_get_zone_capabilities_expired_host(self):
        zm = zone_manager.ZoneManager()
        expiry_time = (FLAGS.periodic_interval * 3) + 1

        # Service capabilities stale
        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
        time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
        utils.set_time_override(time_future)
        caps = zm.get_zone_capabilities(None)
        self.assertEquals(caps, {})
        utils.clear_time_override()
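Several of these tests rely on utils.set_time_override() / utils.clear_time_override() to freeze what utils.utcnow() returns. The project's actual implementation is not shown in this listing; the following is only a minimal sketch of how such an override typically works:

import datetime

utcnow_override = None

def set_time_override(override_time):
    global utcnow_override
    utcnow_override = override_time

def clear_time_override():
    global utcnow_override
    utcnow_override = None

def utcnow():
    # return the frozen time while an override is set, otherwise the real clock
    if utcnow_override is not None:
        return utcnow_override
    return datetime.datetime.utcnow()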
Example #23
    def test_host_service_caps_stale_one_stale_service(self):
        zm = zone_manager.ZoneManager()
        expiry_time = (FLAGS.periodic_interval * 3) + 1

        # One service became stale
        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
        zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
        caps = zm.service_states["host1"]["svc1"]
        caps["timestamp"] = utils.utcnow() - \
                               datetime.timedelta(seconds=expiry_time)
        self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
        self.assertFalse(zm.host_service_caps_stale("host1", "svc2"))
Example #24
    def test_get_zone_capabilities_one_stale_host(self):
        zm = zone_manager.ZoneManager()
        expiry_time = (FLAGS.periodic_interval * 3) + 1

        # One host service capabilities become stale
        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
        zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
        serv_caps = zm.service_states["host1"]["svc1"]
        serv_caps["timestamp"] = utils.utcnow() - \
                               datetime.timedelta(seconds=expiry_time)
        caps = zm.get_zone_capabilities(None)
        self.assertEquals(caps, dict(svc1_a=(3, 3), svc1_b=(4, 4)))
Example #25
    def test_host_service_caps_stale_all_stale_services(self):
        zm = zone_manager.ZoneManager()
        expiry_time = (FLAGS.periodic_interval * 3) + 1

        # Both services became stale
        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
        zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
        time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
        utils.set_time_override(time_future)
        self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
        self.assertTrue(zm.host_service_caps_stale("host1", "svc2"))
        utils.clear_time_override()
Example #29
 def delete(self, context, volume_id):
     volume = self.get(context, volume_id)
     if volume['status'] != "available":
         raise exception.ApiError(_("Volume status must be available"))
     now = utils.utcnow()
     self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                'terminated_at': now})
     host = volume['host']
     rpc.cast(context,
              self.db.queue_get_for(context, FLAGS.volume_topic, host),
              {"method": "delete_volume",
               "args": {"volume_id": volume_id}})
Example #30
def notify(publisher_id, event_type, priority, payload):
    """
    Sends a notification using the specified driver

    Notify parameters:

    publisher_id - the source worker_type.host of the message
    event_type - the literal type of event (ex. Instance Creation)
    priority - patterned after the enumeration of Python logging levels in
               the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    payload - A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id - a UUID representing the id for this notification
    timestamp - the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example:

    {'message_id': str(uuid.uuid4()),
     'publisher_id': 'compute.host1',
     'timestamp': utils.utcnow(),
     'priority': 'WARN',
     'event_type': 'compute.create_instance',
     'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(
                 _('%s not in valid priorities' % priority))

    # Ensure everything is JSON serializable.
    payload = utils.to_primitive(payload, convert_instances=True)

    driver = utils.import_object(FLAGS.notification_driver)
    msg = dict(message_id=str(uuid.uuid4()),
                   publisher_id=publisher_id,
                   event_type=event_type,
                   priority=priority,
                   payload=payload,
                   timestamp=str(utils.utcnow()))
    try:
        driver.notify(msg)
    except Exception, e:
        LOG.exception(_("Problem '%(e)s' attempting to "
                        "send to notification system. Payload=%(payload)s" %
                        locals()))
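A usage sketch mirroring the call made by notify_usage_exists earlier in this listing (the INFO constant and usage_info payload are taken from that example):

notifier_api.notify('compute.%s' % FLAGS.host,      # publisher_id: worker_type.host
                    'compute.instance.exists',      # event_type
                    notifier_api.INFO,              # priority
                    usage_info)                     # JSON-serializable payload dict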
Example #31
    def test_get_zone_capabilities_all_stale_host_services(self):
        zm = zone_manager.ZoneManager()
        expiry_time = (FLAGS.periodic_interval * 3) + 1

        # All the host services become stale
        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
        zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
        zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
        zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
        time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
        utils.set_time_override(time_future)
        caps = zm.get_zone_capabilities(None)
        self.assertEquals(caps, {})
Example #32
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        volume_id = kwargs.get('volume_id', None)
        if volume_id is not None:
            now = utils.utcnow()
            db.volume_update(context, volume_id,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
            db.queue_get_for(context, 'volume', host),
            {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
Example #34
def notify(publisher_id, event_type, priority, payload):
    """
    Sends a notification using the specified driver

    Notify parameters:

    publisher_id - the source worker_type.host of the message
    event_type - the literal type of event (ex. Instance Creation)
    priority - patterned after the enumeration of Python logging levels in
               the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    payload - A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id - a UUID representing the id for this notification
    timestamp - the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example:

    {'message_id': str(uuid.uuid4()),
     'publisher_id': 'compute.host1',
     'timestamp': utils.utcnow(),
     'priority': 'WARN',
     'event_type': 'compute.create_instance',
     'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(_('%s not in valid priorities' % priority))

    # Ensure everything is JSON serializable.
    payload = utils.to_primitive(payload, convert_instances=True)

    driver = utils.import_object(FLAGS.notification_driver)
    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(utils.utcnow()))
    try:
        driver.notify(msg)
    except Exception, e:
        LOG.exception(
            _("Problem '%(e)s' attempting to "
              "send to notification system. Payload=%(payload)s" % locals()))
Example #35
def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""

    if update_db:
        # fall back on the id if the uuid is not present
        instance_id = kwargs.get('instance_id', None)
        instance_uuid = kwargs.get('instance_uuid', instance_id)
        if instance_uuid is not None:
            now = utils.utcnow()
            db.instance_update(context, instance_uuid,
                    {'host': host, 'scheduled_at': now})
    rpc.cast(context,
            db.queue_get_for(context, 'compute', host),
            {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
Example #36
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        volume_id = kwargs.get('volume_id', None)
        if volume_id is not None:
            now = utils.utcnow()
            db.volume_update(context, volume_id, {
                'host': host,
                'scheduled_at': now
            })
    rpc.cast(context, db.queue_get_for(context, 'volume', host), {
        "method": method,
        "args": kwargs
    })
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())
    def log_error(self, exception):
        """Something went wrong. Check to see if zone should be
           marked as offline."""
        self.last_exception = exception
        self.last_exception_time = utils.utcnow()
        api_url = self.api_url
        logging.warning(_("'%(exception)s' error talking to "
                          "zone %(api_url)s") % locals())

        max_errors = FLAGS.zone_failures_to_offline
        self.attempt += 1
        if self.attempt >= max_errors:
            self.is_active = False
            logging.error(_("No answer from zone %(api_url)s "
                            "after %(max_errors)d "
                            "attempts. Marking inactive.") % locals())
Example #38
 def delete(self, context, volume_id):
     volume = self.get(context, volume_id)
     if volume['status'] != "available":
         raise exception.ApiError(_("Volume status must be available"))
     now = utils.utcnow()
     self.db.volume_update(context, volume_id, {
         'status': 'deleting',
         'terminated_at': now
     })
     host = volume['host']
     rpc.cast(context,
              self.db.queue_get_for(context, FLAGS.volume_topic, host), {
                  "method": "delete_volume",
                  "args": {
                      "volume_id": volume_id
                  }
              })
Example #39
    def create_volume(self, context, volume_id, snapshot_id=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context,
                              volume_id,
                              {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(_("volume %(vol_name)s: creating lv of"
                    " size %(vol_size)sG") % locals())
            if snapshot_id is None:
                model_update = self.driver.create_volume(volume_ref)
            else:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(
                    volume_ref,
                    snapshot_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'], {'status': 'error'})
                self._notify_vsa(context, volume_ref, 'error')

        now = utils.utcnow()
        self.db.volume_update(context,
                              volume_ref['id'], {'status': 'available',
                                                 'launched_at': now})
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        self._notify_vsa(context, volume_ref, 'available')
        self._reset_stats()
        return volume_id
Example #40
    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        # NOTE(vish): We need a better method for creating fixtures for tests
        #             now that we have some required db setup for the system
        #             to work properly.
        self.start = utils.utcnow()
        shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
                        os.path.join(FLAGS.state_path, FLAGS.sqlite_db))

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        self.injected = []
        self._services = []
        self._original_flags = FLAGS.FlagValuesDict()
Example #42
    def create_volume(self, context, volume_id, snapshot_id=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context, volume_id, {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(
                _("volume %(vol_name)s: creating lv of"
                  " size %(vol_size)sG") % locals())
            if snapshot_id is None:
                model_update = self.driver.create_volume(volume_ref)
            else:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(
                    volume_ref, snapshot_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.volume_update(context, volume_ref['id'],
                                      {'status': 'error'})
                self._notify_vsa(context, volume_ref, 'error')

        now = utils.utcnow()
        self.db.volume_update(context, volume_ref['id'], {
            'status': 'available',
            'launched_at': now
        })
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        self._notify_vsa(context, volume_ref, 'available')
        self._reset_stats()
        return volume_id
Example #43
def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""

    if update_db:
        # fall back on the id if the uuid is not present
        instance_id = kwargs.get('instance_id', None)
        instance_uuid = kwargs.get('instance_uuid', instance_id)
        if instance_uuid is not None:
            now = utils.utcnow()
            db.instance_update(context, instance_uuid, {
                'host': host,
                'scheduled_at': now
            })
    rpc.cast(context, db.queue_get_for(context, 'compute', host), {
        "method": method,
        "args": kwargs
    })
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())
Example #44
    def log_error(self, exception):
        """Something went wrong. Check to see if zone should be
           marked as offline."""
        self.last_exception = exception
        self.last_exception_time = utils.utcnow()
        api_url = self.api_url
        logging.warning(
            _("'%(exception)s' error talking to "
              "zone %(api_url)s") % locals())

        max_errors = FLAGS.zone_failures_to_offline
        self.attempt += 1
        if self.attempt >= max_errors:
            self.is_active = False
            logging.error(
                _("No answer from zone %(api_url)s "
                  "after %(max_errors)d "
                  "attempts. Marking inactive.") % locals())
Example #45
    def authorize_token(self, token_hash):
        """ retrieves user information from the datastore given a token

        If the token has expired, returns None
        If the token is not found, returns None
        Otherwise returns dict(id=(the authorized user's id))

        This method will also remove the token if the timestamp is older than
        2 days ago.
        """
        ctxt = context.get_admin_context()
        try:
            token = self.db.auth_token_get(ctxt, token_hash)
        except exception.NotFound:
            return None
        if token:
            delta = utils.utcnow() - token['created_at']
            if delta.days >= 2:
                self.db.auth_token_destroy(ctxt, token['token_hash'])
            else:
                return token['user_id']
        return None
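A small worked example of the two-day cutoff above:

import datetime

created_at = datetime.datetime.utcnow() - datetime.timedelta(days=3)
delta = datetime.datetime.utcnow() - created_at
print(delta.days >= 2)   # True: the token would be destroyed and None returned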
Example #47
    def _generate_default_service_states(self):
        service_states = {}
        for i in range(self.host_num):
            host = {}
            hostname = 'host_' + str(i)
            if hostname in self.exclude_host_list:
                continue

            host['volume'] = {'timestamp': utils.utcnow(),
                              'drive_qos_info': {}}

            for j in range(self.drive_type_start_ix,
                           self.drive_type_start_ix + self.drive_type_num):
                dtype = {}
                dtype['Name'] = 'name_' + str(j)
                dtype['DriveType'] = 'type_' + str(j)
                dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
                dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
                dtype['TotalCapacity'] = dtype['TotalDrives'] * \
                                            dtype['DriveCapacity']
                dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
                                            dtype['DriveCapacity']
                dtype['DriveRpm'] = 7200
                dtype['DifCapable'] = 0
                dtype['SedCapable'] = 0
                dtype['PartitionDrive'] = {
                            'PartitionSize': 0,
                            'NumOccupiedPartitions': 0,
                            'NumFreePartitions': 0}
                dtype['FullDrive'] = {
                            'NumFreeDrives': dtype['TotalDrives'] - i,
                            'NumOccupiedDrives': i}
                host['volume']['drive_qos_info'][dtype['Name']] = dtype

            service_states[hostname] = host

        return service_states
Example #48
 def service_is_up(service):
     """Check whether a service is up based on last heartbeat."""
     last_heartbeat = service['updated_at'] or service['created_at']
     # Timestamps in DB are UTC.
     elapsed = utils.total_seconds(utils.utcnow() - last_heartbeat)
     return abs(elapsed) <= FLAGS.service_down_time
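utils.total_seconds presumably exists because timedelta.total_seconds() only appeared in Python 2.7. A sketch of an equivalent helper and the heartbeat check, with a hypothetical FLAGS value:

import datetime

def total_seconds(td):
    # equivalent of the assumed utils.total_seconds helper
    return td.days * 86400 + td.seconds + td.microseconds / 1e6

service_down_time = 60                                  # hypothetical FLAGS.service_down_time
last_heartbeat = datetime.datetime.utcnow() - datetime.timedelta(seconds=45)
elapsed = total_seconds(datetime.datetime.utcnow() - last_heartbeat)
print(abs(elapsed) <= service_down_time)                # True: heartbeat seen 45 seconds ago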
Example #49
 def __call__(self, req):
     start = utils.utcnow()
     rv = req.get_response(self.application)
     self.log_request_completion(rv, req, start)
     return rv
Example #50
 def delete(self, session=None):
     """Delete this object."""
     self.deleted = True
     self.deleted_at = utils.utcnow()
     self.save(session=session)
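This is the soft-delete pattern: the row is flagged rather than removed, which matches the deleted/deleted_at columns created by the migration in the next example. A hedged usage sketch, where volume_ref stands in for any model instance with this mixin:

volume_ref.delete(session=session)
assert volume_ref.deleted is True
assert volume_ref.deleted_at is not None   # set to utils.utcnow() at deletion time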
Example #51
import datetime

from sqlalchemy import *
from migrate import *

from engine import log as logging
from engine import utils

meta = MetaData()

# instance info cache table to add to DB
instance_info_caches = Table('instance_info_caches',
                             meta,
                             Column('created_at',
                                    DateTime(timezone=False),
                                    default=utils.utcnow()),
                             Column('updated_at',
                                    DateTime(timezone=False),
                                    onupdate=utils.utcnow()),
                             Column('deleted_at', DateTime(timezone=False)),
                             Column('deleted',
                                    Boolean(create_constraint=True,
                                            name=None)),
                             Column('id', Integer(), primary_key=True),
                             Column('network_info', Text()),
                             Column('instance_id',
                                    String(36),
                                    ForeignKey('instances.uuid'),
                                    nullable=False,
                                    unique=True),
                             mysql_engine='InnoDB')
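One detail worth noting in the column definitions above: default=utils.utcnow() is evaluated once, when the module is imported. SQLAlchemy also accepts the callable itself, which it then invokes per INSERT or UPDATE. A hedged alternative spelling of the same two columns:

from sqlalchemy import Column, DateTime

from engine import utils

# passing the callable (no parentheses) defers evaluation to each INSERT/UPDATE
created_at = Column('created_at', DateTime(timezone=False), default=utils.utcnow)
updated_at = Column('updated_at', DateTime(timezone=False), onupdate=utils.utcnow)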
Example #52
def upgrade(migrate_engine):
    meta.bind = migrate_engine
    # grab tables
    instance_info_caches = Table('instance_info_caches', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)
    vifs = Table('virtual_interfaces', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    floating_ips = Table('floating_ips', meta, autoload=True)

    # all of these functions return a python list of python dicts
    # that have nothing to do with sqlalchemy objects whatsoever
    # after returning
    def get_instances():
        # want all instances whether there is network info or not
        s = select([instances.c.id, instances.c.uuid])
        keys = ('id', 'uuid')

        return [dict(zip(keys, row)) for row in s.execute()]

    def get_vifs_by_instance_id(instance_id):
        s = select([vifs.c.id, vifs.c.uuid, vifs.c.address, vifs.c.network_id],
                   vifs.c.instance_id == instance_id)
        keys = ('id', 'uuid', 'address', 'network_id')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_network_by_id(network_id):
        s = select([networks.c.uuid, networks.c.label,
                    networks.c.project_id,
                    networks.c.dns1, networks.c.dns2,
                    networks.c.cidr, networks.c.cidr_v6,
                    networks.c.gateway, networks.c.gateway_v6,
                    networks.c.injected, networks.c.multi_host,
                    networks.c.bridge, networks.c.bridge_interface,
                    networks.c.vlan],
                   networks.c.id == network_id)
        keys = ('uuid', 'label', 'project_id', 'dns1', 'dns2',
                'cidr', 'cidr_v6', 'gateway', 'gateway_v6',
                'injected', 'multi_host', 'bridge', 'bridge_interface', 'vlan')
        return [dict(zip(keys, row)) for row in s.execute()]

    def get_fixed_ips_by_vif_id(vif_id):
        s = select([fixed_ips.c.id, fixed_ips.c.address],
                   fixed_ips.c.virtual_interface_id == vif_id)
        keys = ('id', 'address')
        fixed_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        # fixed ips have floating ips, so here they are
        for fixed_ip in fixed_ip_list:
            fixed_ip['version'] = 4
            fixed_ip['floating_ips'] =\
                   get_floating_ips_by_fixed_ip_id(fixed_ip['id'])
            fixed_ip['type'] = 'fixed'
            del fixed_ip['id']

        return fixed_ip_list

    def get_floating_ips_by_fixed_ip_id(fixed_ip_id):
        s = select([floating_ips.c.address],
                   floating_ips.c.fixed_ip_id == fixed_ip_id)
        # one-element tuple so zip() pairs the column name with its value
        keys = ('address',)
        floating_ip_list = [dict(zip(keys, row)) for row in s.execute()]

        for floating_ip in floating_ip_list:
            floating_ip['version'] = 4
            floating_ip['type'] = 'floating'

        return floating_ip_list

    def _ip_dict_from_string(ip_string, type):
        if ip_string:
            ip = {'address': ip_string,
                  'type': type}
            if ':' in ip_string:
                ip['version'] = 6
            else:
                ip['version'] = 4

            return ip

    def _get_fixed_ipv6_dict(cidr, mac, project_id):
        ip_string = ipv6.to_global(cidr, mac, project_id)
        return {'version': 6,
                'address': ip_string,
                'floating_ips': []}

    def _create_subnet(version, network, vif):
        if version == 4:
            cidr = network['cidr']
            gateway = network['gateway']
            ips = get_fixed_ips_by_vif_id(vif['id'])
        else:
            cidr = network['cidr_v6']
            gateway = network['gateway_v6']
            ips = [_get_fixed_ipv6_dict(network['cidr_v6'],
                                        vif['address'],
                                        network['project_id'])]

        # NOTE(tr3buchet) routes is left empty for now because there
        # is no good way to generate them or determine which is default
        subnet = {'version': version,
                  'cidr': cidr,
                  'dns': [],
                  'gateway': _ip_dict_from_string(gateway, 'gateway'),
                  'routes': [],
                  'ips': ips}

        if network['dns1'] and network['dns1']['version'] == version:
            subnet['dns'].append(network['dns1'])
        if network['dns2'] and network['dns2']['version'] == version:
            subnet['dns'].append(network['dns2'])

        return subnet

    # preload caches table
    # list is made up of a row(instance_id, nw_info_json) for each instance
    for instance in get_instances():
        logging.info("Updating %s" % (instance['uuid']))
        instance_id = instance['id']
        instance_uuid = instance['uuid']

        # instances have vifs, so an instance's nw_info is
        # a list of dicts, 1 dict for each vif
        nw_info = get_vifs_by_instance_id(instance_id)
        logging.info("VIFs for Instance %s: \n %s" % \
                        (instance['uuid'], nw_info))
        for vif in nw_info:
            network = get_network_by_id(vif['network_id'])[0]
            logging.info("Network for Instance %s: \n %s" % \
                        (instance['uuid'], network))

            # vifs have a network which has subnets, so create the subnets
            # subnets contain all of the ip information
            network['subnets'] = []

            network['dns1'] = _ip_dict_from_string(network['dns1'], 'dns')
            network['dns2'] = _ip_dict_from_string(network['dns2'], 'dns')

            # engine networks can only have 2 subnets
            if network['cidr']:
                network['subnets'].append(_create_subnet(4, network, vif))
            if network['cidr_v6']:
                network['subnets'].append(_create_subnet(6, network, vif))

            # put network together to fit model
            network['id'] = network.pop('uuid')
            network['meta'] = {}

            # NOTE(tr3buchet) this isn't absolutely necessary as hydration
            # would still work with these as keys, but cache generated by
            # the model would show these keys as a part of meta. i went
            # ahead and set it up the same way just so it looks the same
            if network['project_id']:
                network['meta']['project_id'] = network['project_id']
            del network['project_id']
            if network['injected']:
                network['meta']['injected'] = network['injected']
            del network['injected']
            if network['multi_host']:
                network['meta']['multi_host'] = network['multi_host']
            del network['multi_host']
            if network['bridge_interface']:
                network['meta']['bridge_interface'] = \
                                                  network['bridge_interface']
            del network['bridge_interface']
            if network['vlan']:
                network['meta']['vlan'] = network['vlan']
            del network['vlan']

            # ip information now lives in the subnet, pull them out of network
            del network['dns1']
            del network['dns2']
            del network['cidr']
            del network['cidr_v6']
            del network['gateway']
            del network['gateway_v6']

            # don't need meta if it's empty
            if not network['meta']:
                del network['meta']

            # put vif together to fit model
            del vif['network_id']
            vif['id'] = vif.pop('uuid')
            vif['network'] = network
            # vif['meta'] could also be set to contain rxtx data here
            # but it isn't exposed in the api and is still being rewritten

            logging.info("VIF network for instance %s: \n %s" % \
                        (instance['uuid'], vif['network']))

        # jsonify nw_info
        row = {'created_at': utils.utcnow(),
               'updated_at': utils.utcnow(),
               'instance_id': instance_uuid,
               'network_info': json.dumps(nw_info)}

        # write row to table
        insert = instance_info_caches.insert().values(**row)
        migrate_engine.execute(insert)
Example #53

import datetime

from sqlalchemy import *
from migrate import *

from engine import log as logging
from engine import utils

meta = MetaData()

# instance info cache table to add to DB
instance_info_caches = Table('instance_info_caches', meta,
        Column('created_at', DateTime(timezone=False),
               default=utils.utcnow()),
        Column('updated_at', DateTime(timezone=False),
               onupdate=utils.utcnow()),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(create_constraint=True, name=None)),
        Column('id', Integer(), primary_key=True),
        Column('network_info', Text()),
        Column('instance_id', String(36),
               ForeignKey('instances.uuid'),
               nullable=False,
               unique=True),
        mysql_engine='InnoDB')


def upgrade(migrate_engine):
    meta.bind = migrate_engine
Example #54
def stub_instance(id, metadata=None, image_ref="10", flavor_id="1",
                  name=None, vm_state=None, task_state=None, uuid=None,
                  access_ip_v4="", access_ip_v6=""):
    if metadata is not None:
        metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()]
    else:
        metadata_items = [{'key':'seq', 'value':id}]

    if uuid is None:
        uuid = FAKE_UUID

    inst_type = instance_types.get_instance_type_by_flavor_id(int(flavor_id))

    instance = {
        "id": int(id),
        "name": str(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": "fake",
        "project_id": "fake",
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": "",
        "key_data": "",
        "vm_state": vm_state or vm_states.ACTIVE,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "local_gb": 0,
        "hostname": "",
        "host": "",
        "instance_type": dict(inst_type),
        "user_data": "",
        "reservation_id": "",
        "mac_address": "",
        "scheduled_at": utils.utcnow(),
        "launched_at": utils.utcnow(),
        "terminated_at": utils.utcnow(),
        "availability_zone": "",
        "display_name": name or "server%s" % id,
        "display_description": "",
        "locked": False,
        "metadata": metadata_items,
        "access_ip_v4": access_ip_v4,
        "access_ip_v6": access_ip_v6,
        "uuid": uuid,
        "virtual_interfaces": [],
        "progress": 0,
    }

    instance["fixed_ips"] = [{"address": '192.168.0.1',
                              "network":
                                      {'label': 'public', 'cidr_v6': None},
                              "virtual_interface":
                                      {'address': 'aa:aa:aa:aa:aa:aa'},
                              "floating_ips": []}]

    return instance