Example #1
    def test_trusted_filter_trusted_and_locale_formated_vtime_passes(
            self, req_mock):
        oat_data = {
            "hosts": [
                {
                    "host_name": "host1",
                    "trust_lvl": "trusted",
                    "vtime": timeutils.strtime(fmt="%c")
                },
                {
                    "host_name": "host2",
                    "trust_lvl": "trusted",
                    "vtime": timeutils.strtime(fmt="%D")
                },
                # This is just a broken date to ensure that
                # we're not just arbitrarily accepting any
                # date format.
            ]
        }
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {
                'memory_mb': 1024,
                'extra_specs': extra_specs
            }
        }
        host = fakes.FakeHostState('host1', 'host1', {})
        bad_host = fakes.FakeHostState('host2', 'host2', {})

        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertFalse(self.filt_cls.host_passes(bad_host,
                                                   filter_properties))
Example #2
    def _register_agent_states(self, lbaas_agents=False):
        """Register two L3 agents and two DHCP agents."""
        l3_hosta = helpers._get_l3_agent_dict(
            L3_HOSTA, constants.L3_AGENT_MODE_LEGACY)
        l3_hostb = helpers._get_l3_agent_dict(
            L3_HOSTB, constants.L3_AGENT_MODE_LEGACY)
        dhcp_hosta = helpers._get_dhcp_agent_dict(DHCP_HOSTA)
        dhcp_hostc = helpers._get_dhcp_agent_dict(DHCP_HOSTC)
        helpers.register_l3_agent(host=L3_HOSTA)
        helpers.register_l3_agent(host=L3_HOSTB)
        helpers.register_dhcp_agent(host=DHCP_HOSTA)
        helpers.register_dhcp_agent(host=DHCP_HOSTC)

        res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
        if lbaas_agents:
            lbaas_hosta = {
                'binary': 'neutron-loadbalancer-agent',
                'host': LBAAS_HOSTA,
                'topic': 'LOADBALANCER_AGENT',
                'configurations': {'device_drivers': ['haproxy_ns']},
                'agent_type': constants.AGENT_TYPE_LOADBALANCER}
            lbaas_hostb = copy.deepcopy(lbaas_hosta)
            lbaas_hostb['host'] = LBAAS_HOSTB
            callback = agents_db.AgentExtRpcCallback()
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hosta},
                                  time=timeutils.strtime())
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hostb},
                                  time=timeutils.strtime())
            res += [lbaas_hosta, lbaas_hostb]

        return res
Example #3
 def test_scheduler_equal_distribution(self):
     cfg.CONF.set_override('dhcp_agents_per_network', 1)
     self._save_networks(['1111', '2222', '3333'])
     agents = self._get_agents(['host-c', 'host-d'])
     self._save_agents(agents)
     callback = agents_db.AgentExtRpcCallback()
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostc},
                           time=timeutils.strtime())
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostd},
                           time=timeutils.strtime())
     self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                            {'id': '1111'})
     agent1 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                           ['1111'])
     self.hostd['configurations']['networks'] = 2
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostd},
                           time=timeutils.strtime())
     self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                            {'id': '2222'})
     agent2 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                           ['2222'])
     self.hostc['configurations']['networks'] = 4
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostc},
                           time=timeutils.strtime())
     self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                            {'id': '3333'})
     agent3 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                           ['3333'])
     self.assertEqual('host-c', agent1[0]['host'])
     self.assertEqual('host-c', agent2[0]['host'])
     self.assertEqual('host-d', agent3[0]['host'])
Example #4
 def test_scheduler_equal_distribution(self):
     cfg.CONF.set_override('dhcp_agents_per_network', 1)
     self._save_networks(['1111', '2222', '3333'])
     agents = self._get_agents(['host-c', 'host-d'])
     self._save_agents(agents)
     callback = agents_db.AgentExtRpcCallback()
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostc},
                           time=timeutils.strtime())
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostd},
                           time=timeutils.strtime())
     self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                            {'id': '1111'})
     agent1 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                           ['1111'])
     self.hostd['configurations']['networks'] = 2
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostd},
                           time=timeutils.strtime())
     self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                            {'id': '2222'})
     agent2 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                           ['2222'])
     self.hostc['configurations']['networks'] = 4
     callback.report_state(self.ctx,
                           agent_state={'agent_state': self.hostc},
                           time=timeutils.strtime())
     self.plugin.network_scheduler.schedule(self.plugin, self.ctx,
                                            {'id': '3333'})
     agent3 = self.plugin.get_dhcp_agents_hosting_networks(self.ctx,
                                                           ['3333'])
     self.assertEqual('host-c', agent1[0]['host'])
     self.assertEqual('host-c', agent2[0]['host'])
     self.assertEqual('host-d', agent3[0]['host'])
Example #5
 def wrapper(obj, *args, **kwargs):
     args = [timeutils.strtime(at=arg) if isinstance(arg, datetime.datetime)
             else arg for arg in args]
     for k, v in six.iteritems(kwargs):
         if k == "exc_val" and v:
             kwargs[k] = str(v)
         elif k == "exc_tb" and v and not isinstance(v, six.string_types):
             kwargs[k] = "".join(traceback.format_tb(v))
         elif isinstance(v, datetime.datetime):
             kwargs[k] = timeutils.strtime(at=v)
     if hasattr(fn, "__call__"):
         return fn(obj, *args, **kwargs)
     # NOTE(danms): We wrap a descriptor, so use that protocol
     return fn.__get__(None, obj)(*args, **kwargs)
Example #6
 def wrapper(obj, *args, **kwargs):
     args = [timeutils.strtime(at=arg) if isinstance(arg, datetime.datetime)
             else arg for arg in args]
     for k, v in six.iteritems(kwargs):
         if k == 'exc_val' and v:
             kwargs[k] = str(v)
         elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
             kwargs[k] = ''.join(traceback.format_tb(v))
         elif isinstance(v, datetime.datetime):
             kwargs[k] = timeutils.strtime(at=v)
     if hasattr(fn, '__call__'):
         return fn(obj, *args, **kwargs)
     # NOTE(danms): We wrap a descriptor, so use that protocol
     return fn.__get__(None, obj)(*args, **kwargs)
Example #7
 def test_get(self, mock_get):
     task_log = objects.TaskLog.get(self.context,
                                    fake_task_log['task_name'],
                                    fake_task_log['period_beginning'],
                                    fake_task_log['period_ending'],
                                    fake_task_log['host'],
                                    state=fake_task_log['state'])
     mock_get.assert_called_once_with(
         self.context,
         fake_task_log['task_name'],
         timeutils.strtime(at=fake_task_log['period_beginning']),
         timeutils.strtime(at=fake_task_log['period_ending']),
         fake_task_log['host'],
         state=fake_task_log['state'])
     self.compare_obj(task_log, fake_task_log)
Example #8
 def test_get(self, mock_get):
     task_log = objects.TaskLog.get(self.context,
                                    fake_task_log['task_name'],
                                    fake_task_log['period_beginning'],
                                    fake_task_log['period_ending'],
                                    fake_task_log['host'],
                                    state=fake_task_log['state'])
     mock_get.assert_called_once_with(
         self.context,
         fake_task_log['task_name'],
         timeutils.strtime(at=fake_task_log['period_beginning']),
         timeutils.strtime(at=fake_task_log['period_ending']),
         fake_task_log['host'],
         state=fake_task_log['state'])
     self.compare_obj(task_log, fake_task_log)
Example #9
 def to_dict(self):
     values = super(RequestContext, self).to_dict()
     # FIXME(dims): defensive hasattr() checks need to be
     # removed once we figure out why we are seeing stack
     # traces
     values.update({
         'user_id': getattr(self, 'user_id', None),
         'project_id': getattr(self, 'project_id', None),
         'is_admin': getattr(self, 'is_admin', None),
         'remote_address': getattr(self, 'remote_address', None),
         'timestamp': (timeutils.strtime(self.timestamp)
                       if hasattr(self, 'timestamp') else None),
         'request_id': getattr(self, 'request_id', None),
         'quota_class': getattr(self, 'quota_class', None),
         'user_name': getattr(self, 'user_name', None),
         'service_catalog': getattr(self, 'service_catalog', None),
         'project_name': getattr(self, 'project_name', None),
         'is_os_admin': getattr(self, 'is_os_admin', None),
         'api_version': getattr(self, 'api_version', None),
     })
     return values
Example #10
    def unmanage(self, volume):
        """Mark SolidFire Volume as unmanaged (export from Cinder)."""

        LOG.debug("Enter SolidFire unmanage...")
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(_LE("Account for Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "unmanage operation!") % volume['id'])
            raise exception.SolidFireAPIException("Failed to find account "
                                                  "for volume.")

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        export_time = timeutils.strtime()
        attributes = sf_vol['attributes']
        attributes['os_exported_at'] = export_time
        params = {'volumeID': int(sf_vol['volumeID']),
                  'attributes': attributes}

        data = self._issue_api_request('ModifyVolume',
                                       params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
Example #11
 def _cooldown_timestamp(self, reason):
     # Save resource metadata with a timestamp and reason
     # If we wanted to implement the AutoScaling API like AWS does,
     # we could maintain event history here, but since we only need
     # the latest event for cooldown, just store that for now
     metadata = {timeutils.strtime(): reason}
     self.metadata_set(metadata)
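The timestamp key written above only matters together with the cooldown check that reads it back (exercised by the _cooldown_inprogress tests later on this page). A minimal sketch of such a check, assuming the metadata layout shown here and that timeutils/datetime are imported as in the surrounding examples, could be:

 def _cooldown_inprogress(self):
     # Hedged sketch, not the Heat implementation: treat each metadata
     # key as a scaling timestamp and compare it against the configured
     # cooldown period (self.cooldown is an assumed attribute).
     metadata = self.metadata_get()
     for stamp in metadata:
         last = timeutils.parse_strtime(stamp)
         if timeutils.utcnow() - last < datetime.timedelta(
                 seconds=self.cooldown):
             return True
     return False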
Example #12
    def _format_server(self, server, **kwargs):
        _server = {}
        _server["id"] = server.id

        nics = kwargs.get('nics', [])
        nics_dict = dict((n['v4-fixed-ip'], n['net-id']) for n in nics)

        addresses = []
        if server.addresses.values():
            for net in server.addresses.values()[0]:
                addr = {}
                addr['address'] = net['addr']
                addr['mac_address'] = net.get('OS-EXT-IPS-MAC:mac_addr')
                addr['network_id'] = nics_dict.get(net['addr'])
                addr['version'] = net['version']
                addresses.append(addr)
 
        _server["addresses"] = addresses

        try:
            _server["host"] = getattr(server, 'OS-EXT-SRV-ATTR:host')
        except AttributeError:
            _server["host"] = server.host

        _server["fake_hostname"] = _server["host"]
        _server["status"] = server.status.lower()
        _server["keystone_project_id"] = server.tenant_id
        _server["power_state"] = getattr(server, 'OS-EXT-STS:power_state')
        _server["created_at"] = timeutils.strtime(
                timeutils.parse_isotime(server.created))

        _server.update(kwargs)

        return _server
Example #13
 def to_dict(self):
     value = super(RequestContext, self).to_dict()
     value.update({'auth_token': self.auth_token,
                   'auth_url': self.auth_url,
                   'domain_id': self.domain_id,
                   'domain_name': self.domain_name,
                   'user_domain_id': self.user_domain_id,
                   'user_domain_name': self.user_domain_name,
                   'user_name': self.user_name,
                   'user_id': self.user_id,
                   'project_name': self.project_name,
                   'project_id': self.project_id,
                   'is_admin': self.is_admin,
                   'read_only': self.read_only,
                   'roles': self.roles,
                   'show_deleted': self.show_deleted,
                   'request_id': self.request_id,
                   'trust_id': self.trust_id,
                   'auth_token_info': self.auth_token_info,
                   'password': self.password,
                   'all_tenants': self.all_tenants,
                   'timestamp': timeutils.strtime(self.timestamp) if
                   hasattr(self, 'timestamp') else None
                   })
     return value
Example #14
def register_extension(vim, key, type, label="OpenStack",
                       summary="OpenStack services", version="1.0"):
    """Create a new extension.

    :param vim: Vim object
    :param key: the key for the extension
    :param type: Managed entity type, as defined by the extension. This
                 matches the type field in the configuration about a
                 virtual machine or vApp
    :param label: Display label
    :param summary: Summary description
    :param version: Extension version number as a dot-separated string
    """
    extension_manager = vim.service_content.extensionManager
    client_factory = vim.client.factory
    os_ext = client_factory.create("ns0:Extension")
    os_ext.key = key
    entity_info = client_factory.create("ns0:ExtManagedEntityInfo")
    entity_info.type = type
    os_ext.managedEntityInfo = [entity_info]
    os_ext.version = version
    desc = client_factory.create("ns0:Description")
    desc.label = label
    desc.summary = summary
    os_ext.description = desc
    os_ext.lastHeartbeatTime = timeutils.strtime()
    vim.client.service.RegisterExtension(extension_manager, os_ext)
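A hedged usage sketch; the session handle and the extension key below are illustrative assumptions, not part of the snippet above:

# 'session.vim' stands in for an already-connected Vim object.
register_extension(session.vim,
                   key='org.openstack.compute',
                   type='VirtualMachine',
                   label='OpenStack Compute',
                   summary='OpenStack compute services',
                   version='1.0')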
Example #15
 def test_validate_ec2_timestamp_advanced_time_expired(self):
     timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
     params = {
         'Timestamp': timeutils.strtime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
     }
     expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
     self.assertTrue(expired)
Example #16
    def test_delete_action(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        self.delete('/actions/%s' % self.action.uuid)
        response = self.get_json('/actions/%s' % self.action.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

        self.context.show_deleted = True
        action = objects.Action.get_by_uuid(self.context, self.action.uuid)

        return_deleted_at = timeutils.strtime(action['deleted_at'])
        self.assertEqual(timeutils.strtime(test_time), return_deleted_at)
        self.assertEqual(action['state'], 'DELETED')
Example #17
    def unmanage(self, volume):
        """Mark SolidFire Volume as unmanaged (export from Cinder)."""

        LOG.debug("Enter SolidFire unmanage...")
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(_LE("Account for Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "unmanage operation!") % volume['id'])
            raise exception.SolidFireAPIException("Failed to find account "
                                                  "for volume.")

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        export_time = timeutils.strtime()
        attributes = sf_vol['attributes']
        attributes['os_exported_at'] = export_time
        params = {'volumeID': int(sf_vol['volumeID']),
                  'attributes': attributes}

        data = self._issue_api_request('ModifyVolume',
                                       params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
Example #18
    def show(self, req, id):
        """Retrieve tenant_usage for a specified tenant."""
        tenant_id = id
        context = req.environ['nova.context']

        authorize_show(context, {'project_id': tenant_id})

        try:
            (period_start, period_stop, ignore) = self._get_datetime_range(
                req)
        except exception.InvalidStrTime as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())

        now = timeutils.parse_isotime(timeutils.strtime())
        if period_stop > now:
            period_stop = now
        usage = self._tenant_usages_for_period(context,
                                               period_start,
                                               period_stop,
                                               tenant_id=tenant_id,
                                               detailed=True)
        if len(usage):
            usage = usage[0]
        else:
            usage = {}
        return {'tenant_usage': usage}
Example #19
    def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
                      impersonation=None, expires=None, role_ids=None,
                      role_names=None, remaining_uses=None,
                      allow_redelegation=False):
        ref = dict()
        ref['id'] = uuid.uuid4().hex
        ref['trustor_user_id'] = trustor_user_id
        ref['trustee_user_id'] = trustee_user_id
        ref['impersonation'] = impersonation or False
        ref['project_id'] = project_id
        ref['remaining_uses'] = remaining_uses
        ref['allow_redelegation'] = allow_redelegation

        if isinstance(expires, six.string_types):
            ref['expires_at'] = expires
        elif isinstance(expires, dict):
            ref['expires_at'] = timeutils.strtime(
                timeutils.utcnow() + datetime.timedelta(**expires),
                fmt=TIME_FORMAT)
        elif expires is None:
            pass
        else:
            raise NotImplementedError('Unexpected value for "expires"')

        role_ids = role_ids or []
        role_names = role_names or []
        if role_ids or role_names:
            ref['roles'] = []
            for role_id in role_ids:
                ref['roles'].append({'id': role_id})
            for role_name in role_names:
                ref['roles'].append({'name': role_name})

        return ref
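Worth noting: when expires is a dict it is expanded into datetime.timedelta keyword arguments. A hedged usage sketch (the ids are placeholders):

ref = self.new_trust_ref(trustor_user_id='trustor-uuid',
                         trustee_user_id='trustee-uuid',
                         project_id='project-uuid',
                         expires={'minutes': 1},  # becomes timedelta(minutes=1)
                         role_names=['member'])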
Example #20
 def _build_bulk_index(event_list):
     for ev in event_list:
         traits = {}
         for t in ev.traits:
             name = t.name
             value = t.value
             if name in datetime_trait_fields:
                 try:
                     ts = timeutils.parse_isotime(value)
                     ts = timeutils.normalize_time(ts)
                     value = timeutils.strtime(ts)
                 except ValueError:
                     LOG.exception(_LE('Could not parse timestamp [%s] '
                                       'from [%s] traits field'),
                                   value, name)
                     value = t.value
             traits[name] = value
         yield {'_op_type': 'create',
                '_index': '%s_%s' % (self.index_name,
                                     ev.generated.date().isoformat()),
                '_type': ev.event_type,
                '_id': ev.message_id,
                '_source': {'timestamp': ev.generated.isoformat(),
                            'traits': traits,
                            'raw': ev.raw}}
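A hedged sketch of how a generator like this is typically consumed (from within the method that defines it); 'es_client' is an assumed elasticsearch.Elasticsearch instance and not shown above:

from elasticsearch import helpers as es_helpers

# Stream the per-event actions straight into one bulk request.
es_helpers.bulk(es_client, _build_bulk_index(event_list))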
Example #21
 def to_dict(self):
     user_idt = '{user} {tenant}'.format(user=self.user_id or '-',
                                         tenant=self.tenant_id or '-')
     return {'user_id': self.user_id,
             'project_id': self.project_id,
             'is_admin': self.is_admin,
             'read_deleted': self.read_deleted,
             'roles': self.roles,
             'remote_address': self.remote_address,
             'timestamp': timeutils.strtime(self.timestamp),
             'request_id': self.request_id,
             'auth_token': self.auth_token,
             'quota_class': self.quota_class,
             'user_name': self.user_name,
             'username': self.username,
             'password': self.password,
             'service_catalog': self.service_catalog,
             'project_name': self.project_name,
             'instance_lock_checked': self.instance_lock_checked,
             'tenant': self.tenant,
             'tenant_id': self.tenant_id,
             'trust_id': self.trust_id,
             'trustor_user_id': self.trustor_user_id,
             'region_name': self.region_name,
             'user_identity': user_idt,
             'user': self.user,
             'auth_token_info': self.auth_token_info,
             'auth_url': self.auth_url,
             'show_deleted': self.show_deleted,
             'iam_assume_token': self.iam_assume_token,
             'user_domain_id': self.user_domain,
             'project_domain_id': self.project_domain}
Example #22
    def test_delete_action(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        self.delete('/actions/%s' % self.action.uuid)
        response = self.get_json('/actions/%s' % self.action.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

        self.context.show_deleted = True
        action = objects.Action.get_by_uuid(self.context, self.action.uuid)

        return_deleted_at = timeutils.strtime(action['deleted_at'])
        self.assertEqual(timeutils.strtime(test_time), return_deleted_at)
        self.assertEqual(action['state'], 'DELETED')
Example #23
 def _generate_swift_object_name_prefix(self, backup):
     az = 'az_%s' % self.az
     backup_name = '%s_backup_%s' % (az, backup['id'])
     volume = 'volume_%s' % (backup['volume_id'])
     timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
     prefix = volume + '/' + timestamp + '/' + backup_name
     LOG.debug('_generate_swift_object_name_prefix: %s' % prefix)
     return prefix
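For orientation, a worked example of the prefix this builds (illustrative ids, self.az assumed to be 'nova', timestamp 2015-02-05 01:04:40 UTC):

# backup = {'id': 'b-1', 'volume_id': 'v-1'}
# -> 'volume_v-1/20150205010440/az_nova_backup_b-1'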
Example #24
 def _new_oauth_token_with_expires_at(self):
     key, secret, token = self._new_oauth_token()
     expires_at = timeutils.strtime()
     params = {'oauth_token': key,
               'oauth_token_secret': secret,
               'oauth_expires_at': expires_at}
     token = urlparse.urlencode(params)
     return (key, secret, expires_at, token)
Example #25
 def _register_cfg_agent(self):
     callback = agents_db.AgentExtRpcCallback()
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': FIRST_CFG_AGENT},
                           time=timeutils.strtime())
     agent_db = self.core_plugin.get_agents_db(self.adminContext,
                                               filters={'host': [HOST]})
     self.agent_id1 = agent_db[0].id
Example #26
    def _sanitizer(self, obj):
        def to_primitive(_type, _value):
            return {"_type": _type, "_value": _value}

        if isinstance(obj, datetime.datetime):
            return to_primitive("datetime", timeutils.strtime(obj))

        return super(RPCJSONSerializer, self)._sanitizer(obj)
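Assuming strtime's default format, the primitive this produces for a datetime would look roughly like (illustrative value):

# _sanitizer(datetime.datetime(2015, 2, 5, 1, 4, 40))
# -> {"_type": "datetime", "_value": "2015-02-05T01:04:40.000000"}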
Example #27
 def _register_ml2_agents(self):
     callback = agents_db.AgentExtRpcCallback()
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_2},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_3},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_4},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_5},
                           time=timeutils.strtime())
Example #28
 def _generate_object_name_prefix(self, backup):
     """Generates a oNest backup object name prefix.
        Warning: oNest Object name has a limited length.
     """
     timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
     prefix = timestamp + '_' + backup['id']
     LOG.debug('Object name prefix: %s.', prefix)
     return prefix
Example #29
 def _generate_swift_object_name_prefix(self, backup):
     az = 'az_%s' % self.az
     backup_name = '%s_backup_%s' % (az, backup['id'])
     volume = 'volume_%s' % (backup['volume_id'])
     timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
     prefix = volume + '/' + timestamp + '/' + backup_name
     LOG.debug('_generate_swift_object_name_prefix: %s' % prefix)
     return prefix
Example #30
 def _register_cfg_agent(self):
     callback = agents_db.AgentExtRpcCallback()
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': FIRST_CFG_AGENT},
                           time=timeutils.strtime())
     agent_db = self.core_plugin.get_agents_db(self.adminContext,
                                               filters={'host': [HOST]})
     self.agent_id1 = agent_db[0].id
Example #31
    def test_validate_ec2_timestamp_advanced_time(self):

        # EC2 request with Timestamp in advanced time
        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
        params = {'Timestamp': timeutils.strtime(timestamp,
                                                 "%Y-%m-%dT%H:%M:%SZ")}
        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
        self.assertFalse(expired)
Example #32
    def _sanitizer(self, obj):
        def to_primitive(_type, _value):
            return {"_type": _type, "_value": _value}

        if isinstance(obj, datetime.datetime):
            return to_primitive("datetime", timeutils.strtime(obj))

        return super(RPCJSONSerializer, self)._sanitizer(obj)
Example #33
 def _register_ml2_agents(self):
     callback = agents_db.AgentExtRpcCallback()
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_2},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_3},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_4},
                           time=timeutils.strtime())
     callback.report_state(self.adminContext,
                           agent_state={'agent_state': L2_AGENT_5},
                           time=timeutils.strtime())
Example #34
 def report_state(self, context, agent_state, use_call=False):
     cctxt = self.client.prepare()
     kwargs = {
         'agent_state': {'agent_state': agent_state},
         'time': timeutils.strtime(),
     }
     method = cctxt.call if use_call else cctxt.cast
     return method(context, 'report_state', **kwargs)
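A hedged sketch of agent-side code driving the wrapper above; 'StateReportClient' stands in for whichever class owns report_state(), and the payload keys mirror the agent dicts used elsewhere on this page:

client = StateReportClient()  # hypothetical owner of report_state()
client.report_state(
    context,
    {'binary': 'neutron-example-agent',
     'host': 'host-a',
     'topic': 'example_agent',
     'configurations': {},
     'agent_type': 'Example agent'},
    use_call=True)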
Example #35
 def _new_oauth_token_with_expires_at(self):
     key, secret, token = self._new_oauth_token()
     expires_at = timeutils.strtime()
     params = {'oauth_token': key,
               'oauth_token_secret': secret,
               'oauth_expires_at': expires_at}
     token = urlparse.urlencode(params)
     return (key, secret, expires_at, token)
Example #36
    def manage_existing(self, volume, external_ref):
        """Manages an existing SolidFire Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """

        sfid = external_ref.get('source-id', None)
        sfname = external_ref.get('name', None)
        if sfid is None:
            raise exception.SolidFireAPIException("Manage existing volume "
                                                  "requires 'source-id'.")

        # First get the volume on the SF cluster (MUST be active)
        params = {'startVolumeID': sfid, 'limit': 1}
        data = self._issue_api_request('ListActiveVolumes', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
        sf_ref = data['result']['volumes'][0]

        sfaccount = self._create_sfaccount(volume['project_id'])

        attributes = {}
        qos = {}
        if (self.configuration.sf_allow_tenant_qos
                and volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        import_time = timeutils.strtime(volume['created_at'])
        attributes = {
            'uuid': volume['id'],
            'is_clone': 'False',
            'os_imported_at': import_time,
            'old_name': sfname
        }
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {
            'name': volume['name'],
            'volumeID': sf_ref['volumeID'],
            'accountID': sfaccount['accountID'],
            'enable512e': self.configuration.sf_emulate_512,
            'attributes': attributes,
            'qos': qos
        }

        data = self._issue_api_request('ModifyVolume', params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        return self._get_model_info(sfaccount, sf_ref['volumeID'])
Example #37
    def _register_agent_states(self, lbaas_agents=False):
        """Register two L3 agents and two DHCP agents."""
        l3_hosta = helpers._get_l3_agent_dict(
            L3_HOSTA, constants.L3_AGENT_MODE_LEGACY)
        l3_hostb = helpers._get_l3_agent_dict(
            L3_HOSTB, constants.L3_AGENT_MODE_LEGACY)
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        dhcp_hostc = copy.deepcopy(dhcp_hosta)
        dhcp_hostc['host'] = DHCP_HOSTC
        lbaas_hosta = {
            'binary': 'neutron-loadbalancer-agent',
            'host': LBAAS_HOSTA,
            'topic': 'LOADBALANCER_AGENT',
            'configurations': {'device_drivers': ['haproxy_ns']},
            'agent_type': constants.AGENT_TYPE_LOADBALANCER}
        lbaas_hostb = copy.deepcopy(lbaas_hosta)
        lbaas_hostb['host'] = LBAAS_HOSTB
        callback = agents_db.AgentExtRpcCallback()
        helpers.register_l3_agent(host=L3_HOSTA)
        helpers.register_l3_agent(host=L3_HOSTB)
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': dhcp_hosta},
                              time=timeutils.strtime())
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': dhcp_hostc},
                              time=timeutils.strtime())

        res = [l3_hosta, l3_hostb, dhcp_hosta, dhcp_hostc]
        if lbaas_agents:
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hosta},
                                  time=timeutils.strtime())
            callback.report_state(self.adminContext,
                                  agent_state={'agent_state': lbaas_hostb},
                                  time=timeutils.strtime())
            res += [lbaas_hosta, lbaas_hostb]

        return res
Example #38
 def start(self):
     fmt = self.name + "--%Y-%m-%d--%H%M%S.log"
     cmd = [spawn.find_executable(self.exec_name),
            '--log-dir', DEFAULT_LOG_DIR,
            '--log-file', timeutils.strtime(fmt=fmt)]
     for filename in self.config_filenames:
         cmd += ['--config-file', filename]
     self.process = async_process.AsyncProcess(cmd)
     self.process.start(block=True)
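For orientation, the strtime(fmt=...) call above yields a per-run log file name; with self.name == 'dhcp-agent' started at 2015-02-05 01:04:40 UTC the resulting element would be roughly (illustrative):

# timeutils.strtime(fmt='dhcp-agent--%Y-%m-%d--%H%M%S.log')
# -> 'dhcp-agent--2015-02-05--010440.log'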
Example #39
    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported.  Note that the new volume is immediately ready
        for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}
        qos = {}

        if (self.configuration.sf_allow_tenant_qos
                and volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        create_time = timeutils.strtime(volume['created_at'])
        attributes = {
            'uuid': volume['id'],
            'is_clone': 'False',
            'created_at': create_time
        }
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {
            'name': 'UUID-%s' % volume['id'],
            'accountID': None,
            'sliceCount': slice_count,
            'totalSize': int(volume['size'] * units.Gi),
            'enable512e': self.configuration.sf_emulate_512,
            'attributes': attributes,
            'qos': qos
        }

        # NOTE(jdg): Check if we're a migration tgt, if so
        # use the old volume-id here for the SF Name
        migration_status = volume.get('migration_status', None)
        if migration_status and 'target' in migration_status:
            k, v = migration_status.split(':')
            params['name'] = 'UUID-%s' % v
            params['attributes']['migration_uuid'] = volume['id']
            params['attributes']['uuid'] = v

        return self._do_volume_create(volume['project_id'], params)
Example #40
    def test_delete_audit_template_by_name(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        self.delete(
            urlparse.quote('/audit_templates/%s' % self.audit_template.name))
        response = self.get_json(urlparse.quote('/audit_templates/%s' %
                                                self.audit_template.name),
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

        self.context.show_deleted = True
        audit_template = objects.AuditTemplate.get_by_name(
            self.context, self.audit_template.name)

        return_deleted_at = timeutils.strtime(audit_template['deleted_at'])
        self.assertEqual(timeutils.strtime(test_time), return_deleted_at)
Example #41
    def test_is_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        now = timeutils.utcnow()
        previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertTrue(pol._cooldown_inprogress())
Example #42
    def test_not_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
        previous_meta = {timeutils.strtime(awhile_ago): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertFalse(pol._cooldown_inprogress())
Example #43
 def fake_get_timestamp(ds_browser, ds_path):
     self.assertEqual('fake-ds-browser', ds_browser)
     self.assertEqual('[fake-ds] fake-path', str(ds_path))
     if not self.exists:
         return
     ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
                    timeutils.strtime(at=self._time,
                                      fmt=imagecache.TIMESTAMP_FORMAT))
     return ts
Example #44
 def fake_get_timestamp(ds_browser, ds_path):
     self.assertEqual('fake-ds-browser', ds_browser)
     self.assertEqual('[fake-ds] fake-path', str(ds_path))
     if not self.exists:
         return
     ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
                    timeutils.strtime(at=self._time,
                                      fmt=imagecache.TIMESTAMP_FORMAT))
     return ts
Example #45
    def test_delete_audit_template_by_name(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time
        self.delete(urlparse.quote('/audit_templates/%s' %
                                   self.audit_template.name))
        response = self.get_json(urlparse.quote(
            '/audit_templates/%s' % self.audit_template.name),
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['error_message'])

        self.context.show_deleted = True
        audit_template = objects.AuditTemplate.get_by_name(
            self.context, self.audit_template.name)

        return_deleted_at = timeutils.strtime(audit_template['deleted_at'])
        self.assertEqual(timeutils.strtime(test_time), return_deleted_at)
Example #46
    def test_not_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        awhile_ago = timeutils.utcnow() - datetime.timedelta(seconds=100)
        previous_meta = {timeutils.strtime(awhile_ago): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertFalse(pol._cooldown_inprogress())
Example #47
    def test_is_in_progress(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        now = timeutils.utcnow()
        previous_meta = {timeutils.strtime(now): 'ChangeInCapacity : 1'}
        self.patchobject(pol, 'metadata_get', return_value=previous_meta)
        self.assertTrue(pol._cooldown_inprogress())
Example #48
    def manage_existing(self, volume, external_ref):
        """Manages an existing SolidFire Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """

        sfid = external_ref.get('source-id', None)
        sfname = external_ref.get('name', None)
        if sfid is None:
            raise exception.SolidFireAPIException("Manage existing volume "
                                                  "requires 'source-id'.")

        # First get the volume on the SF cluster (MUST be active)
        params = {'startVolumeID': sfid,
                  'limit': 1}
        data = self._issue_api_request('ListActiveVolumes', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
        sf_ref = data['result']['volumes'][0]

        sfaccount = self._create_sfaccount(volume['project_id'])

        attributes = {}
        qos = {}
        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        import_time = timeutils.strtime(volume['created_at'])
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'os_imported_at': import_time,
                      'old_name': sfname}
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'name': volume['name'],
                  'volumeID': sf_ref['volumeID'],
                  'accountID': sfaccount['accountID'],
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        data = self._issue_api_request('ModifyVolume',
                                       params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        return self._get_model_info(sfaccount, sf_ref['volumeID'])
Example #49
 def test_get_all(self, mock_get_all):
     fake_task_logs = [dict(fake_task_log, id=1), dict(fake_task_log, id=2)]
     mock_get_all.return_value = fake_task_logs
     task_logs = objects.TaskLogList.get_all(
         self.context,
         fake_task_log['task_name'],
         fake_task_log['period_beginning'],
         fake_task_log['period_ending'],
         host=fake_task_log['host'],
         state=fake_task_log['state'])
     mock_get_all.assert_called_once_with(
         self.context,
         fake_task_log['task_name'],
         timeutils.strtime(at=fake_task_log['period_beginning']),
         timeutils.strtime(at=fake_task_log['period_ending']),
         host=fake_task_log['host'],
         state=fake_task_log['state'])
     for index, task_log in enumerate(task_logs):
         self.compare_obj(task_log, fake_task_logs[index])
Example #50
 def report_state(self, context, agent_state, use_call=False):
     cctxt = self.client.prepare()
     kwargs = {
         'agent_state': {
             'agent_state': agent_state
         },
         'time': timeutils.strtime(),
     }
     method = cctxt.call if use_call else cctxt.cast
     return method(context, 'report_state', **kwargs)
Example #51
 def to_dict(self):
     return {
         'name': self.name,
         'value': self.value,
         # NOTE(jaypipes): This is what jsonutils.dumps() does to
         # datetime.datetime objects, which is what timestamp is in
         # this object as well as the original simple dict metrics
         'timestamp': timeutils.strtime(self.timestamp),
         'source': self.source
     }
Example #52
 def test_get_all(self, mock_get_all):
     fake_task_logs = [dict(fake_task_log, id=1), dict(fake_task_log, id=2)]
     mock_get_all.return_value = fake_task_logs
     task_logs = objects.TaskLogList.get_all(
         self.context,
         fake_task_log['task_name'],
         fake_task_log['period_beginning'],
         fake_task_log['period_ending'],
         host=fake_task_log['host'],
         state=fake_task_log['state'])
     mock_get_all.assert_called_once_with(
         self.context,
         fake_task_log['task_name'],
         timeutils.strtime(at=fake_task_log['period_beginning']),
         timeutils.strtime(at=fake_task_log['period_ending']),
         host=fake_task_log['host'],
         state=fake_task_log['state'])
     for index, task_log in enumerate(task_logs):
         self.compare_obj(task_log, fake_task_logs[index])
Example #53
 def test_parse_last_modified(self):
     self.assertIsNone(self.swift_plugin.parse_last_modified(None))
     now = datetime.datetime(
         2015, 2, 5, 1, 4, 40, 0, pytz.timezone('GMT'))
     now_naive = datetime.datetime(
         2015, 2, 5, 1, 4, 40, 0)
     last_modified = timeutils.strtime(now, '%a, %d %b %Y %H:%M:%S %Z')
     self.assertEqual('Thu, 05 Feb 2015 01:04:40 GMT', last_modified)
     self.assertEqual(
         now_naive,
         self.swift_plugin.parse_last_modified(last_modified))
Example #54
    def test_metadata_is_written(self):
        t = template_format.parse(as_template)
        stack = utils.parse_stack(t, params=as_params)
        pol = self.create_scaling_policy(t, stack, 'my-policy')

        nowish = timeutils.strtime()
        reason = 'cool as'
        meta_set = self.patchobject(pol, 'metadata_set')
        self.patchobject(timeutils, 'strtime', return_value=nowish)
        pol._cooldown_timestamp(reason)
        meta_set.assert_called_once_with({nowish: reason})
Example #55
    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported.  Note that the new volume is immediately ready
        for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}
        qos = {}

        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        create_time = timeutils.strtime(volume['created_at'])
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'created_at': create_time}
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'name': 'UUID-%s' % volume['id'],
                  'accountID': None,
                  'sliceCount': slice_count,
                  'totalSize': int(volume['size'] * units.Gi),
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        # NOTE(jdg): Check if we're a migration tgt, if so
        # use the old volume-id here for the SF Name
        migration_status = volume.get('migration_status', None)
        if migration_status and 'target' in migration_status:
            k, v = migration_status.split(':')
            params['name'] = 'UUID-%s' % v
            params['attributes']['migration_uuid'] = volume['id']
            params['attributes']['uuid'] = v

        return self._do_volume_create(volume['project_id'], params)