Example #1
def convert_image(source, dest, out_format, run_as_root=True):
    """Convert image to other format."""

    cmd = ('qemu-img', 'convert',
           '-O', out_format, source, dest)

    start_time = timeutils.utcnow()
    utils.execute(*cmd, run_as_root=run_as_root)
    duration = timeutils.delta_seconds(start_time, timeutils.utcnow())

    # NOTE(jdg): use a default of 1, mostly for unit test, but in
    # some incredible event this is 0 (cirros image?) don't barf
    if duration < 1:
        duration = 1
    fsz_mb = os.stat(source).st_size / units.Mi
    mbps = (fsz_mb / duration)
    msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, "
           "duration %(duration).2f sec, destination %(dest)s")
    LOG.debug(msg % {"src": source,
                     "sz": fsz_mb,
                     "duration": duration,
                     "dest": dest})

    msg = _("Converted %(sz).2f MB image at %(mbps).2f MB/s")
    LOG.info(msg % {"sz": fsz_mb, "mbps": mbps})
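A minimal sketch of the timing pattern Example #1 uses (capture utcnow() before and after the work, then measure the elapsed seconds with delta_seconds()); the timed_call helper name is an assumption, and the oslo.utils import path is taken from the imports shown in later examples:

from oslo.utils import timeutils


def timed_call(func, *args, **kwargs):
    # Record the start time, run the callable, then compute the elapsed
    # wall-clock seconds the same way convert_image() does above.
    start = timeutils.utcnow()
    result = func(*args, **kwargs)
    duration = timeutils.delta_seconds(start, timeutils.utcnow())
    return result, duration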
Example #2
    def _check_transient(self):
        pig_job_data = self.edp_info.read_pig_example_script()
        pig_lib_data = self.edp_info.read_pig_example_jar()
        self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                         job_data_list=[{'pig': pig_job_data}],
                         lib_data_list=[{'jar': pig_lib_data}])

        # set timeout in seconds
        timeout = self.common_config.TRANSIENT_CLUSTER_TIMEOUT * 60
        s_time = timeutils.utcnow()
        raise_failure = True
        # wait for the cluster to be deleted
        while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
            try:
                self.sahara.clusters.get(self.cluster_id)
            except sab.APIException as api_ex:
                if 'not found' in api_ex.message:
                    raise_failure = False
                    break
            time.sleep(2)

        if raise_failure:
            self.fail('Transient cluster has not been deleted within %s '
                      'minutes.'
                      % self.common_config.TRANSIENT_CLUSTER_TIMEOUT)
Example #3
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_LW('task run outlasted interval by %s sec'),
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #4
    def _create_env_template_services(self):
        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now

        self.expect_policy_check('create_env_template')

        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now
        body = {
            "name": "env_template_name",
            "services": [
                {
                    "instance": {
                        "assignFloatingIp": "true",
                        "keyname": "mykeyname",
                        "image": "cloud-fedora-v3",
                        "flavor": "m1.medium",
                        "?": {
                            "type": "io.murano.resources.Linux",
                            "id": "ef984a74-29a4-45c0-b1dc-2ab9f075732e"
                        }
                    },
                    "name": "orion",
                    "port": "8080",
                    "?": {
                        "type": "io.murano.apps.apache.Tomcat",
                        "id": "54cea43d-5970-4c73-b9ac-fea656f3c722"
                    }
                }
            ]
        }

        req = self._post('/templates', json.dumps(body))
        req.get_response(self.api)
Example #5
File: volumes.py Project: degorenko/sahara
def _detach_volume(instance, volume_id):
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume %s from instance %s" % (
            volume_id, instance.instance_name))
        nova.client().volumes.delete_server_volume(instance.instance_id,
                                                   volume_id)
    except Exception:
        LOG.exception(_LE("Can't detach volume %s"), volume.id)

    detach_timeout = CONF.detach_volume_timeout
    LOG.debug("Waiting %d seconds to detach %s volume" % (detach_timeout,
                                                          volume_id))
    s_time = tu.utcnow()
    while tu.delta_seconds(s_time, tu.utcnow()) < detach_timeout:
        volume = cinder.get_volume(volume_id)
        if volume.status not in ['available', 'error']:
            context.sleep(2)
        else:
            LOG.debug("Volume %s has been detached" % volume_id)
            return
    else:
        LOG.warn(_LW("Can't detach volume %(volume)s. "
                     "Current status of volume: %(status)s"),
                 {'volume': volume_id, 'status': volume.status})
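Examples #2, #5, and #12 share a poll-until-timeout loop built on delta_seconds() and a saved start time. A generic sketch of that loop under the assumption of a caller-supplied predicate (the wait_for and check names are illustrative; the real examples call project-specific APIs such as cinder.get_volume):

import time

from oslo.utils import timeutils


def wait_for(check, timeout, interval=2):
    # Poll check() until it returns True or `timeout` seconds have elapsed.
    start = timeutils.utcnow()
    while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
        if check():
            return True
        time.sleep(interval)
    return False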
Example #6
File: mongo.py Project: AsherBond/keystone
 def _get_doc_date(self):
     if self.ttl_seconds > 0:
         expire_delta = datetime.timedelta(seconds=self.ttl_seconds)
         doc_date = timeutils.utcnow() + expire_delta
     else:
         doc_date = timeutils.utcnow()
     return doc_date
 def get(key):
     if key == "tokens/%s" % VALID_TOKEN:
         dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
         return json.dumps(({'access': {
             'token': {'id': VALID_TOKEN,
                       'expires': timeutils.isotime(dt)},
             'user': {
                 'id': 'user_id1',
                 'name': 'user_name1',
                 'tenantId': '123i2910',
                 'tenantName': 'mytenant',
                 'roles': [
                     {'name': 'admin'},
                 ]},
         }}, timeutils.isotime(dt)))
     if key == "tokens/%s" % VALID_TOKEN2:
         dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
         return json.dumps(({'access': {
             'token': {'id': VALID_TOKEN2,
                       'expires': timeutils.isotime(dt)},
             'user': {
                 'id': 'user_id2',
                 'name': 'user-good',
                 'tenantId': 'project-good',
                 'tenantName': 'goodies',
                 'roles': [
                     {'name': 'Member'},
                 ]},
         }}, timeutils.isotime(dt)))
Example #8
 def inspect_container(self, container_id):
     if container_id not in self._containers:
         return
     container = self._containers[container_id]
     info = {
         'Args': [],
         'Config': container['Config'],
         'Created': str(timeutils.utcnow()),
         'Id': container_id,
         'Image': self._fake_id(),
         'NetworkSettings': {
             'Bridge': '',
             'Gateway': '',
             'IPAddress': '',
             'IPPrefixLen': 0,
             'PortMapping': None
         },
         'Path': 'bash',
         'ResolvConfPath': '/etc/resolv.conf',
         'State': {
             'ExitCode': 0,
             'Ghost': False,
             'Pid': 0,
             'Running': container['running'],
             'StartedAt': str(timeutils.utcnow())
         },
         'SysInitPath': '/tmp/docker',
         'Volumes': {},
     }
     return info
Example #9
    def _wait_on_task_execution(self):
        """Wait until all the tasks have finished execution and are in
        state of success or failure.
        """

        start = timeutils.utcnow()

        # wait for maximum of 5 seconds
        while timeutils.delta_seconds(start, timeutils.utcnow()) < 5:
            wait = False
            # Verify that no task is in status of pending or processing
            path = "/v2/tasks"
            res, content = self.http.request(path, 'GET',
                                             headers=minimal_task_headers())
            content_dict = json.loads(content)

            self.assertEqual(res.status, 200)
            res_tasks = content_dict['tasks']
            if len(res_tasks) != 0:
                for task in res_tasks:
                    if task['status'] in ('pending', 'processing'):
                        wait = True
                        break

            if wait:
                time.sleep(0.05)
                continue
            else:
                break
Example #10
    def test_snapshot_index_detail_serializer(self):
        serializer = volumes.SnapshotsTemplate()
        raw_snapshots = [dict(
                id='snap1_id',
                status='snap1_status',
                size=1024,
                createdAt=timeutils.utcnow(),
                displayName='snap1_name',
                displayDescription='snap1_desc',
                volumeId='vol1_id',
                ),
                       dict(
                id='snap2_id',
                status='snap2_status',
                size=1024,
                createdAt=timeutils.utcnow(),
                displayName='snap2_name',
                displayDescription='snap2_desc',
                volumeId='vol2_id',
                )]
        text = serializer.serialize(dict(snapshots=raw_snapshots))

        tree = etree.fromstring(text)

        self.assertEqual('snapshots', tree.tag)
        self.assertEqual(len(raw_snapshots), len(tree))
        for idx, child in enumerate(tree):
            self._verify_snapshot(raw_snapshots[idx], child)
Example #11
    def test_evaluate_ceilometer_controlled(self):
        rule = {'EvaluationPeriods': '1',
                'MetricName': 'test_metric',
                'Period': '300',
                'Statistic': 'Maximum',
                'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
                'Threshold': '30'}

        now = timeutils.utcnow()
        self.m.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().MultipleTimes().AndReturn(now)
        self.m.ReplayAll()

        # Now data breaches Threshold, but we're suspended
        last = now - datetime.timedelta(seconds=300)
        data = WatchData(35, now - datetime.timedelta(seconds=150))
        self.wr = watchrule.WatchRule(context=self.ctx,
                                      watch_name="testwatch",
                                      rule=rule,
                                      watch_data=[data],
                                      stack_id=self.stack_id,
                                      last_evaluated=last)

        self.wr.state_set(self.wr.CEILOMETER_CONTROLLED)

        actions = self.wr.evaluate()
        self.assertEqual(self.wr.CEILOMETER_CONTROLLED, self.wr.state)
        self.assertEqual([], actions)
Example #12
File: deploy.py Project: viplav/sahara
def _start_cloudera_manager(cluster):
    manager = pu.get_manager(cluster)
    with manager.remote() as r:
        cmd.start_cloudera_db(r)
        cmd.start_manager(r)

    timeout = 300
    LOG.debug("Waiting %(timeout)s seconds for Manager to start : " % {
        'timeout': timeout})
    s_time = timeutils.utcnow()
    while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
        try:
            conn = telnetlib.Telnet(manager.management_ip, CM_API_PORT)
            conn.close()
            break
        except IOError:
            context.sleep(2)
    else:
        message = _("Cloudera Manager failed to start in %(timeout)s minutes "
                    "on node '%(node)s' of cluster '%(cluster)s'") % {
                        'timeout': timeout / 60,
                        'node': manager.management_ip,
                        'cluster': cluster.name}
        raise ex.HadoopProvisionError(message)

    LOG.info(_LI("Cloudera Manager has been started"))
Example #13
 def test_list_since(self):
     self.revoke_api.revoke_by_user(user_id=1)
     self.revoke_api.revoke_by_user(user_id=2)
     past = timeutils.utcnow() - datetime.timedelta(seconds=1000)
     self.assertEqual(2, len(self.revoke_api.get_events(past)))
     future = timeutils.utcnow() + datetime.timedelta(seconds=1000)
     self.assertEqual(0, len(self.revoke_api.get_events(future)))
Example #14
File: session.py Project: mzlatkova/murano
def _get_or_create_lock(name, session, nested, retry=0):
    if nested:
        session.begin_nested()
    else:
        session.begin()
    existing = session.query(Lock).get(name)
    if existing is None:
        try:
            # no lock found, creating a new one
            lock = Lock(id=name, ts=timeutils.utcnow())
            lock.save(session)
            return session.transaction
            # lock created and acquired
        except exception.DBDuplicateEntry:
            session.rollback()
            if retry >= MAX_LOCK_RETRIES:
                raise
            else:
                # other transaction has created a lock, repeat to acquire
                # via update
                return _get_or_create_lock(name, session, nested, retry + 1)
    else:
        # lock found, acquiring by doing update
        existing.ts = timeutils.utcnow()
        existing.save(session)
        return session.transaction
Example #15
    def test_utcnow(self):
        timeutils.set_time_override(mock.sentinel.utcnow)
        self.assertEqual(timeutils.utcnow(), mock.sentinel.utcnow)

        timeutils.clear_time_override()
        self.assertFalse(timeutils.utcnow() == mock.sentinel.utcnow)

        self.assertTrue(timeutils.utcnow())
Example #16
def get_test_conductor(**kw):
    return {
        'id': kw.get('id', 6),
        'hostname': kw.get('hostname', 'test-conductor-node'),
        'drivers': kw.get('drivers', ['fake-driver', 'null-driver']),
        'created_at': kw.get('created_at', timeutils.utcnow()),
        'updated_at': kw.get('updated_at', timeutils.utcnow()),
    }
Example #17
    def test_service_is_up(self):
        serv = self.useFixture(
            ServiceFixture(self._host, self._binary, self._topic)).serv
        serv.start()
        service_ref = db.service_get_by_args(self._ctx,
                                             self._host,
                                             self._binary)
        fake_now = 1000
        down_time = 15
        self.flags(service_down_time=down_time)
        self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
        self.servicegroup_api = servicegroup.API()
        hostkey = str("%s:%s" % (self._topic, self._host))

        # Up (equal)
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Up
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertTrue(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
        # Down
        timeutils.utcnow_ts().AndReturn(fake_now)
        timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
        self.mox.ReplayAll()
        self.servicegroup_api._driver.mc.set(hostkey,
                                             timeutils.utcnow(),
                                             time=down_time)
        result = self.servicegroup_api.service_is_up(service_ref)
        self.assertFalse(result)

        self.mox.ResetAll()
Example #18
def stub_instance(id, user_id='fake', project_id='fake', host=None,
                  vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0):

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "admin_pass": "",
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "memory_mb": 0,
        "vcpus": 0,
        "root_gb": 0,
        "hostname": "",
        "host": host,
        "instance_type": {},
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": timeutils.utcnow(),
        "launched_at": timeutils.utcnow(),
        "terminated_at": timeutils.utcnow(),
        "availability_zone": "",
        "display_name": server_name,
        "display_description": "",
        "locked": False,
        "metadata": [],
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress}

    return instance
Example #19
    def test_evaluate(self):
        rule = {'EvaluationPeriods': '1',
                'MetricName': 'test_metric',
                'Period': '300',
                'Statistic': 'Maximum',
                'ComparisonOperator': 'GreaterThanOrEqualToThreshold',
                'Threshold': '30'}

        now = timeutils.utcnow()
        self.m.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().MultipleTimes().AndReturn(now)
        self.m.ReplayAll()

        # It's not time to evaluate, so should stay NODATA
        last = now - datetime.timedelta(seconds=299)
        data = WatchData(25, now - datetime.timedelta(seconds=150))
        self.wr = watchrule.WatchRule(context=self.ctx,
                                      watch_name="testwatch",
                                      rule=rule,
                                      watch_data=[data],
                                      stack_id=self.stack_id,
                                      last_evaluated=last)

        actions = self.wr.evaluate()
        self.assertEqual('NODATA', self.wr.state)
        self.assertEqual([], actions)

        # now - last == Period, so should set NORMAL
        last = now - datetime.timedelta(seconds=300)
        data = WatchData(25, now - datetime.timedelta(seconds=150))
        self.wr = watchrule.WatchRule(context=self.ctx,
                                      watch_name="testwatch",
                                      rule=rule,
                                      watch_data=[data],
                                      stack_id=self.stack_id,
                                      last_evaluated=last)

        actions = self.wr.evaluate()
        self.assertEqual('NORMAL', self.wr.state)
        self.assertEqual(now, self.wr.last_evaluated)
        self.assertEqual([], actions)

        # Now data breaches Threshold, so should set ALARM
        last = now - datetime.timedelta(seconds=300)
        data = WatchData(35, now - datetime.timedelta(seconds=150))
        self.wr = watchrule.WatchRule(context=self.ctx,
                                      watch_name="testwatch",
                                      rule=rule,
                                      watch_data=[data],
                                      stack_id=self.stack_id,
                                      last_evaluated=last)

        actions = self.wr.evaluate()
        self.assertEqual('ALARM', self.wr.state)
        self.assertEqual(now, self.wr.last_evaluated)
        self.assertEqual([], actions)
Example #20
    def test_update_environment(self):
        """Check that environment rename works."""
        self._set_policy_rules(
            {'show_environment': '@',
             'update_environment': '@'}
        )
        self.expect_policy_check('update_environment',
                                 {'environment_id': '12345'})

        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now

        expected = dict(
            id='12345',
            name='my-env',
            version=0,
            networking={},
            created=fake_now,
            updated=fake_now,
            tenant_id=self.tenant,
            description={
                'Objects': {
                    '?': {'id': '12345'}
                },
                'Attributes': []
            }
        )
        e = models.Environment(**expected)
        test_utils.save_models(e)

        fake_now = timeutils.utcnow()
        timeutils.utcnow.override_time = fake_now

        del expected['description']
        expected['services'] = []
        expected['status'] = 'ready'
        expected['name'] = 'renamed_env'
        expected['updated'] = fake_now

        body = {
            'name': 'renamed_env'
        }
        req = self._put('/environments/12345', json.dumps(body))
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)

        self.expect_policy_check('show_environment',
                                 {'environment_id': '12345'})
        req = self._get('/environments/12345')
        result = req.get_response(self.api)
        self.assertEqual(200, result.status_code)

        expected['created'] = timeutils.isotime(expected['created'])[:-1]
        expected['updated'] = timeutils.isotime(expected['updated'])[:-1]

        self.assertEqual(expected, json.loads(result.body))
Example #21
    def test_is_soon(self):
        expires = timeutils.utcnow() + datetime.timedelta(minutes=5)
        self.assertFalse(timeutils.is_soon(expires, 120))
        self.assertTrue(timeutils.is_soon(expires, 300))
        self.assertTrue(timeutils.is_soon(expires, 600))

        with mock.patch('datetime.datetime') as datetime_mock:
            datetime_mock.utcnow.return_value = self.skynet_self_aware_time
            expires = timeutils.utcnow()
            self.assertTrue(timeutils.is_soon(expires, 0))
Example #22
def upgrade():
    ip_policy = table('quark_ip_policy',
                      column('id', sa.String(length=36)),
                      column('tenant_id', sa.String(length=255)),
                      column('created_at', sa.DateTime()))
    ip_policy_cidrs = table('quark_ip_policy_cidrs',
                            column('id', sa.String(length=36)),
                            column('created_at', sa.DateTime()),
                            column('ip_policy_id', sa.String(length=36)),
                            column('cidr', sa.String(length=64)))
    subnets = table('quark_subnets',
                    column('id', sa.String(length=36)),
                    column('_cidr', sa.String(length=64)),
                    column('tenant_id', sa.String(length=255)),
                    column('ip_policy_id', sa.String(length=36)))

    connection = op.get_bind()

    # 1. Find all subnets without ip_policy.
    data = connection.execute(select([
        subnets.c.id, subnets.c._cidr, subnets.c.tenant_id]).where(
            subnets.c.ip_policy_id == None)).fetchall()  # noqa
    if not data:
        return

    LOG.info("Subnet IDs without IP policies: %s", [d[0] for d in data])

    # 2. Insert ip_policy rows with id.
    vals = [dict(id=uuidutils.generate_uuid(),
                 created_at=timeutils.utcnow(),
                 tenant_id=tenant_id)
            for id, cidr, tenant_id in data]

    LOG.info("IP Policy IDs to insert: %s", [v["id"] for v in vals])
    connection.execute(ip_policy.insert(), *vals)

    # 3. Insert default ip_policy_cidrs for those ip_policy's.
    vals2 = []
    for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
        cidrs = []
        ip_policies.ensure_default_policy(cidrs, [dict(cidr=cidr)])
        for cidr in cidrs:
            vals2.append(dict(id=uuidutils.generate_uuid(),
                              created_at=timeutils.utcnow(),
                              ip_policy_id=ip_policy["id"],
                              cidr=str(cidr)))

    LOG.info("IP Policy CIDR IDs to insert: %s", [v["id"] for v in vals2])
    connection.execute(ip_policy_cidrs.insert(), *vals2)

    # 4. Set ip_policy_id rows in quark_subnets.
    for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
        connection.execute(subnets.update().values(
            ip_policy_id=ip_policy["id"]).where(
                subnets.c.id == id))
Example #23
 def _get_fake_host_state(self, index=0):
     host_state = host_manager.HostState(
         'host_%s' % index,
         'node_%s' % index)
     host_state.free_ram_mb = 50000
     host_state.service = {
         "disabled": False,
         "updated_at": timeutils.utcnow(),
         "created_at": timeutils.utcnow(),
     }
     return host_state
Example #24
    def _action_set_stubs(self, now, action_expected=True):
        # Setup stubs for the action tests
        self.m.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().MultipleTimes().AndReturn(now)

        if action_expected:
            dummy_action = DummyAction()
            self.m.StubOutWithMock(parser.Stack, 'resource_by_refid')
            parser.Stack.resource_by_refid(mox.IgnoreArg()).\
                MultipleTimes().AndReturn(dummy_action)

        self.m.ReplayAll()
Example #25
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                              timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(_LW("Time since last L3 agent reschedule check has "
                         "exceeded the interval between checks. Waiting "
                         "before check to allow agents to send a heartbeat "
                         "in case there was a clock adjustment."))
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(
            seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            for binding in down_bindings:
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #26
    def _stub_meta_expected(self, now, data, nmeta=1):
        # Stop time at now
        self.m.StubOutWithMock(timeutils, 'utcnow')
        timeutils.utcnow().MultipleTimes().AndReturn(now)

        # Then set a stub to ensure the metadata update is as
        # expected based on the timestamp and data
        self.m.StubOutWithMock(resource.Resource, 'metadata_set')
        expected = {timeutils.strtime(now): data}
        # Note for ScalingPolicy, we expect to get a metadata
        # update for the policy and autoscaling group, so pass nmeta=2
        for x in range(nmeta):
            resource.Resource.metadata_set(expected).AndReturn(None)
Example #27
 def _get_agents(self, hosts):
     return [
         agents_db.Agent(
             binary='neutron-dhcp-agent',
             host=host,
             topic=topics.DHCP_AGENT,
             configurations="",
             agent_type=constants.AGENT_TYPE_DHCP,
             created_at=timeutils.utcnow(),
             started_at=timeutils.utcnow(),
             heartbeat_timestamp=timeutils.utcnow())
         for host in hosts
     ]
Example #28
        def run_test():
            a = timeutils.utcnow()

            for x in xrange(requests):
                self.driver.select_destinations(
                    self.context, request_spec, {})

            b = timeutils.utcnow()
            c = b - a

            seconds = (c.days * 24 * 60 * 60 + c.seconds)
            microseconds = seconds * 1000 + c.microseconds / 1000.0
            per_request_ms = microseconds / requests
            return per_request_ms
Example #29
    def test_volume_index_detail_serializer(self):
        serializer = volumes.VolumesTemplate()
        raw_volumes = [dict(
                id='vol1_id',
                status='vol1_status',
                size=1024,
                availabilityZone='vol1_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol1_id',
                        volumeId='vol1_id',
                        serverId='instance_uuid',
                        device='/foo1')],
                displayName='vol1_name',
                displayDescription='vol1_desc',
                volumeType='vol1_type',
                snapshotId='snap1_id',
                metadata=dict(
                    foo='vol1_foo',
                    bar='vol1_bar',
                    ),
                ),
                       dict(
                id='vol2_id',
                status='vol2_status',
                size=1024,
                availabilityZone='vol2_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol2_id',
                        volumeId='vol2_id',
                        serverId='instance_uuid',
                        device='/foo2')],
                displayName='vol2_name',
                displayDescription='vol2_desc',
                volumeType='vol2_type',
                snapshotId='snap2_id',
                metadata=dict(
                    foo='vol2_foo',
                    bar='vol2_bar',
                    ),
                )]
        text = serializer.serialize(dict(volumes=raw_volumes))

        tree = etree.fromstring(text)

        self.assertEqual('volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)
Example #30
File: utils.py Project: Hussnain1/manila
 def inner(self, *args, **kwargs):
     LOG.debug("Entering %(cls)s.%(method)s",
               {'cls': self.__class__.__name__,
                'method': func.__name__})
     start = timeutils.utcnow()
     ret = func(self, *args, **kwargs)
     end = timeutils.utcnow()
     LOG.debug("Exiting %(cls)s.%(method)s. "
               "Spent %(duration)s sec. "
               "Return %(return)s",
               {'cls': self.__class__.__name__,
                'duration': timeutils.delta_seconds(start, end),
                'method': func.__name__,
                'return': ret})
     return ret
Example #31
 def update(self, values):
     """dict.update() behaviour."""
     self.updated = timeutils.utcnow()
     super(_MuranoBase, self).update(values)
Example #32
 def test_marshall_time(self):
     now = timeutils.utcnow()
     binary = timeutils.marshall_now(now)
     backagain = timeutils.unmarshall_time(binary)
     self.assertEqual(now, backagain)
Example #33
    def publish_sample(self, env, bytes_received, bytes_sent):
        path = env['PATH_INFO']
        method = env['REQUEST_METHOD']
        headers = dict((header.strip('HTTP_'), env[header]) for header
                       in env if header.startswith('HTTP_'))

        try:
            container = obj = None
            version, account, remainder = path.replace(
                '/', '', 1).split('/', 2)
            if not version or not account:
                raise ValueError('Invalid path: %s' % path)
            if remainder:
                if '/' in remainder:
                    container, obj = remainder.split('/', 1)
                else:
                    container = remainder
        except ValueError:
            return

        now = timeutils.utcnow().isoformat()

        resource_metadata = {
            "path": path,
            "version": version,
            "container": container,
            "object": obj,
        }

        for header in self.metadata_headers:
            if header.upper() in headers:
                resource_metadata['http_header_%s' % header] = headers.get(
                    header.upper())

        with self.pipeline_manager.publisher(
                context.get_admin_context()) as publisher:
            if bytes_received:
                publisher([sample.Sample(
                    name='storage.objects.incoming.bytes',
                    type=sample.TYPE_DELTA,
                    unit='B',
                    volume=bytes_received,
                    user_id=env.get('HTTP_X_USER_ID'),
                    project_id=env.get('HTTP_X_TENANT_ID'),
                    resource_id=account.partition(self.reseller_prefix)[2],
                    timestamp=now,
                    resource_metadata=resource_metadata)])

            if bytes_sent:
                publisher([sample.Sample(
                    name='storage.objects.outgoing.bytes',
                    type=sample.TYPE_DELTA,
                    unit='B',
                    volume=bytes_sent,
                    user_id=env.get('HTTP_X_USER_ID'),
                    project_id=env.get('HTTP_X_TENANT_ID'),
                    resource_id=account.partition(self.reseller_prefix)[2],
                    timestamp=now,
                    resource_metadata=resource_metadata)])

            # publish the event for each request
            # request method will be recorded in the metadata
            resource_metadata['method'] = method.lower()
            publisher([sample.Sample(
                name='storage.api.request',
                type=sample.TYPE_DELTA,
                unit='request',
                volume=1,
                user_id=env.get('HTTP_X_USER_ID'),
                project_id=env.get('HTTP_X_TENANT_ID'),
                resource_id=account.partition(self.reseller_prefix)[2],
                timestamp=now,
                resource_metadata=resource_metadata)])
Example #34
 def test_advance_time_seconds(self):
     timeutils.set_time_override(self.one_minute_before)
     timeutils.advance_time_seconds(60)
     self.assertEqual(timeutils.utcnow(), self.skynet_self_aware_time)
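Examples #15 and #34 exercise timeutils' test helpers for freezing and advancing the clock. A short standalone sketch of that workflow; the frozen datetime value is an arbitrary assumption:

import datetime

from oslo.utils import timeutils

frozen = datetime.datetime(2014, 1, 1, 12, 0, 0)
timeutils.set_time_override(frozen)   # utcnow() now returns the frozen value
assert timeutils.utcnow() == frozen
timeutils.advance_time_seconds(60)    # shift the overridden clock forward
assert timeutils.utcnow() == frozen + datetime.timedelta(seconds=60)
timeutils.clear_time_override()       # restore the real clock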
Example #35
 def __setitem__(self, key, value):
     self.updated = timeutils.utcnow()
     super(_MuranoBase, self).__setitem__(key, value)
Example #36
 def test_iso8601_from_timestamp(self):
     utcnow = timeutils.utcnow()
     iso = timeutils.isotime(utcnow)
     ts = calendar.timegm(utcnow.timetuple())
     self.assertEqual(iso, timeutils.iso8601_from_timestamp(ts))
Example #37
def validate_expiration(token_ref):
    if timeutils.utcnow() > token_ref.expires:
        raise exception.Unauthorized(_('Federation token is expired'))
Example #38
    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        # Record the updated resource metadata - we use $setOnInsert to
        # unconditionally insert sample timestamps and resource metadata
        # (in the update case, this must be conditional on the sample not
        # being out-of-order)
        resource = self.db.resource.find_and_modify(
            {'_id': data['resource_id']},
            {
                '$set': {
                    'project_id': data['project_id'],
                    'user_id': data['user_id'],
                    'source': data['source'],
                },
                '$setOnInsert': {
                    'metadata': data['resource_metadata'],
                    'first_sample_timestamp': data['timestamp'],
                    'last_sample_timestamp': data['timestamp'],
                },
                '$addToSet': {
                    'meter': {
                        'counter_name': data['counter_name'],
                        'counter_type': data['counter_type'],
                        'counter_unit': data['counter_unit'],
                    },
                },
            },
            upsert=True,
            new=True,
        )

        # only update last sample timestamp if actually later (the usual
        # in-order case)
        last_sample_timestamp = resource.get('last_sample_timestamp')
        if (last_sample_timestamp is None
                or last_sample_timestamp <= data['timestamp']):
            self.db.resource.update({'_id': data['resource_id']}, {
                '$set': {
                    'metadata': data['resource_metadata'],
                    'last_sample_timestamp': data['timestamp']
                }
            })

        # only update first sample timestamp if actually earlier (the unusual
        # out-of-order case)
        # NOTE: a null first sample timestamp is not updated as this indicates
        # a pre-existing resource document dating from before we started
        # recording these timestamps in the resource collection
        first_sample_timestamp = resource.get('first_sample_timestamp')
        if (first_sample_timestamp is not None
                and first_sample_timestamp > data['timestamp']):
            self.db.resource.update(
                {'_id': data['resource_id']},
                {'$set': {
                    'first_sample_timestamp': data['timestamp']
                }})

        # Record the raw data for the meter. Use a copy so we do not
        # modify a data structure owned by our caller (the driver adds
        # a new key '_id').
        record = copy.copy(data)
        record['recorded_at'] = timeutils.utcnow()
        self.db.meter.insert(record)
Example #39
    def test_sync_instances(self):
        call_info = {}

        def sync_instances(self, context, **kwargs):
            call_info['project_id'] = kwargs.get('project_id')
            call_info['updated_since'] = kwargs.get('updated_since')
            call_info['deleted'] = kwargs.get('deleted')

        self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)

        req = self._get_request("cells/sync_instances")
        req.environ['nova.context'] = self.context
        body = {}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])

        body = {'project_id': 'test-project'}
        self.controller.sync_instances(req, body=body)
        self.assertEqual(call_info['project_id'], 'test-project')
        self.assertIsNone(call_info['updated_since'])

        expected = timeutils.utcnow().isoformat()
        if not expected.endswith("+00:00"):
            expected += "+00:00"

        body = {'updated_since': expected}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertEqual(call_info['updated_since'], expected)

        body = {'updated_since': 'skjdfkjsdkf'}
        self.assertRaises(self.bad_request,
                          self.controller.sync_instances,
                          req,
                          body=body)

        body = {'deleted': False}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], False)

        body = {'deleted': 'False'}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], False)

        body = {'deleted': 'True'}
        self.controller.sync_instances(req, body=body)
        self.assertIsNone(call_info['project_id'])
        self.assertIsNone(call_info['updated_since'])
        self.assertEqual(call_info['deleted'], True)

        body = {'deleted': 'foo'}
        self.assertRaises(self.bad_request,
                          self.controller.sync_instances,
                          req,
                          body=body)

        body = {'foo': 'meow'}
        self.assertRaises(self.bad_request,
                          self.controller.sync_instances,
                          req,
                          body=body)
Example #40
 def __call__(self, req):
     start = timeutils.utcnow()
     rv = req.get_response(self.application)
     self.log_request_completion(rv, req, start)
     return rv
Example #41
 def update_worker(self, hostname):
     count = (self.model_query(models.Worker).filter_by(
         hostname=hostname).update({'updated_at': timeutils.utcnow()}))
     if count == 0:
         raise exceptions.WorkerNotFound(worker=hostname)
Example #42
    def _tenant_usages_for_period(self,
                                  context,
                                  period_start,
                                  period_stop,
                                  tenant_id=None,
                                  detailed=True):

        instances = objects.InstanceList.get_active_by_window_joined(
            context,
            period_start,
            period_stop,
            tenant_id,
            expected_attrs=['system_metadata'])
        rval = {}
        flavors = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance, period_start,
                                            period_stop)
            flavor = self._get_flavor(context, instance, flavors)
            if not flavor:
                info['flavor'] = ''
            else:
                info['flavor'] = flavor.name

            info['instance_id'] = instance.uuid
            info['name'] = instance.display_name

            info['memory_mb'] = instance.memory_mb
            info['local_gb'] = instance.root_gb + instance.ephemeral_gb
            info['vcpus'] = instance.vcpus

            info['tenant_id'] = instance.project_id

            # NOTE(mriedem): We need to normalize the start/end times back
            # to timezone-naive so the response doesn't change after the
            # conversion to objects.
            info['started_at'] = timeutils.normalize_time(instance.launched_at)

            info['ended_at'] = (timeutils.normalize_time(
                instance.terminated_at) if instance.terminated_at else None)

            if info['ended_at']:
                info['state'] = 'terminated'
            else:
                info['state'] = instance.vm_state

            now = timeutils.utcnow()

            if info['state'] == 'terminated':
                delta = info['ended_at'] - info['started_at']
            else:
                delta = now - info['started_at']

            info['uptime'] = delta.days * 24 * 3600 + delta.seconds

            if info['tenant_id'] not in rval:
                summary = {}
                summary['tenant_id'] = info['tenant_id']
                if detailed:
                    summary['server_usages'] = []
                summary['total_local_gb_usage'] = 0
                summary['total_vcpus_usage'] = 0
                summary['total_memory_mb_usage'] = 0
                summary['total_hours'] = 0
                summary['start'] = timeutils.normalize_time(period_start)
                summary['stop'] = timeutils.normalize_time(period_stop)
                rval[info['tenant_id']] = summary

            summary = rval[info['tenant_id']]
            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
            summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                                 info['hours'])

            summary['total_hours'] += info['hours']
            if detailed:
                summary['server_usages'].append(info)

        return rval.values()
Example #43
    def __init__(self,
                 user_id,
                 project_id,
                 is_admin=None,
                 read_deleted="no",
                 roles=None,
                 remote_address=None,
                 timestamp=None,
                 request_id=None,
                 auth_token=None,
                 overwrite=True,
                 quota_class=None,
                 user_name=None,
                 project_name=None,
                 service_catalog=None,
                 instance_lock_checked=False,
                 **kwargs):
        """:param read_deleted: 'no' indicates deleted records are hidden,
                'yes' indicates deleted records are visible,
                'only' indicates that *only* deleted records are visible.


           :param overwrite: Set to False to ensure that the greenthread local
                copy of the index is not overwritten.

           :param kwargs: Extra arguments that might be present, but we ignore
                because they possibly came in from older rpc messages.
        """
        if kwargs:
            LOG.warn(
                _('Arguments dropped when creating context: %s') % str(kwargs))

        self.user_id = user_id
        self.project_id = project_id
        self.roles = roles or []
        self.read_deleted = read_deleted
        self.remote_address = remote_address
        if not timestamp:
            timestamp = timeutils.utcnow()
        if isinstance(timestamp, six.string_types):
            timestamp = timeutils.parse_strtime(timestamp)
        self.timestamp = timestamp
        if not request_id:
            request_id = generate_request_id()
        self.request_id = request_id
        self.auth_token = auth_token

        if service_catalog:
            # Only include required parts of service_catalog
            self.service_catalog = [
                s for s in service_catalog
                if s.get('type') in ('volume', 'volumev2')
            ]
        else:
            # if list is empty or none
            self.service_catalog = []

        self.instance_lock_checked = instance_lock_checked

        # NOTE(markmc): this attribute is currently only used by the
        # rs_limits turnstile pre-processor.
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = False
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()
Example #44
 def _assert_valid(self, token_id, token_ref):
     """Raise TokenNotFound if the token is expired."""
     current_time = timeutils.normalize_time(timeutils.utcnow())
     expires = token_ref.get('expires')
     if not expires or current_time > timeutils.normalize_time(expires):
         raise exception.TokenNotFound(token_id=token_id)
Example #45
    def reserve(self, context, resources, deltas, expire=None,
                project_id=None, user_id=None):
        """Check quotas and reserve resources.

        For counting quotas--those quotas for which there is a usage
        synchronization function--this method checks quotas against
        current usage and the desired deltas.

        This method will raise a QuotaResourceUnknown exception if a
        given resource is unknown or if it does not have a usage
        synchronization function.

        If any of the proposed values is over the defined quota, an
        OverQuota exception will be raised with the sorted list of the
        resources which are too high.  Otherwise, the method returns a
        list of reservation UUIDs which were created.

        :param context: The request context, for access checks.
        :param resources: A dictionary of the registered resources.
        :param deltas: A dictionary of the proposed delta changes.
        :param expire: An optional parameter specifying an expiration
                       time for the reservations.  If it is a simple
                       number, it is interpreted as a number of
                       seconds and added to the current time; if it is
                       a datetime.timedelta object, it will also be
                       added to the current time.  A datetime.datetime
                       object will be interpreted as the absolute
                       expiration time.  If None is specified, the
                       default expiration time set by
                       --default-reservation-expire will be used (this
                       value will be treated as a number of seconds).
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        :param user_id: Specify the user_id if current context
                        is admin and admin wants to impact on
                        common user.
        """

        # Set up the reservation expiration
        if expire is None:
            expire = CONF.reservation_expire
        if isinstance(expire, (int, long)):
            expire = datetime.timedelta(seconds=expire)
        if isinstance(expire, datetime.timedelta):
            expire = timeutils.utcnow() + expire
        if not isinstance(expire, datetime.datetime):
            raise exception.InvalidReservationExpiration(expire=expire)

        # If project_id is None, then we use the project_id in context
        if project_id is None:
            project_id = context.project_id
        # If user_id is None, then we use the project_id in context
        if user_id is None:
            user_id = context.user_id

        # Get the applicable quotas.
        # NOTE(Vek): We're not worried about races at this point.
        #            Yes, the admin may be in the process of reducing
        #            quotas, but that's a pretty rare thing.
        quotas = self._get_quotas(context, resources, deltas.keys(),
                                  has_sync=True, project_id=project_id)
        user_quotas = self._get_quotas(context, resources, deltas.keys(),
                                       has_sync=True, project_id=project_id,
                                       user_id=user_id)

        # NOTE(Vek): Most of the work here has to be done in the DB
        #            API, because we have to do it in a transaction,
        #            which means access to the session.  Since the
        #            session isn't available outside the DBAPI, we
        #            have to do the work there.
        return db.quota_reserve(context, resources, quotas, user_quotas,
                                deltas, expire,
                                CONF.until_refresh, CONF.max_age,
                                project_id=project_id, user_id=user_id)
Example #46
import datetime

from oslo.utils import timeutils

from keystone import config
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
from keystone import token
from keystone.token.providers import pki


CONF = config.CONF

FUTURE_DELTA = datetime.timedelta(seconds=CONF.token.expiration)
CURRENT_DATE = timeutils.utcnow()

SAMPLE_V2_TOKEN = {
    "access": {
        "trust": {
            "id": "abc123",
            "trustee_user_id": "123456"
        },
        "serviceCatalog": [
            {
                "endpoints": [
                    {
                        "adminURL": "http://localhost:8774/v1.1/01257",
                        "id": "51934fe63a5b4ac0a32664f64eb462c3",
                        "internalURL": "http://localhost:8774/v1.1/01257",
                        "publicURL": "http://localhost:8774/v1.1/01257",
Example #47
import datetime

from lxml import etree
from oslo.utils import timeutils

import cinder.api.contrib.availability_zones
import cinder.context
import cinder.test
import cinder.volume.api

created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
current_time = timeutils.utcnow()


def list_availability_zones(self):
    return (
        {
            'name': 'ping',
            'available': True
        },
        {
            'name': 'pong',
            'available': False
        },
    )

Example #48
 def _advance_time(self, factor):
     delta = datetime.timedelta(seconds=self.test_interval * factor)
     self.mock_utcnow.return_value = timeutils.utcnow() + delta
Example #49
    def setUp(self):
        super(TestTgtAdmDriver, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.iscsi_ip_address = '10.9.8.7'
        self.fake_volumes_dir = tempfile.mkdtemp()
        self.fake_id_1 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_id_2 = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
        self.stubs.Set(self.configuration, 'safe_get', self.fake_safe_get)
        self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)
        self.testvol_1 =\
            {'project_id': self.fake_id_1,
             'name': 'testvol',
             'size': 1,
             'id': self.fake_id_2,
             'volume_type_id': None,
             'provider_location': '10.9.8.7:3260 '
                                  'iqn.2010-10.org.openstack:'
                                  'volume-%s 0' % self.fake_id_2,
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.expected_iscsi_properties = \
            {'auth_method': 'CHAP',
             'auth_password': '******',
             'auth_username': '******',
             'encrypted': False,
             'logical_block_size': '512',
             'physical_block_size': '512',
             'target_discovered': False,
             'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                           self.fake_id_2,
             'target_lun': 0,
             'target_portal': '10.10.7.1:3260',
             'volume_id': self.fake_id_2}

        self.fake_iscsi_scan =\
            ('Target 1: iqn.2010-10.org.openstack:volume-83c2e877-feed-46be-8435-77884fe55b45\n'  # noqa
             '    System information:\n'
             '        Driver: iscsi\n'
             '        State: ready\n'
             '    I_T nexus information:\n'
             '    LUN information:\n'
             '        LUN: 0\n'
             '            Type: controller\n'
             '            SCSI ID: IET     00010000\n'
             '            SCSI SN: beaf10\n'
             '            Size: 0 MB, Block size: 1\n'
             '            Online: Yes\n'
             '            Removable media: No\n'
             '            Prevent removal: No\n'
             '            Readonly: No\n'
             '            SWP: No\n'
             '            Thin-provisioning: No\n'
             '            Backing store type: null\n'
             '            Backing store path: None\n'
             '            Backing store flags:\n'
             '        LUN: 1\n'
             '            Type: disk\n'
             '            SCSI ID: IET     00010001\n'
             '            SCSI SN: beaf11\n'
             '            Size: 1074 MB, Block size: 512\n'
             '            Online: Yes\n'
             '            Removable media: No\n'
             '            Prevent removal: No\n'
             '            Readonly: No\n'
             '            SWP: No\n'
             '            Thin-provisioning: No\n'
             '            Backing store type: rdwr\n'
             '            Backing store path: /dev/stack-volumes-lvmdriver-1/volume-83c2e877-feed-46be-8435-77884fe55b45\n'  # noqa
             '            Backing store flags:\n'
             '    Account information:\n'
             '        mDVpzk8cZesdahJC9h73\n'
             '    ACL information:\n'
             '        ALL"\n')
Example #50
    def test_get_instance_diagnostics(self):
        instance_ref, network_info = self._get_running_instance(obj=True)
        instance_ref['launched_at'] = timeutils.utcnow()
        self.connection.get_instance_diagnostics(instance_ref)
Example #51
    def setUp(self):
        super(TestConductorObject, self).setUp()
        self.fake_conductor = utils.get_test_conductor(
            updated_at=timeutils.utcnow())
Example #52
def last_completed_audit_period(unit=None, before=None):
    """This method gives you the most recently *completed* audit period.

    arguments:
            unit: string, one of 'hour', 'day', 'month', 'year'.
                    Periods normally begin at the start (UTC) of the
                    period unit (so a 'day' period begins at midnight UTC,
                    a 'month' unit on the 1st, and a 'year' on Jan 1).
                    The unit string may carry an optional offset:
                    'day@18' begins the period at 18:00 UTC, 'month@15'
                    starts a monthly period on the 15th, and 'year@3'
                    begins a yearly one on March 1st.
            before: give the audit period most recently completed before
                    <timestamp>. Defaults to now.


    returns:  a 2-tuple of datetimes (begin, end).
              The begin timestamp of this audit period is the same as the
              end of the previous one.
    """
    if not unit:
        unit = CONF.instance_usage_audit_period

    offset = 0
    if '@' in unit:
        unit, offset = unit.split("@", 1)
        offset = int(offset)

    if before is not None:
        rightnow = before
    else:
        rightnow = timeutils.utcnow()
    if unit not in ('month', 'day', 'year', 'hour'):
        raise ValueError('Time period must be hour, day, month or year')
    if unit == 'month':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=offset,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            year = rightnow.year
            if 1 >= rightnow.month:
                year -= 1
                month = 12 + (rightnow.month - 1)
            else:
                month = rightnow.month - 1
            end = datetime.datetime(day=offset, month=month, year=year)
        year = end.year
        if 1 >= end.month:
            year -= 1
            month = 12 + (end.month - 1)
        else:
            month = end.month - 1
        begin = datetime.datetime(day=offset, month=month, year=year)

    elif unit == 'year':
        if offset == 0:
            offset = 1
        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
        if end >= rightnow:
            end = datetime.datetime(day=1,
                                    month=offset,
                                    year=rightnow.year - 1)
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 2)
        else:
            begin = datetime.datetime(day=1,
                                      month=offset,
                                      year=rightnow.year - 1)

    elif unit == 'day':
        end = datetime.datetime(hour=offset,
                                day=rightnow.day,
                                month=rightnow.month,
                                year=rightnow.year)
        if end >= rightnow:
            end = end - datetime.timedelta(days=1)
        begin = end - datetime.timedelta(days=1)

    elif unit == 'hour':
        end = rightnow.replace(minute=offset, second=0, microsecond=0)
        if end >= rightnow:
            end = end - datetime.timedelta(hours=1)
        begin = end - datetime.timedelta(hours=1)

    return (begin, end)
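Because the period boundaries depend only on the unit string and the reference time, passing an explicit before timestamp makes the result easy to reason about. A hedged usage sketch follows; the timestamps are illustrative.

import datetime

fake_now = datetime.datetime(2014, 11, 5, 10, 30)

# The last completed full hour before 10:30 is 09:00 -> 10:00.
begin, end = last_completed_audit_period(unit='hour', before=fake_now)
assert (begin, end) == (datetime.datetime(2014, 11, 5, 9, 0),
                        datetime.datetime(2014, 11, 5, 10, 0))

# 'day@18' anchors the daily period at 18:00 UTC, so the most recently
# completed period runs from Nov 3 18:00 to Nov 4 18:00.
begin, end = last_completed_audit_period(unit='day@18', before=fake_now)
assert (begin, end) == (datetime.datetime(2014, 11, 3, 18, 0),
                        datetime.datetime(2014, 11, 4, 18, 0))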
Example #53
    def soft_delete(self, session=None):
        """Mark this object as deleted."""
        self.update_and_save({'deleted_at': timeutils.utcnow()},
                             session=session)
Example #54
import testscenarios

from oslo import messaging
from oslo.messaging.notify import dispatcher as notify_dispatcher
from oslo.utils import timeutils
from tests import utils as test_utils

load_tests = testscenarios.load_tests_apply_scenarios


notification_msg = dict(
    publisher_id="publisher_id",
    event_type="compute.start",
    payload={"info": "fuu"},
    message_id="uuid",
    timestamp=str(timeutils.utcnow())
)


class TestDispatcher(test_utils.BaseTestCase):

    scenarios = [
        ('no_endpoints',
         dict(endpoints=[],
              endpoints_expect_calls=[],
              priority='info',
              ex=None,
              return_value=messaging.NotificationResult.HANDLED)),
        ('one_endpoints',
         dict(endpoints=[['warn']],
              endpoints_expect_calls=['warn'],
Example #55
    def delete_trust(self, trust_id):
        with sql.transaction() as session:
            trust_ref = session.query(TrustModel).get(trust_id)
            if not trust_ref:
                raise exception.TrustNotFound(trust_id=trust_id)
            trust_ref.deleted_at = timeutils.utcnow()
Example #56
def _model_query(context, model, filters, fields=None):
    filters = filters or {}
    model_filters = []
    eq_filters = ["address", "cidr", "deallocated", "ip_version",
                  "mac_address_range_id", "transaction_id"]
    in_filters = ["device_id", "device_owner", "group_id", "id", "mac_address",
                  "name", "network_id", "segment_id", "subnet_id",
                  "used_by_tenant_id", "version"]

    # Sanitize incoming filters to only attributes that exist in the model.
    # NOTE: Filters for unusable attributes are silently dropped here.
    # NOTE: When the filter key != attribute key, a conditional must be added
    #       here.
    model_attrs = _model_attrs(model)
    filters = {x: y for x, y in filters.items()
               if x in model_attrs or
               (x == "tenant_id" and model == models.IPAddress) or
               (x == "ip_address" and model == models.IPAddress) or
               (x == "reuse_after" and model in (models.IPAddress,
                                                 models.MacAddress))}

    # Inject the tenant id if none is set. We don't need unqualified queries.
    # This works even when a non-shared, other-tenant owned network is passed
    # in because the authZ checks that happen in Neutron above us yank it back
    # out of the result set.
    if not filters.get("tenant_id") and not context.is_admin:
        filters["tenant_id"] = [context.tenant_id]

    if model == models.SecurityGroupRule:
        sg_rule_attribs = ["direction", "port_range_max", "port_range_min"]
        eq_filters.extend(sg_rule_attribs)

    for key, value in filters.items():
        # This is mostly for unittests, as they're configured to send in None
        if value is None:
            continue
        if key in in_filters:
            model_type = getattr(model, key)
            model_filters.append(model_type.in_(value))
        elif key in eq_filters:
            model_type = getattr(model, key)
            model_filters.append(model_type == value)
        elif key == "_deallocated":
            if value:
                model_filters.append(model._deallocated == 1)
            else:
                model_filters.append(model._deallocated != 1)
        elif key == "ethertype":
            etypes = []
            for etype in value:
                etypes.append(protocols.translate_ethertype(etype))
            model_filters.append(model.ethertype.in_(etypes))
        elif key == "ip_address":
            model_filters.append(model.address.in_(
                [ip.ipv6().value for ip in value]))
        elif key == 'protocol':
            pnums = []
            for version in (protocols.PROTOCOLS_V4, protocols.PROTOCOLS_V6):
                pnums.extend([y for x, y in version.items() if x in value])
            model_filters.append(model.protocol.in_(pnums))
        elif key == "reuse_after":
            reuse = (timeutils.utcnow() -
                     datetime.timedelta(seconds=value))
            # NOTE(asadoughi): should this allow for deallocated_at = null?
            model_filters.append(model.deallocated_at <= reuse)
        elif key == "tenant_id":
            if model == models.IPAddress:
                model_filters.append(model.used_by_tenant_id.in_(value))
            else:
                model_filters.append(model.tenant_id.in_(value))

    return model_filters
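The helper returns a list of SQLAlchemy filter expressions that the caller ANDs together. A hedged usage sketch follows; the session plumbing and filter values are illustrative, not from the source.

filters = {"network_id": ["aaaa-bbbb-cccc"],   # handled via in_()
           "reuse_after": 300}                 # deallocated at least 300 s ago
model_filters = _model_query(context, models.IPAddress, filters)
ips = context.session.query(models.IPAddress).filter(*model_filters).all()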
Example #57
    def _get_time_now(self):
        """Get current UTC. Broken out for testing."""
        return timeutils.utcnow()
Example #58

import mock
from oslo.serialization import jsonutils
from oslo.utils import timeutils

from nova import db
from nova import exception
from nova.objects import aggregate
from nova.objects import service
from nova.tests.unit.objects import test_compute_node
from nova.tests.unit.objects import test_objects

NOW = timeutils.utcnow().replace(microsecond=0)
fake_service = {
    'created_at': NOW,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 123,
    'host': 'fake-host',
    'binary': 'fake-service',
    'topic': 'fake-service-topic',
    'report_count': 1,
    'disabled': False,
    'disabled_reason': None,
}

OPTIONAL = ['availability_zone', 'compute_node']
Example #59
    def test_delta_seconds(self):
        before = timeutils.utcnow()
        after = before + datetime.timedelta(
            days=7, seconds=59, microseconds=123456)
        self.assertAlmostEqual(604859.123456,
                               timeutils.delta_seconds(before, after))
Example #60
File: utils.py  Project: avirambh/cinder
def service_is_up(service):
    """Check whether a service is up based on last heartbeat."""
    last_heartbeat = service['updated_at'] or service['created_at']
    # Timestamps in DB are UTC.
    elapsed = (timeutils.utcnow() - last_heartbeat).total_seconds()
    return abs(elapsed) <= CONF.service_down_time
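A hedged usage sketch follows; the service records are fabricated, and a service_down_time of 60 seconds (the usual default) is assumed.

import datetime

from oslo.utils import timeutils

fresh = {'created_at': timeutils.utcnow() - datetime.timedelta(minutes=5),
         'updated_at': timeutils.utcnow() - datetime.timedelta(seconds=10)}
stale = {'created_at': timeutils.utcnow() - datetime.timedelta(hours=2),
         'updated_at': timeutils.utcnow() - datetime.timedelta(minutes=10)}

# With service_down_time=60, a 10 second old heartbeat counts as up and a
# 10 minute old one does not.
assert service_is_up(fresh)
assert not service_is_up(stale)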