Example #1
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    print "## delay=%s" % delay
                    if delay <= 0:
                        LOG.warn(
                            _('task run outlasted interval by %s sec') %
                            -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                print "#### exception=%s" % e.retvalue
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
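The loop above keeps a fixed cadence by subtracting the task's own runtime from the interval before sleeping. A minimal standalone sketch of that arithmetic, using plain datetime instead of timeutils (names here are illustrative, not part of the original code):

import datetime


def remaining_delay(start, end, interval):
    # Same idea as interval - timeutils.delta_seconds(start, end),
    # clamped at zero so a slow run never produces a negative sleep.
    elapsed = (end - start).total_seconds()
    return max(interval - elapsed, 0.0)


start = datetime.datetime.utcnow()
# ... the wrapped task f(*args, **kw) would run here ...
end = datetime.datetime.utcnow()
print(remaining_delay(start, end, interval=5.0))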
Example #2
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue

            if spacing is not None:
                idle_for = min(idle_for, spacing)

            LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
            self._periodic_last_run[task_name] = timeutils.utcnow()

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                              locals())
            time.sleep(0)

        return idle_for
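A periodic task in the loop above runs slightly early when it is "nearly due"; otherwise the loop only remembers how much longer it may keep idling. A standalone sketch of that decision, assuming the same 0.2-second tolerance that timeutils.is_soon() is given above:

import datetime

NEARLY_DUE_WINDOW = 0.2  # seconds, mirroring the is_soon(due, 0.2) call


def seconds_until_due(last_run, spacing, now=None):
    # Returns 0.0 when the task should run now (due or nearly due),
    # otherwise the number of seconds the caller can keep sleeping.
    now = now or datetime.datetime.utcnow()
    due = last_run + datetime.timedelta(seconds=spacing)
    remaining = (due - now).total_seconds()
    return 0.0 if remaining <= NEARLY_DUE_WINDOW else remaining


last_run = datetime.datetime.utcnow() - datetime.timedelta(seconds=59.9)
print(seconds_until_due(last_run, spacing=60))  # ~0.0, so run it early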
Example #3
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                              timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(_LW("Time since last L3 agent reschedule check has "
                         "exceeded the interval between checks. Waiting "
                         "before check to allow agents to send a heartbeat "
                         "in case there was a clock adjustment."))
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(
            seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up))
        for binding in down_bindings:
            LOG.warn(_LW("Rescheduling router %(router)s from agent %(agent)s "
                         "because the agent did not report to the server in "
                         "the last %(dead_time)s seconds."),
                     {'router': binding.router_id,
                      'agent': binding.l3_agent_id,
                      'dead_time': agent_dead_limit})
            self.reschedule_router(context, binding.router_id)
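The binding query above hinges on a single cutoff: an agent is treated as down once its last heartbeat is older than twice agent_down_time. A minimal sketch of that check with plain datetime (the 75-second default is an assumption chosen for illustration):

import datetime


def is_agent_dead(heartbeat_timestamp, agent_down_time=75, now=None):
    # agent_dead_limit in the code above is agent_down_time * 2.
    now = now or datetime.datetime.utcnow()
    cutoff = now - datetime.timedelta(seconds=agent_down_time * 2)
    return heartbeat_timestamp < cutoff


last_heartbeat = datetime.datetime.utcnow() - datetime.timedelta(seconds=300)
print(is_agent_dead(last_heartbeat))  # True: 300s > 150s dead limit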
Example #4
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            now = timeutils.utcnow()
            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # If a periodic task is _nearly_ due, then we'll run it early
            if spacing is not None and last_run is not None:
                due = last_run + datetime.timedelta(seconds=spacing)
                if not timeutils.is_soon(due, 0.2):
                    idle_for = min(idle_for, timeutils.delta_seconds(now, due))
                    continue

            if spacing is not None:
                idle_for = min(idle_for, spacing)

            LOG.debug(_("Running periodic task %(full_task_name)s"),
                      {"full_task_name": full_task_name})
            self._periodic_last_run[task_name] = timeutils.utcnow()

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
                              {"full_task_name": full_task_name, "e": e})
            time.sleep(0)

        return idle_for
Example #5
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = timeutils.utcnow()
                    self.f(*self.args, **self.kw)
                    end = timeutils.utcnow()
                    if not self._running:
                        break
                    delay = interval - timeutils.delta_seconds(start, end)
                    if delay <= 0:
                        LOG.warn(_('task run outlasted interval by %s sec') %
                                 -delay)
                    greenthread.sleep(delay if delay > 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example #6
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, "_clock_jump_canary", timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(
                _LW(
                    "Time since last L3 agent reschedule check has "
                    "exceeded the interval between checks. Waiting "
                    "before check to allow agents to send a heartbeat "
                    "in case there was a clock adjustment."
                )
            )
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding)
            .join(agents_db.Agent)
            .filter(agents_db.Agent.heartbeat_timestamp < cutoff, agents_db.Agent.admin_state_up)
            .outerjoin(
                l3_attrs_db.RouterExtraAttributes,
                l3_attrs_db.RouterExtraAttributes.router_id == RouterL3AgentBinding.router_id,
            )
            .filter(
                sa.or_(
                    l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                    l3_attrs_db.RouterExtraAttributes.ha == sql.null(),
                )
            )
        )
        try:
            for binding in down_bindings:
                LOG.warn(
                    _LW(
                        "Rescheduling router %(router)s from agent %(agent)s "
                        "because the agent did not report to the server in "
                        "the last %(dead_time)s seconds."
                    ),
                    {"router": binding.router_id, "agent": binding.l3_agent_id, "dead_time": agent_dead_limit},
                )
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed, n_rpc.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"), binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router " "rescheduling."))
Example #7
def upgrade():
    ip_policy = table('quark_ip_policy',
                      column('id', sa.String(length=36)),
                      column('tenant_id', sa.String(length=255)),
                      column('created_at', sa.DateTime()))
    ip_policy_cidrs = table('quark_ip_policy_cidrs',
                            column('id', sa.String(length=36)),
                            column('created_at', sa.DateTime()),
                            column('ip_policy_id', sa.String(length=36)),
                            column('cidr', sa.String(length=64)))
    subnets = table('quark_subnets',
                    column('id', sa.String(length=36)),
                    column('_cidr', sa.String(length=64)),
                    column('tenant_id', sa.String(length=255)),
                    column('ip_policy_id', sa.String(length=36)))

    connection = op.get_bind()

    # 1. Find all subnets without ip_policy.
    data = connection.execute(select([
        subnets.c.id, subnets.c._cidr, subnets.c.tenant_id]).where(
            subnets.c.ip_policy_id == None)).fetchall()  # noqa
    if not data:
        return

    LOG.info("Subnet IDs without IP policies: %s", [d[0] for d in data])

    # 2. Insert ip_policy rows with id.
    vals = [dict(id=uuidutils.generate_uuid(),
                 created_at=timeutils.utcnow(),
                 tenant_id=tenant_id)
            for id, cidr, tenant_id in data]

    LOG.info("IP Policy IDs to insert: %s", [v["id"] for v in vals])
    connection.execute(ip_policy.insert(), *vals)

    # 3. Insert default ip_policy_cidrs for those ip_policy's.
    vals2 = []
    for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
        cidrs = []
        ip_policies.ensure_default_policy(cidrs, [dict(cidr=cidr)])
        for cidr in cidrs:
            vals2.append(dict(id=uuidutils.generate_uuid(),
                              created_at=timeutils.utcnow(),
                              ip_policy_id=ip_policy["id"],
                              cidr=str(cidr)))

    LOG.info("IP Policy CIDR IDs to insert: %s", [v["id"] for v in vals2])
    connection.execute(ip_policy_cidrs.insert(), *vals2)

    # 4. Set ip_policy_id rows in quark_subnets.
    for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
        connection.execute(subnets.update().values(
            ip_policy_id=ip_policy["id"]).where(
                subnets.c.id == id))
Example #8
def upgrade():
    ip_policy = table('quark_ip_policy', column('id', sa.String(length=36)),
                      column('tenant_id', sa.String(length=255)),
                      column('created_at', sa.DateTime()))
    ip_policy_cidrs = table('quark_ip_policy_cidrs',
                            column('id', sa.String(length=36)),
                            column('created_at', sa.DateTime()),
                            column('ip_policy_id', sa.String(length=36)),
                            column('cidr', sa.String(length=64)))
    subnets = table('quark_subnets', column('id', sa.String(length=36)),
                    column('_cidr', sa.String(length=64)),
                    column('tenant_id', sa.String(length=255)),
                    column('ip_policy_id', sa.String(length=36)))

    connection = op.get_bind()

    # 1. Find all subnets without ip_policy.
    data = connection.execute(
        select([subnets.c.id, subnets.c._cidr, subnets.c.tenant_id
                ]).where(subnets.c.ip_policy_id == None)).fetchall()  # noqa
    if not data:
        return

    LOG.info("Subnet IDs without IP policies: %s", [d[0] for d in data])

    # 2. Insert ip_policy rows with id.
    vals = [
        dict(id=uuidutils.generate_uuid(),
             created_at=timeutils.utcnow(),
             tenant_id=tenant_id) for id, cidr, tenant_id in data
    ]

    LOG.info("IP Policy IDs to insert: %s", [v["id"] for v in vals])
    connection.execute(ip_policy.insert(), *vals)

    # 3. Insert default ip_policy_cidrs for those ip_policy's.
    vals2 = []
    for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
        cidrs = []
        ip_policies.ensure_default_policy(cidrs, [dict(cidr=cidr)])
        for cidr in cidrs:
            vals2.append(
                dict(id=uuidutils.generate_uuid(),
                     created_at=timeutils.utcnow(),
                     ip_policy_id=ip_policy["id"],
                     cidr=str(cidr)))

    LOG.info("IP Policy CIDR IDs to insert: %s", [v["id"] for v in vals2])
    connection.execute(ip_policy_cidrs.insert(), *vals2)

    # 4. Set ip_policy_id rows in quark_subnets.
    for ((id, cidr, tenant_id), ip_policy) in zip(data, vals):
        connection.execute(subnets.update().values(
            ip_policy_id=ip_policy["id"]).where(subnets.c.id == id))
Example #9
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""

        # give agents extra time to handle transient failures
        agent_dead_limit = cfg.CONF.agent_down_time * 2

        # check for an abrupt clock change since last check. if a change is
        # detected, sleep for a while to let the agents check in.
        tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                              timeutils.utcnow())
        if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
            LOG.warn(_LW("Time since last L3 agent reschedule check has "
                         "exceeded the interval between checks. Waiting "
                         "before check to allow agents to send a heartbeat "
                         "in case there was a clock adjustment."))
            time.sleep(agent_dead_limit)
        self._clock_jump_canary = timeutils.utcnow()

        context = n_ctx.get_admin_context()
        cutoff = timeutils.utcnow() - datetime.timedelta(
            seconds=agent_dead_limit)
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
        try:
            for binding in down_bindings:
                LOG.warn(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        n_rpc.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except db_exc.DBError:
            # Catch DB errors here so a transient DB connectivity issue
            # doesn't stop the loopingcall.
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #10
 def _get_agents(self, hosts):
     return [
         agents_db.Agent(binary='neutron-dhcp-agent',
                         host=host,
                         topic=topics.DHCP_AGENT,
                         configurations="",
                         agent_type=constants.AGENT_TYPE_DHCP,
                         created_at=timeutils.utcnow(),
                         started_at=timeutils.utcnow(),
                         heartbeat_timestamp=timeutils.utcnow())
         for host in hosts
     ]
Example #11
 def _get_agents(self, hosts):
     return [
         agents_db.Agent(
             binary='neutron-dhcp-agent',
             host=host,
             topic=topics.DHCP_AGENT,
             configurations="",
             agent_type=constants.AGENT_TYPE_DHCP,
             created_at=timeutils.utcnow(),
             started_at=timeutils.utcnow(),
             heartbeat_timestamp=timeutils.utcnow())
         for host in hosts
     ]
Example #12
    def _process_router_update(self):
        for rp, update in self._queue.each_update_to_next_router():
            LOG.debug("Starting router update for %s", update.id)
            router = update.router
            if update.action != DELETE_ROUTER and not router:
                try:
                    update.timestamp = timeutils.utcnow()
                    routers = self.plugin_rpc.get_routers(
                        self.context, [update.id])
                except Exception:
                    msg = _("Failed to fetch router information for '%s'")
                    LOG.exception(msg, update.id)
                    self.fullsync = True
                    continue

                if routers:
                    router = routers[0]

            if not router:
                self._router_removed(update.id)
                continue

            self._process_routers([router])
            LOG.debug("Finished a router update for %s", update.id)
            rp.fetched_and_processed(update.timestamp)
Example #13
    def _process_router_update(self):
        for rp, update in self._queue.each_update_to_next_router():
            LOG.debug("Starting router update for %s", update.id)
            router = update.router
            if update.action != DELETE_ROUTER and not router:
                try:
                    update.timestamp = timeutils.utcnow()
                    routers = self.plugin_rpc.get_routers(self.context,
                                                          [update.id])
                except Exception:
                    msg = _("Failed to fetch router information for '%s'")
                    LOG.exception(msg, update.id)
                    self.fullsync = True
                    continue

                if routers:
                    router = routers[0]

            if not router:
                self._router_removed(update.id)
                continue

            self._process_routers([router])
            LOG.debug("Finished a router update for %s", update.id)
            rp.fetched_and_processed(update.timestamp)
Example #14
 def test_schedule_router_distributed(self):
     scheduler = l3_agent_scheduler.ChanceScheduler()
     agent = agents_db.Agent()
     agent.admin_state_up = True
     agent.heartbeat_timestamp = timeutils.utcnow()
     sync_router = {
         'id': 'foo_router_id',
         'distributed': True
     }
     plugin = mock.Mock()
     plugin.get_router.return_value = sync_router
     plugin.get_l3_agents_hosting_routers.return_value = []
     plugin.get_l3_agents.return_value = [agent]
     plugin.get_l3_agent_candidates.return_value = [agent]
     with mock.patch.object(scheduler, 'bind_router'):
         scheduler._schedule_router(
             plugin, self.adminContext,
             'foo_router_id', None, {'gw_exists': True})
     expected_calls = [
         mock.call.get_router(mock.ANY, 'foo_router_id'),
         mock.call.schedule_snat_router(
             mock.ANY, 'foo_router_id', sync_router, True),
         mock.call.get_l3_agents_hosting_routers(
             mock.ANY, ['foo_router_id'], admin_state_up=True),
         mock.call.get_l3_agents(mock.ANY, active=True),
         mock.call.get_l3_agent_candidates(
             mock.ANY, sync_router, [agent], None),
     ]
     plugin.assert_has_calls(expected_calls)
Example #15
    def create_openvpnconnection(self, context, openvpnconnection):
        openvpnconnection = openvpnconnection['openvpnconnection']
        tenant_id = self._get_tenant_id_for_create(context, openvpnconnection)
        l3_plugin = manager.NeutronManager.get_service_plugins().get(
            constants.L3_ROUTER_NAT)
        if not l3_plugin:
            raise openvpn.RouterExtNotFound()

        openvpn_conns = self.get_openvpnconnections(
            context, filters={'router_id': [openvpnconnection['router_id']]})

        if openvpn_conns:
            raise openvpn.OpenvpnInExists(
                router_id=openvpnconnection['router_id'])

        external = self.get_external(context, openvpnconnection['router_id'],
                                     openvpnconnection['peer_cidr'])
        openvpn_id = uuidutils.generate_uuid()
        openvpnconnection.update(external)
        openvpnconnection.update({'id': openvpn_id})

        ta_key_info = ca.OpenVPNDBDrv().generate_client_ca(openvpn_id)
        openvpn_file = ca.OpenVPNFile(openvpnconnection)
        zip_contents = openvpn_file.generate_zip_file()

        #l3_plugin.get_router(context, openvpnconnection['router_id'])
        with context.session.begin(subtransactions=True):
            self._validate_peer_vpn_cidr(context,
                                         openvpnconnection['router_id'],
                                         openvpnconnection['peer_cidr'])

            openvpnconn_db = OpenVPNConnection(
                id=openvpn_id,
                tenant_id=tenant_id,
                name=openvpnconnection['name'],
                description=openvpnconnection['description'],
                peer_cidr=openvpnconnection['peer_cidr'],
                port=openvpnconnection['port'],
                protocol=openvpnconnection['protocol'],
                router_id=openvpnconnection['router_id'],
                admin_state_up=openvpnconnection['admin_state_up'],
                status=constants.DOWN,
                created_at=timeutils.utcnow(),
                ta_key=ta_key_info['ta_key'],
                zip_file=zip_contents,
            )
            utils.make_default_name(openvpnconn_db,
                                    uos_constants.UOS_PRE_OPENVPN)
            context.session.add(openvpnconn_db)

        openvpn_cons = self._make_openvpn_ca_dict(openvpnconn_db)
        openvpn_cons.update(external)
        LOG.debug(_('openvpn service info %s in db '), openvpn_cons)
        # remove all client files
        openvpn_file.remove_all_file()

        if self.openvpn_driver:
            self.openvpn_driver.create_vpnservice(context, openvpn_cons)

        return self._make_openvpnconnection_dict(openvpnconn_db)
Example #16
    def attempt_to_reallocate_ip(self, context, net_id, port_id, reuse_after,
                                 version=None, ip_address=None):
        version = version or [4, 6]
        elevated = context.elevated()

        # We never want to take the chance of an infinite loop here. Instead,
        # we'll clean up multiple bad IPs if we find them (assuming something
        # is really wrong)
        for times in xrange(3):
            with context.session.begin(subtransactions=True):
                address = db_api.ip_address_find(
                    elevated, network_id=net_id, reuse_after=reuse_after,
                    deallocated=True, scope=db_api.ONE, ip_address=ip_address,
                    lock_mode=True, version=version, order_by="address")

                if address:
                    #NOTE(mdietz): We should always be in the CIDR but we've
                    #              also said that before :-/
                    if address.get("subnet"):
                        cidr = netaddr.IPNetwork(address["subnet"]["cidr"])
                        addr = netaddr.IPAddress(int(address["address"]),
                                                 version=int(cidr.version))
                        if addr in cidr:
                            updated_address = db_api.ip_address_update(
                                elevated, address, deallocated=False,
                                deallocated_at=None,
                                allocated_at=timeutils.utcnow())
                            return [updated_address]
                        else:
                            # Make sure we never find it again
                            context.session.delete(address)
                            continue
                break
        return []
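Before reusing a deallocated address, the code above double-checks that the stored integer really falls inside its subnet's CIDR, deleting the row otherwise. That membership test on its own, using the same netaddr calls (requires the netaddr package; the values are illustrative):

import netaddr


def address_in_subnet(address_int, cidr):
    # Mirrors the sanity check above: build the network, rebuild the
    # address from its stored integer form, then test membership.
    net = netaddr.IPNetwork(cidr)
    addr = netaddr.IPAddress(address_int, version=int(net.version))
    return addr in net


print(address_in_subnet(int(netaddr.IPAddress('192.0.2.10')), '192.0.2.0/24'))
print(address_in_subnet(int(netaddr.IPAddress('198.51.100.9')), '192.0.2.0/24'))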
Example #17
    def _create_or_update_agent(self, context, agent):
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent[k]) for k in res_keys)

            configurations_dict = agent.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent['agent_type'], agent['host'])
                res['heartbeat_timestamp'] = current_time
                if agent.get('start_flag'):
                    res['started_at'] = current_time
                greenthread.sleep(0)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = True
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
            greenthread.sleep(0)
Example #18
def make_active_agent(fake_id, fake_agent_type, config=None):
    agent_dict = dict(id=fake_id,
                      agent_type=fake_agent_type,
                      host='localhost_' + str(fake_id),
                      heartbeat_timestamp=timeutils.utcnow(),
                      configurations=config)
    return agent_dict
Example #19
    def create_pool(self, context, pool):
        network = self._core_plugin.get_network(context, pool['network_id'])
        subnet_id = pool.get('subnet_id', None)
        if subnet_id:
            subnet = self._core_plugin.get_subnet(context, subnet_id)

            if subnet['network_id'] != pool['network_id']:
                raise loadbalancerv2.NetworkSubnetIDMismatch(
                    subnet_id=subnet_id,
                    network_id=pool['network_id'])

        with context.session.begin(subtransactions=True):
            self._load_id_and_tenant_id(context, pool)
            pool['status'] = constants.PENDING_CREATE

            session_info = pool.pop('session_persistence')
            healthmonitor_info = pool.pop('healthmonitor')

            pool['created_at'] = timeutils.utcnow()
            pool_db = models.PoolV2(**pool)
            if session_info:
                LOG.debug('_create_pool session_info %s', session_info)
                s_p = self._create_session_persistence_db(session_info,
                                                          pool_db.id)
                pool_db.session_persistence = s_p
            if healthmonitor_info:
                health_monitor = self._create_healthmonitor_db(
                    healthmonitor_info, pool_db.id)
                pool_db.healthmonitor = health_monitor
            LOG.debug('_create_pool pool_db %s', pool_db)

            context.session.add(pool_db)
            context.session.flush()
        return data_models.Pool.from_sqlalchemy_model(pool_db)
Example #20
    def create_or_update_agent(self, context, agent):
        """Create or update agent according to report."""
        with context.session.begin(subtransactions=True):
            res_keys = ['agent_type', 'binary', 'host', 'topic']
            res = dict((k, agent[k]) for k in res_keys)

            configurations_dict = agent.get('configurations', {})
            res['configurations'] = jsonutils.dumps(configurations_dict)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(
                    context, agent['agent_type'], agent['host'])
                res['heartbeat_timestamp'] = current_time
                if agent.get('start_flag'):
                    res['started_at'] = current_time
                greenthread.sleep(0)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                greenthread.sleep(0)
                res['created_at'] = current_time
                res['started_at'] = current_time
                res['heartbeat_timestamp'] = current_time
                res['admin_state_up'] = True
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
            greenthread.sleep(0)
Example #21
    def is_device_reachable(self, device):
        """Check the device which hosts this resource is reachable.

        If the resource is not reachable, it is added to the backlog.

        :param device : dict of the device
        :return True if device is reachable, else None
        """
        hd = device
        hd_id = device['id']
        mgmt_url = device.get('mgmt_url', None)
        if mgmt_url:
            hd_mgmt_ip = mgmt_url.get('ip_address', None)
            device['created_at'] = datetime.datetime.strptime(
                device['created_at'], '%Y-%m-%dT%H:%M:%S.000000')

            if hd_id not in self.backlog_devices:
                if _is_pingable(hd_mgmt_ip):
                    LOG.debug("Hosting device: %(hd_id)s@%(ip)s is reachable.",
                              {'hd_id': hd_id, 'ip': hd_mgmt_ip})
                    return True
                LOG.warn("Hosting device: %(hd_id)s@%(ip)s is NOT reachable.",
                         {'hd_id': hd_id, 'ip': hd_mgmt_ip})
                #hxn add
                hd['backlog_insertion_ts'] = max(
                    timeutils.utcnow(),
                    hd['created_at'] +
                    datetime.timedelta(seconds=BOOT_TIME_INTERVAL))
                self.backlog_devices[hd_id] = hd
                LOG.debug("Hosting device: %(hd_id)s @ %(ip)s is now added "
                          "to backlog", {'hd_id': hd_id, 'ip': hd_mgmt_ip})
        else:
            LOG.debug("Hosting device: %(hd_id)s can not added "
                      "to backlog, because of no mgmt_ip", {'hd_id': hd_id})
Example #22
class AgentExtRpcCallback(n_rpc.RpcCallback):
    """Processes the rpc report in plugin implementations."""

    RPC_API_VERSION = '1.0'
    START_TIME = timeutils.utcnow()

    def __init__(self, plugin=None):
        super(AgentExtRpcCallback, self).__init__()
        self.plugin = plugin

    def report_state(self, context, **kwargs):
        """Report state from agent to server."""
        time = kwargs['time']
        time = timeutils.parse_strtime(time)
        if self.START_TIME > time:
            LOG.debug(_("Message with invalid timestamp received"))
            raise ext_agent.AgentInvalidTimestamp()
        agent_state = kwargs['agent_state']['agent_state']
        if not self.plugin:
            self.plugin = manager.NeutronManager.get_plugin()
        self.plugin.create_or_update_agent(context, agent_state)
        if (cfg.CONF.isolate_relay_cidr and
                agent_state['binary'] == 'neutron-l3-agent'):
            start_flag = agent_state.get('start_flag', None)
            if start_flag:
                return {'isolate_relay_cidr': cfg.CONF.isolate_relay_cidr}
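The callback above rejects any report whose timestamp predates the server process, which guards against agents replaying state from before a restart. A standalone sketch of that comparison; the timestamp layout is assumed to be the usual oslo strtime format rather than taken from the original code:

import datetime

# Assumed wire format for the report's 'time' field.
STRTIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
START_TIME = datetime.datetime.utcnow()


def is_stale_report(reported_at_str):
    # Reports generated before this process started are considered invalid.
    reported_at = datetime.datetime.strptime(reported_at_str, STRTIME_FORMAT)
    return START_TIME > reported_at


print(is_stale_report('2001-01-01T00:00:00.000000'))  # True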
Example #23
    def test_allocate_finds_ip_reallocates(self):
        network = dict(name="public", tenant_id="fake")
        ipnet = netaddr.IPNetwork("0.0.0.0/24")
        next_ip = ipnet.ipv6().first + 10
        subnet = dict(cidr="0.0.0.0/24",
                      next_auto_assign_ip=next_ip,
                      ip_policy=None,
                      tenant_id="fake",
                      do_not_use=False)

        addr = netaddr.IPAddress("0.0.0.2")

        after_reuse_after = cfg.CONF.QUARK.ipam_reuse_after + 1
        reusable_after = datetime.timedelta(seconds=after_reuse_after)
        deallocated_at = timeutils.utcnow() - reusable_after
        ip_address = dict(address=addr,
                          version=4,
                          _deallocated=True,
                          deallocated_at=deallocated_at)

        with self._stubs(network, subnet, ip_address) as net:
            ipaddress = []
            self.ipam.allocate_ip_address(self.context, ipaddress, net["id"],
                                          0, 0)
            self.assertIsNotNone(ipaddress[0]['id'])
            expected = netaddr.IPAddress("0.0.0.2").ipv6().value
            self.assertEqual(ipaddress[0]['address'], expected)
            self.assertEqual(ipaddress[0]['version'], 4)
            self.assertEqual(ipaddress[0]['used_by_tenant_id'], "fake")
Example #24
 def create_pptpconnection(self, context, pptpconnection):
     pptpconnection = pptpconnection['pptpconnection']
     tenant_id = self._get_tenant_id_for_create(context,
                                                pptpconnection)
     l3_plugin = manager.NeutronManager.get_service_plugins().get(
         constants.L3_ROUTER_NAT)
     if not l3_plugin:
         raise pptpvpnaas.RouterExtNotFound()
     l3_plugin.get_router(context, pptpconnection['router_id'])
     with context.session.begin(subtransactions=True):
         self._validate_vpn_cidr(context,
                                 pptpconnection['router_id'],
                                 pptpconnection['vpn_cidr'])
         pptpconn_db = PPTPConnection(
             id=uuidutils.generate_uuid(),
             tenant_id=tenant_id,
             name=pptpconnection['name'],
             description=pptpconnection['description'],
             vpn_cidr=pptpconnection['vpn_cidr'],
             router_id=pptpconnection['router_id'],
             admin_state_up=pptpconnection['admin_state_up'],
             status=constants.DOWN,
             created_at=timeutils.utcnow(),
         )
         utils.make_default_name(pptpconn_db, uos_constants.UOS_PRE_PPTP)
         context.session.add(pptpconn_db)
     result = self._make_pptpconnection_dict(pptpconn_db)
     if self.pptp_driver:
         self.pptp_driver.create_vpnservice(context, result)
     return result
Example #25
 def test_schedule_router_distributed(self):
     scheduler = l3_agent_scheduler.ChanceScheduler()
     agent = agents_db.Agent()
     agent.admin_state_up = True
     agent.heartbeat_timestamp = timeutils.utcnow()
     sync_router = {'id': 'foo_router_id', 'distributed': True}
     plugin = mock.Mock()
     plugin.get_router.return_value = sync_router
     plugin.get_l3_agents_hosting_routers.return_value = []
     plugin.get_l3_agents.return_value = [agent]
     plugin.get_l3_agent_candidates.return_value = [agent]
     with mock.patch.object(scheduler, 'bind_router'):
         scheduler._schedule_router(plugin, self.adminContext,
                                    'foo_router_id', None,
                                    {'gw_exists': True})
     expected_calls = [
         mock.call.get_router(mock.ANY, 'foo_router_id'),
         mock.call.schedule_snat_router(mock.ANY, 'foo_router_id',
                                        sync_router, True),
         mock.call.get_l3_agents_hosting_routers(mock.ANY,
                                                 ['foo_router_id'],
                                                 admin_state_up=True),
         mock.call.get_l3_agents(mock.ANY, active=True),
         mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent],
                                           None),
     ]
     plugin.assert_has_calls(expected_calls)
Example #26
    def is_hosting_device_reachable(self, hosting_device):
        """Check the hosting device which hosts this resource is reachable.

        If the resource is not reachable, it is added to the backlog.

        :param hosting_device : dict of the hosting device
        :return True if device is reachable, else None
        """
        hd = hosting_device
        hd_id = hosting_device['id']
        hd_mgmt_ip = hosting_device['management_ip_address']
        # Modifying the 'created_at' to a date time object
        hosting_device['created_at'] = datetime.datetime.strptime(
            hosting_device['created_at'], '%Y-%m-%d %H:%M:%S')

        if hd_id not in self.backlog_hosting_devices:
            if _is_pingable(hd_mgmt_ip):
                LOG.debug("Hosting device: %(hd_id)s@%(ip)s is reachable.",
                          {'hd_id': hd_id, 'ip': hd_mgmt_ip})
                return True
            LOG.debug("Hosting device: %(hd_id)s@%(ip)s is NOT reachable.",
                      {'hd_id': hd_id, 'ip': hd_mgmt_ip})
            hd['backlog_insertion_ts'] = max(
                timeutils.utcnow(),
                hd['created_at'] +
                datetime.timedelta(seconds=hd['booting_time']))
            self.backlog_hosting_devices[hd_id] = {'hd': hd}
            LOG.debug("Hosting device: %(hd_id)s @ %(ip)s is now added "
                      "to backlog", {'hd_id': hd_id, 'ip': hd_mgmt_ip})
Example #27
 def create_pptpconnection(self, context, pptpconnection):
     pptpconnection = pptpconnection['pptpconnection']
     tenant_id = self._get_tenant_id_for_create(context, pptpconnection)
     l3_plugin = manager.NeutronManager.get_service_plugins().get(
         constants.L3_ROUTER_NAT)
     if not l3_plugin:
         raise pptpvpnaas.RouterExtNotFound()
     l3_plugin.get_router(context, pptpconnection['router_id'])
     with context.session.begin(subtransactions=True):
         self._validate_vpn_cidr(context, pptpconnection['router_id'],
                                 pptpconnection['vpn_cidr'])
         pptpconn_db = PPTPConnection(
             id=uuidutils.generate_uuid(),
             tenant_id=tenant_id,
             name=pptpconnection['name'],
             description=pptpconnection['description'],
             vpn_cidr=pptpconnection['vpn_cidr'],
             router_id=pptpconnection['router_id'],
             admin_state_up=pptpconnection['admin_state_up'],
             status=constants.DOWN,
             created_at=timeutils.utcnow(),
         )
         utils.make_default_name(pptpconn_db, uos_constants.UOS_PRE_PPTP)
         context.session.add(pptpconn_db)
     result = self._make_pptpconnection_dict(pptpconn_db)
     if self.pptp_driver:
         self.pptp_driver.create_vpnservice(context, result)
     return result
Example #28
    def is_hosting_device_reachable(self, hosting_device):
        """Check the hosting device which hosts this resource is reachable.

        If the resource is not reachable, it is added to the backlog.

        :param hosting_device : dict of the hosting device
        :return True if device is reachable, else None
        """
        hd = hosting_device
        hd_id = hosting_device['id']
        hd_mgmt_ip = hosting_device['management_ip_address']
        # Modifying the 'created_at' to a date time object
        hosting_device['created_at'] = datetime.datetime.strptime(
            hosting_device['created_at'], '%Y-%m-%d %H:%M:%S')

        if hd_id not in self.backlog_hosting_devices:
            if _is_pingable(hd_mgmt_ip):
                LOG.debug("Hosting device: %(hd_id)s@%(ip)s is reachable.",
                          {'hd_id': hd_id, 'ip': hd_mgmt_ip})
                return True
            LOG.debug("Hosting device: %(hd_id)s@%(ip)s is NOT reachable.",
                      {'hd_id': hd_id, 'ip': hd_mgmt_ip})
            hd['backlog_insertion_ts'] = max(
                timeutils.utcnow(),
                hd['created_at'] +
                datetime.timedelta(seconds=hd['booting_time']))
            self.backlog_hosting_devices[hd_id] = {'hd': hd}
            LOG.debug("Hosting device: %(hd_id)s @ %(ip)s is now added "
                      "to backlog", {'hd_id': hd_id, 'ip': hd_mgmt_ip})
Example #29
    def _create_or_update_agent(self, context, agent):
        with context.session.begin(subtransactions=True):
            res_keys = ["agent_type", "binary", "host", "topic"]
            res = dict((k, agent[k]) for k in res_keys)

            configurations_dict = agent.get("configurations", {})
            res["configurations"] = jsonutils.dumps(configurations_dict)
            current_time = timeutils.utcnow()
            try:
                agent_db = self._get_agent_by_type_and_host(context, agent["agent_type"], agent["host"])
                res["heartbeat_timestamp"] = current_time
                if agent.get("start_flag"):
                    res["started_at"] = current_time
                greenthread.sleep(0)
                agent_db.update(res)
            except ext_agent.AgentNotFoundByTypeHost:
                greenthread.sleep(0)
                res["created_at"] = current_time
                res["started_at"] = current_time
                res["heartbeat_timestamp"] = current_time
                res["admin_state_up"] = True
                agent_db = Agent(**res)
                greenthread.sleep(0)
                context.session.add(agent_db)
            greenthread.sleep(0)
Example #30
    def create_openvpnconnection(self, context, openvpnconnection):
        openvpnconnection = openvpnconnection['openvpnconnection']
        tenant_id = self._get_tenant_id_for_create(context,
                                                   openvpnconnection)
        l3_plugin = manager.NeutronManager.get_service_plugins().get(
            constants.L3_ROUTER_NAT)
        if not l3_plugin:
            raise openvpn.RouterExtNotFound()

        openvpn_conns = self.get_openvpnconnections(
            context, filters={'router_id': [openvpnconnection['router_id']]})

        if openvpn_conns:
            raise openvpn.OpenvpnInExists(
                router_id=openvpnconnection['router_id'])

        external = self.get_external(context, openvpnconnection['router_id'],
                                     openvpnconnection['peer_cidr'])
        openvpn_id = uuidutils.generate_uuid()
        openvpnconnection.update(external)
        openvpnconnection.update({'id': openvpn_id})

        ta_key_info = ca.OpenVPNDBDrv().generate_client_ca(openvpn_id)
        openvpn_file = ca.OpenVPNFile(openvpnconnection)
        zip_contents = openvpn_file.generate_zip_file()

        #l3_plugin.get_router(context, openvpnconnection['router_id'])
        with context.session.begin(subtransactions=True):
            self._validate_peer_vpn_cidr(context,
                                         openvpnconnection['router_id'],
                                         openvpnconnection['peer_cidr'])

            openvpnconn_db = OpenVPNConnection(
                id=openvpn_id,
                tenant_id=tenant_id,
                name=openvpnconnection['name'],
                description=openvpnconnection['description'],
                peer_cidr=openvpnconnection['peer_cidr'],
                port=openvpnconnection['port'],
                protocol=openvpnconnection['protocol'],
                router_id=openvpnconnection['router_id'],
                admin_state_up=openvpnconnection['admin_state_up'],
                status=constants.DOWN,
                created_at=timeutils.utcnow(),
                ta_key=ta_key_info['ta_key'],
                zip_file=zip_contents,
            )
            utils.make_default_name(openvpnconn_db,
                                    uos_constants.UOS_PRE_OPENVPN)
            context.session.add(openvpnconn_db)

        openvpn_cons = self._make_openvpn_ca_dict(openvpnconn_db)
        openvpn_cons.update(external)
        LOG.debug(_('openvpn service info %s in db '), openvpn_cons)
        # remove all client files
        openvpn_file.remove_all_file()

        if self.openvpn_driver:
            self.openvpn_driver.create_vpnservice(context, openvpn_cons)

        return self._make_openvpnconnection_dict(openvpnconn_db)
Example #31
class LbassProxyCallback(n_rpc.RpcCallback):
    """Callback for Lb proxy RPC in agent implementations."""

    RPC_API_VERSION = '1.0'
    START_TIME = timeutils.utcnow()

    def __init__(self, manager):
        self.manager = manager

    def update_stats(self, context, stats):
        return self.manager.update_stats(stats)

    def get_haproxy_config_data(self, context):
        return self.manager.get_haproxy_config_data()

    def report_state(self, context, data):
        if not data['ip'] or data['status'] == 'DOWN':
            return

        report_time = data['time']
        report_time = timeutils.parse_strtime(report_time)

        if self.START_TIME > report_time:
            LOG.debug(_("Message with invalid timestamp received"))
        else:
            ip = data['ip']
            if ip not in self.manager.proxy_ips:
                self.manager.proxy_ips[ip] = {}
                self.manager.proxy_ips[ip]['flag'] = False
            self.manager.proxy_ips[ip]['time'] = report_time
Example #32
    def create_loadbalancer(self, context, loadbalancer):
        with context.session.begin(subtransactions=True):
            self._load_id_and_tenant_id(context, loadbalancer)
            vip_address = loadbalancer.pop('vip_address')
            securitygroup_id = loadbalancer.get('securitygroup_id')
            loadbalancer['status'] = constants.PENDING_CREATE
            loadbalancer['created_at'] = timeutils.utcnow()
            lb_db = models.LoadBalancer(**loadbalancer)
            context.session.add(lb_db)
            context.session.flush()
            lb_db.stats = self._create_loadbalancer_stats(
                context, lb_db.id)
            context.session.add(lb_db)

        # create port outside of lb create transaction since it can sometimes
        # cause lock wait timeouts
        try:
            self._create_port_for_load_balancer(
                context, lb_db, vip_address, securitygroup_id)
        except ext_sg.SecurityGroupNotFound:
            LOG.error('_create_port_for_load_balancer %s securitygroup',
                      lb_db.id)
            with excutils.save_and_reraise_exception():
                context.session.delete(lb_db)
                context.session.flush()
                raise loadbalancerv2.SecurityGroupNotFound(id=lb_db.id)
        except Exception:
            LOG.error('_create_port_for_load_balancer %s', lb_db.id)
            with excutils.save_and_reraise_exception():
                context.session.delete(lb_db)
                context.session.flush()
        return data_models.LoadBalancer.from_sqlalchemy_model(lb_db)
Example #33
def make_inactive_agent(fake_id, fake_agent_type, delta, config=None):
    agent_dict = dict(id=fake_id,
                      agent_type=fake_agent_type,
                      host='remotehost_' + str(fake_id),
                      heartbeat_timestamp=(timeutils.utcnow() - datetime.
                                           timedelta(delta)),
                      configurations=config)
    return agent_dict
Example #34
 def deallocate_mac_address(self, context, address):
     mac = db_api.mac_address_find(context, address=address,
                                   scope=db_api.ONE)
     if not mac:
         raise exceptions.NotFound(
             message="No MAC address %s found" % netaddr.EUI(address))
     db_api.mac_address_update(context, mac, deallocated=True,
                               deallocated_at=timeutils.utcnow())
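MAC addresses are stored as integers, and netaddr.EUI is what turns them back into something readable for the error message above. A tiny round-trip sketch (requires netaddr; the value is arbitrary):

import netaddr

mac_int = 0x001122334455
mac = netaddr.EUI(mac_int)
print(mac)                   # 00-11-22-33-44-55
print(int(mac) == mac_int)   # True: the integer form round-trips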
Example #35
    def _set_proxy_info(self):
        self.old_ips, old_external_networks = self.plugin_rpc.get_proxy_info()
        init_time = timeutils.utcnow()

        for ip in self.old_ips:
            self.proxy_ips[ip] = {"time": init_time, "flag": False}

        self.removed_external_networks = (
            set(old_external_networks) - set(self.external_networks))
Example #36
 def soft_delete(self, synchronize_session='evaluate'):
     return self.update(
         {
             'deleted': literal_column('id'),
             'updated_at': literal_column('updated_at'),
             'deleted_at': timeutils.utcnow()
         },
         synchronize_session=synchronize_session)
Example #37
 def test__schedule_network_no_existing_agents(self):
     agent = agents_db.Agent()
     agent.admin_state_up = True
     agent.heartbeat_timestamp = timeutils.utcnow()
     network = {'id': 'foo_net_id'}
     self._test__schedule_network(network,
                                  new_agents=None, existing_agents=[agent],
                                  expected_casts=0, expected_warnings=0)
Example #38
 def __init__(self, router_id, priority,
              action=None, router=None, timestamp=None):
     self.priority = priority
     self.timestamp = timestamp
     if not timestamp:
         self.timestamp = timeutils.utcnow()
     self.id = router_id
     self.action = action
     self.router = router
Example #39
 def soft_delete(self, synchronize_session="evaluate"):
     return self.update(
         {
             "deleted": literal_column("id"),
             "updated_at": literal_column("updated_at"),
             "deleted_at": timeutils.utcnow(),
         },
         synchronize_session=synchronize_session,
     )
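soft_delete() never removes rows; it rewrites them in place, copying the primary key into "deleted" and stamping "deleted_at". A sketch of the UPDATE it builds, expressed with SQLAlchemy core against an illustrative table (not a real model):

import datetime

import sqlalchemy as sa
from sqlalchemy.sql import column, table

resources = table('resources',
                  column('id', sa.String(36)),
                  column('deleted', sa.String(36)),
                  column('updated_at', sa.DateTime()),
                  column('deleted_at', sa.DateTime()))

# Roughly the statement the query-level soft_delete() above emits.
stmt = resources.update().values(
    deleted=sa.literal_column('id'),
    updated_at=sa.literal_column('updated_at'),
    deleted_at=datetime.datetime.utcnow())
print(stmt)  # UPDATE resources SET deleted=id, updated_at=updated_at, ...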
Example #40
    def attempt_to_reallocate_ip(self, context, net_id, port_id, reuse_after,
                                 version=None, ip_address=None,
                                 segment_id=None, subnets=None):
        version = version or [4, 6]
        elevated = context.elevated()

        # We never want to take the chance of an infinite loop here. Instead,
        # we'll clean up multiple bad IPs if we find them (assuming something
        # is really wrong)

        #TODO(mdietz & mpath): Perhaps remove, select for update might quash
        for times in xrange(3):
            with context.session.begin(subtransactions=True):

                sub_ids = []
                if subnets:
                    sub_ids = subnets
                else:
                    if segment_id:
                        subnets = db_api.subnet_find(elevated,
                                                     network_id=net_id,
                                                     segment_id=segment_id)
                        sub_ids = [s["id"] for s in subnets]
                        if not sub_ids:
                            raise exceptions.IpAddressGenerationFailure(
                                net_id=net_id)

                ip_kwargs = {
                    "network_id": net_id, "reuse_after": reuse_after,
                    "deallocated": True, "scope": db_api.ONE,
                    "ip_address": ip_address, "lock_mode": True,
                    "version": version, "order_by": "address"}

                if sub_ids:
                    ip_kwargs["subnet_id"] = sub_ids

                address = db_api.ip_address_find(elevated, **ip_kwargs)

                if address:
                    #NOTE(mdietz): We should always be in the CIDR but we've
                    #              also said that before :-/
                    if address.get("subnet"):
                        cidr = netaddr.IPNetwork(address["subnet"]["cidr"])
                        addr = netaddr.IPAddress(int(address["address"]),
                                                 version=int(cidr.version))
                        if addr in cidr:
                            updated_address = db_api.ip_address_update(
                                elevated, address, deallocated=False,
                                deallocated_at=None,
                                allocated_at=timeutils.utcnow())
                            return [updated_address]
                        else:
                            # Make sure we never find it again
                            context.session.delete(address)
                            continue
                break
        return []
Example #41
    def _set_proxy_info(self):
        self.old_ips, old_external_networks = self.plugin_rpc.get_proxy_info()
        init_time = timeutils.utcnow()

        for ip in self.old_ips:
            self.proxy_ips[ip] = {'time': init_time, 'flag': False}

        self.removed_external_networks = (
            set(old_external_networks) - set(self.external_networks))
Example #42
 def __call__(self, request):
     now = timeutils.utcnow()
     reqBody = "-"
     if 'xml' in str(request.content_type) or 'json' in str(
             request.content_type):
         if (request.content_length is not None and
                 request.content_length < 10240):
             reqBody = str(request.body) or '-'
     data = {
         'remote_addr': request.remote_addr,
         'remote_user': request.remote_user or '-',
         'token_id': "None",
         'request_datetime': '%s' % now.strftime(APACHE_TIME_FORMAT),
         'response_datetime': '%s' % now.strftime(APACHE_TIME_FORMAT),
         'method': request.method,
         'url': request.url,
         'http_version': request.http_version,
         'status': 500,
         'content_length': '-',
         'request_body': reqBody
     }
     token = ''
     try:
         token = request.headers['X-Auth-Token']
         token = HWExtend.b64encodeToken(token)
      except Exception:
         token = "-"
     try:
         response = request.get_response(self.application)
         data['status'] = response.status_int
         data['content_length'] = response.content_length or '-'
     finally:
         # must be calculated *after* the application has been called
         now = timeutils.utcnow()
         data['token_id'] = token
         if "GET" in data['method'] and "/tokens/" in data['url']:
             Pos = data['url'].find("tokens") + 7
             logToken = data['url'][Pos:Pos + 32]
             encodedToken = HWExtend.b64encodeToken(logToken)
             data['url'] = data['url'].replace(logToken, encodedToken)
         # timeutils may not return UTC, so we can't hardcode +0000
         data['response_datetime'] = '%s' % (
             now.strftime(APACHE_TIME_FORMAT))
         log.info(DRM_LOG_FORMAT % data, extra={"type": "operate"})
     return response
Example #43
    def _sync_routers_task(self, context):
        if self.services_sync:
            super(L3NATAgent, self).process_services_sync(context)
        LOG.debug(_("Starting _sync_routers_task - fullsync:%s"),
                  self.fullsync)
        if not self.fullsync:
            return

        # Capture a picture of namespaces *before* fetching the full list from
        # the database.  This is important to correctly identify stale ones.
        namespaces = set()
        if self._clean_stale_namespaces:
            namespaces = self._list_namespaces()
        prev_router_ids = set(self.router_info)

        try:
            router_ids = self._router_ids()
            self.updated_routers.clear()
            self.removed_routers.clear()
            timestamp = timeutils.utcnow()
            routers = self.plugin_rpc.get_routers(
                context, router_ids)

            LOG.debug(_('Processing :%r'), routers)
            for r in routers:
                update = RouterUpdate(r['id'],
                                      PRIORITY_SYNC_ROUTERS_TASK,
                                      router=r,
                                      timestamp=timestamp)
                self._queue.add(update)
            self.fullsync = False
            LOG.debug(_("_sync_routers_task successfully completed"))
        except n_rpc.RPCException:
            LOG.exception(_("Failed synchronizing routers due to RPC error"))
            self.fullsync = True
        except Exception:
            LOG.exception(_("Failed synchronizing routers"))
            self.fullsync = True
        else:
            # Resync is not necessary for the cleanup of stale namespaces
            curr_router_ids = set([r['id'] for r in routers])

            # Two kinds of stale routers:  Routers for which info is cached in
            # self.router_info and the others.  First, handle the former.
            for router_id in prev_router_ids - curr_router_ids:
                update = RouterUpdate(router_id,
                                      PRIORITY_SYNC_ROUTERS_TASK,
                                      timestamp=timestamp,
                                      action=DELETE_ROUTER)
                self._queue.add(update)

            # Next, one effort to clean out namespaces for which we don't have
            # a record.  (i.e. _clean_stale_namespaces=False after one pass)
            if self._clean_stale_namespaces:
                ids_to_keep = curr_router_ids | prev_router_ids
                self._cleanup_namespaces(namespaces, ids_to_keep)
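The stale-router handling in the else branch boils down to set arithmetic on router IDs. A tiny illustration with invented IDs:

# Invented router IDs, purely to illustrate the set operations used above.
prev_router_ids = {"r1", "r2", "r3"}   # cached in self.router_info before the sync
curr_router_ids = {"r2", "r3", "r4"}   # returned by the plugin for this sync

stale = prev_router_ids - curr_router_ids        # {"r1"}: queued with DELETE_ROUTER
ids_to_keep = curr_router_ids | prev_router_ids  # namespaces for these are preserved
print(stale, ids_to_keep)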
Example #44
0
 def _test_create_time(self, func, resource, resources=None):
     if not resources:
         resources = resource + "s"
     _now = timeutils.utcnow()
     with func() as obj:
         _obj = self._show(resources, obj[resource]['id'])
         c_time = _obj[resource]['created_at']
         _c = timeutils.parse_strtime(c_time)
     delta = timeutils.delta_seconds(_now, _c)
     self.assertTrue(delta > 0)
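The assertion relies on the argument order of timeutils.delta_seconds. Assuming the usual oslo implementation, delta_seconds(before, after) returns (after - before) in seconds, so a resource created after _now was captured yields a positive delta. A plain-datetime sketch of the same check:

import datetime

_now = datetime.datetime.utcnow()
created_at = _now + datetime.timedelta(milliseconds=5)  # stands in for created_at
delta = (created_at - _now).total_seconds()             # what delta_seconds computes
assert delta > 0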
Example #45
0
 def deallocate_ip_address(self, context, address):
     address["deallocated"] = 1
     payload = dict(used_by_tenant_id=address["used_by_tenant_id"],
                    ip_block_id=address["subnet_id"],
                    ip_address=address["address_readable"],
                    device_ids=[p["device_id"] for p in address["ports"]],
                    created_at=address["created_at"],
                    deleted_at=timeutils.utcnow())
     n_rpc.get_notifier("network").info(context, "ip_block.address.delete",
                                        payload)
Example #47
0
 def test__schedule_network_no_existing_agents(self):
     agent = agents_db.Agent()
     agent.admin_state_up = True
     agent.heartbeat_timestamp = timeutils.utcnow()
     network = {'id': 'foo_net_id'}
     self._test__schedule_network(network,
                                  new_agents=None,
                                  existing_agents=[agent],
                                  expected_casts=0,
                                  expected_warnings=0)
Example #48
0
    def _update_floatingip_time_tenant(self, context, floatingip_alloc):
        floating_ip_address = floatingip_alloc['floating_ip_address']
        fip_qry = context.session.query(FloatingIpAllocation)
        fip = fip_qry.filter_by(floating_ip_address=floating_ip_address)
        fip.update({'updated_at': timeutils.utcnow(),
                    'last_tenant_id': floatingip_alloc['last_tenant_id'],
                    'floating_subnet_id': floatingip_alloc['floating_subnet_id'],
                    'allocated': floatingip_alloc['allocated']})

        LOG.info("Updated floating IP %s time and tenant_id successfully", fip)
Example #49
0
 def __call__(self, request):
     now = timeutils.utcnow()
     reqBody = "-"
     if "xml" in str(request.content_type) or "json" in str(request.content_type):
         if request.content_length is not None and request.content_length < 10240:
             reqBody = str(request.body) or "-"
     data = {
         "remote_addr": request.remote_addr,
         "remote_user": request.remote_user or "-",
         "token_id": "None",
         "request_datetime": "%s" % now.strftime(APACHE_TIME_FORMAT),
         "response_datetime": "%s" % now.strftime(APACHE_TIME_FORMAT),
         "method": request.method,
         "url": request.url,
         "http_version": request.http_version,
         "status": 500,
         "content_length": "-",
         "request_body": reqBody,
     }
     token = ""
     try:
         token = request.headers["X-Auth-Token"]
         token = HWExtend.b64encodeToken(token)
     except Exception:
         # X-Auth-Token header missing or token could not be encoded
         token = "-"
     try:
         response = request.get_response(self.application)
         data["status"] = response.status_int
         data["content_length"] = response.content_length or "-"
     finally:
         # must be calculated *after* the application has been called
         now = timeutils.utcnow()
         data["token_id"] = token
         if "GET" in data["method"] and "/tokens/" in data["url"]:
             Pos = data["url"].find("tokens") + 7
             logToken = data["url"][Pos : Pos + 32]
             encodedToken = HWExtend.b64encodeToken(logToken)
             data["url"] = data["url"].replace(logToken, encodedToken)
         # timeutils may not return UTC, so we can't hardcode +0000
         data["response_datetime"] = "%s" % (now.strftime(APACHE_TIME_FORMAT))
         log.info(DRM_LOG_FORMAT % data, extra={"type": "operate"})
     return response
Example #50
0
def ip_address_create(context, **address_dict):
    ip_address = models.IPAddress()
    address = address_dict.pop("address")
    ip_address.update(address_dict)
    ip_address["address"] = int(address)
    ip_address["address_readable"] = str(address)
    ip_address["tenant_id"] = context.tenant_id
    ip_address["_deallocated"] = 0
    ip_address["allocated_at"] = timeutils.utcnow()
    context.session.add(ip_address)
    return ip_address
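The int(address)/str(address) pair above suggests the popped address is a netaddr object; a short sketch of how such a value maps onto the two columns (assuming netaddr.IPAddress, which is not shown in the snippet):

import netaddr

addr = netaddr.IPAddress("10.0.0.5")
print(int(addr))   # 167772165 -> stored in ip_address["address"]
print(str(addr))   # '10.0.0.5' -> stored in ip_address["address_readable"]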
Example #51
0
 def deallocate_mac_address(self, context, address):
     mac = db_api.mac_address_find(context,
                                   address=address,
                                   scope=db_api.ONE)
     if not mac:
         raise exceptions.NotFound(message="No MAC address %s found" %
                                   netaddr.EUI(address))
     db_api.mac_address_update(context,
                               mac,
                               deallocated=True,
                               deallocated_at=timeutils.utcnow())
Example #52
0
    def _create_floatingip_allocation(self, context, floatingip):
        with context.session.begin(subtransactions=True):
            floatingip_alloc_db = FloatingIpAllocation(
                last_tenant_id=floatingip['last_tenant_id'],
                updated_at=timeutils.utcnow(),
                floating_ip_address=floatingip['floating_ip_address'],
                floating_subnet_id=floatingip['floating_subnet_id'],
                allocated=floatingip['allocated'])
            context.session.add(floatingip_alloc_db)

        LOG.info(_("floatingip allocation %s successful") % floatingip)
Example #53
0
 def _create_csr1kv_vm_hosting_device(self, context):
     """Creates a CSR1kv VM instance."""
     # Note(bobmel): Nova does not handle VM dispatching well before all
     # its services have started. This creates problems for the Neutron
     # devstack script that creates a Neutron router, which in turn
     # triggers service VM dispatching.
     # Only perform pool maintenance if needed Nova services have started
     if (cfg.CONF.general.ensure_nova_running and not self._nova_running):
         if self._svc_vm_mgr.nova_services_up():
             self.__class__._nova_running = True
         else:
             LOG.info(
                 _('Not all Nova services are up and running. '
                   'Skipping this CSR1kv vm create request.'))
             return
     plugging_drv = self.get_hosting_device_plugging_driver()
     hosting_device_drv = self.get_hosting_device_driver()
     if plugging_drv is None or hosting_device_drv is None:
         return
     # These resources are owned by the L3AdminTenant
     complementary_id = uuidutils.generate_uuid()
     dev_data = {
         'complementary_id': complementary_id,
         'device_id': 'CSR1kv',
         'admin_state_up': True,
         'protocol_port': 22,
         'created_at': timeutils.utcnow()
     }
     res = plugging_drv.create_hosting_device_resources(
         context, complementary_id, self.l3_tenant_id(), self.mgmt_nw_id(),
         self.mgmt_sec_grp_id(), 1)
     if res.get('mgmt_port') is None:
         # Required ports could not be created
         return
     vm_instance = self._svc_vm_mgr.dispatch_service_vm(
         context, 'CSR1kv_nrouter', cfg.CONF.hosting_devices.csr1kv_image,
         cfg.CONF.hosting_devices.csr1kv_flavor, hosting_device_drv,
         res['mgmt_port'], res.get('ports'))
     with context.session.begin(subtransactions=True):
         if vm_instance is not None:
             dev_data.update({
                 'id': vm_instance['id'],
                 'management_port_id': res['mgmt_port']['id']
             })
             hosting_device = self._create_hosting_device(
                 context, {'hosting_device': dev_data})
         else:
             # Fundamental error like could not contact Nova
             # Cleanup anything we created
             plugging_drv.delete_hosting_device_resources(
                 context, self.l3_tenant_id(), **res)
             return
     LOG.info(_('Created a CSR1kv hosting device VM'))
     return hosting_device
Example #54
0
def ip_address_create(context, **address_dict):
    ip_address = models.IPAddress()
    address = address_dict.pop("address")
    ip_address.update(address_dict)
    ip_address["address"] = int(address.ipv6())
    ip_address["address_readable"] = str(address)
    ip_address["used_by_tenant_id"] = context.tenant_id
    ip_address["_deallocated"] = 0
    ip_address["allocated_at"] = timeutils.utcnow()
    context.session.add(ip_address)
    return ip_address
Example #55
0
def notify(context, publisher_id, event_type, priority, payload):
    """Sends a notification using the specified driver

    :param publisher_id: the source worker_type.host of the message
    :param event_type:   the literal type of event (ex. Instance Creation)
    :param priority:     patterned after the enumeration of Python logging
                         levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
    :param payload:       A python dictionary of attributes

    Outgoing message format includes the above parameters, and appends the
    following:

    message_id
      a UUID representing the id for this notification

    timestamp
      the GMT timestamp the notification was sent at

    The composite message will be constructed as a dictionary of the above
    attributes, which will then be sent via the transport mechanism defined
    by the driver.

    Message example::

        {'message_id': str(uuid.uuid4()),
         'publisher_id': 'compute.host1',
         'timestamp': timeutils.utcnow(),
         'priority': 'WARN',
         'event_type': 'compute.create_instance',
         'payload': {'instance_id': 12, ... }}

    """
    if priority not in log_levels:
        raise BadPriorityException(_('%s not in valid priorities') % priority)

    # Ensure everything is JSON serializable.
    payload = jsonutils.to_primitive(payload, convert_instances=True)

    msg = dict(message_id=str(uuid.uuid4()),
               publisher_id=publisher_id,
               event_type=event_type,
               priority=priority,
               payload=payload,
               timestamp=str(timeutils.utcnow()))

    for driver in _get_drivers():
        try:
            driver.notify(context, msg)
        except Exception as e:
            LOG.exception(
                _("Problem '%(e)s' attempting to "
                  "send to notification system. "
                  "Payload=%(payload)s") % dict(e=e, payload=payload))
Example #56
0
 def __init__(self,
              router_id,
              priority,
              action=None,
              router=None,
              timestamp=None):
     self.priority = priority
     self.timestamp = timestamp
     if not timestamp:
         self.timestamp = timeutils.utcnow()
     self.id = router_id
     self.action = action
     self.router = router
Example #57
0
 def test__get_enabled_agents_with_inactive_ones(self):
     agent1 = agents_db.Agent()
     agent1.admin_state_up = True
     agent1.heartbeat_timestamp = timeutils.utcnow()
     agent2 = agents_db.Agent()
     agent2.admin_state_up = True
     # This is effectively an inactive agent
     agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0)
     network = {'id': 'foo_network_id'}
     self._test__get_enabled_agents(network,
                                    agents=[agent1, agent2],
                                    expected_warnings=1,
                                    expected_errors=0)