Example #1
    def __init__(self):
        super(CalicoMechanismDriver, self).__init__(
            AGENT_TYPE_FELIX,
            'tap',
            {'port_filter': True,
             'mac_address': '00:61:fe:ed:ca:fe'})

        # Initialize fields for the database object and transport.  We will
        # initialize these properly when we first need them.
        self.db = None
        self._db_context = None
        self.transport = None
        self._etcd_watcher = None
        self._etcd_watcher_thread = None
        self._my_pid = None
        self._epoch = 0
        self.in_resync = False

        # Tell the monkeypatch where we are.
        global mech_driver
        assert mech_driver is None
        mech_driver = self

        # Make sure we initialise even if we don't see any API calls.
        eventlet.spawn_after(STARTUP_DELAY_SECS, self._init_state)
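Note, for orientation before the remaining examples: eventlet.spawn_after(seconds, func, *args, **kwargs) runs func on a new green thread after the delay and returns a GreenThread handle that can abort the pending call. A minimal sketch (the names here are illustrative, not from the driver above):

    import eventlet

    def init_state():
        print('initialised')

    timer = eventlet.spawn_after(30, init_state)
    timer.cancel()  # prevents the call from running, but only if it has not started yet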
Example #2
    def cast(self, context, method_name, *args, **kwargs):
        manager, method = self._get_tm_method(method_name)

        def func():
            method(manager, context, *args, **kwargs)

        eventlet.spawn_after(0.1, func)
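An exception raised inside func here would kill only that green thread; Example #30 below shows the same cast wrapped in a try/except that logs failures. A hedged sketch of that safer shape:

    import logging
    import eventlet

    LOG = logging.getLogger(__name__)

    def _cast_safely(method, manager, context, *args, **kwargs):
        def func():
            try:
                method(manager, context, *args, **kwargs)
            except Exception:
                # log instead of silently losing the failure in the green thread
                LOG.exception('Error running %s', method)
        eventlet.spawn_after(0.1, func)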
Example #3
def main():
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)

    options = parse_arguments()
    if options.debug:
        level = logging.DEBUG
    else:
        level = logging.WARNING
    logging.basicConfig(format='%(asctime)s %(message)s', level=level)
    rh = logging.handlers.TimedRotatingFileHandler(
        filename='/var/log/hosting/cronurl.log',
        when='midnight',
    )
    rh.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
    rootLogger = logging.getLogger('')
    rootLogger.removeHandler(rootLogger.handlers[0])
    rootLogger.setLevel(logging.DEBUG)
    rootLogger.addHandler(rh)

    while True:
        now = time.time()
        next_minute = int(now) // 60 * 60 - now + 61  # seconds until one second past the next minute
        for url, timeout, mailto in scheduled_urls():
            eventlet.spawn_after(next_minute, check_url, url, timeout, mailto)
        eventlet.sleep(next_minute + 1)
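The next_minute arithmetic truncates now to the start of the current minute and then targets one second past the next minute boundary. A quick worked example, assuming the floor-division form used above:

    now = 120.5                               # 00:02:00.5
    start_of_minute = int(now) // 60 * 60     # 120
    next_minute = start_of_minute - now + 61  # 60.5 -> the checks fire at 00:03:01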
Example #4
    def __init__(self):
        super(CalicoMechanismDriver, self).__init__(
            AGENT_TYPE_FELIX,
            'tap',
            {'port_filter': True,
             'mac_address': '00:61:fe:ed:ca:fe'})

        # Initialize fields for the database object and transport.  We will
        # initialize these properly when we first need them.
        self.db = None
        self._db_context = None
        self.transport = None
        self._etcd_watcher = None
        self._etcd_watcher_thread = None
        self._my_pid = None
        self._epoch = 0
        self.in_resync = False
        # Mapping from (hostname, port-id) to Calico's status for a port.  The
        # hostname is included to disambiguate between multiple copies of a
        # port, which may exist during a migration or a re-schedule.
        self._port_status_cache = {}
        # Whether the version of update_port_status() available in this version
        # of OpenStack has the host argument.  Computed on first use.
        self._cached_update_port_status_has_host_param = None

        # Tell the monkeypatch where we are.
        global mech_driver
        assert mech_driver is None
        mech_driver = self

        # Make sure we initialise even if we don't see any API calls.
        eventlet.spawn_after(STARTUP_DELAY_SECS, self._init_state)
Example #5
    def _app_hook_handler(self, alert_body, hook_headers):
        if not alert_body['application_name']:
            self._log.info('No application found for alert %s. Will Ignore.', alert_body)
            return

        long_description = alert_body['long_description']

        if self._is_alert_opened(long_description) or \
           self._is_escalated_downtime(long_description):

            # Handle opened alerts and escalations to downtime immediately.
            payload = {
                'alert': alert_body,
                'header': hook_headers
            }
            self._dispatch_trigger(WEB_APP_ALERT_TRIGGER_REF, payload)

        elif (self._is_alert_closed(long_description) or
                self._is_downtime_recovered(long_description)):

            # Handle closed and recovered alerts after a delay.
            payload = {
                'alert': alert_body,
                'header': hook_headers
            }
            self._log.info('App alert closed. Delay.')
            eventlet.spawn_after(self._normal_report_delay, self._dispatch_application_normal,
                                 payload)

        elif (self._is_alert_canceled(long_description) or
                self._is_alert_acknowledged(long_description)):

            # Ignore canceled or acknowledged alerts.
            self._log.info('Ignored alert : %s.', alert_body)
Example #6
    def _server_hook_handler(self, alert_body, hook_headers):
        long_description = alert_body['long_description']
        if self._is_alert_opened(long_description) or \
           self._is_escalated_downtime(long_description):

            payload = {
                'alert': alert_body,
                'header': hook_headers
            }
            self._dispatch_trigger(SERVER_ALERT_TRIGGER_REF, payload)

        elif (self._is_alert_closed(long_description) or
                self._is_downtime_recovered(long_description)):

            payload = {
                'alert': alert_body,
                'header': hook_headers
            }
            self._log.info('Server alert closed. Delay.')
            eventlet.spawn_after(self._normal_report_delay, self._dispatch_server_normal,
                                 payload)

        elif (self._is_alert_canceled(long_description) or
                self._is_alert_acknowledged(long_description)):
            self._log.info('Ignored alert : %s.', alert_body)
Example #7
    def _dispatch_server_normal(self, payload, attempt_no=0):
        '''
        Dispatches SERVER_NORMAL_TRIGGER_REF if every server's health_status is 'green'.
        '''
        # basic guard to avoid queuing up forever.
        if attempt_no == 10:
            self._log.warning('Abandoning SERVER_NORMAL_TRIGGER_REF dispatch. Payload %s', payload)
            return
        try:
            servers = self._get_servers(payload['alert']['servers'])
            # make sure all servers are ok.
            all_servers_ok = True
            for name, server in six.iteritems(servers):
                all_servers_ok &= server['health_status'] in ['green']
                if not all_servers_ok:
                    break

            if all_servers_ok:
                self._dispatch_trigger(SERVER_NORMAL_TRIGGER_REF, payload)
            else:
                for name, server in six.iteritems(servers):
                    self._log.info('server %s has state %s. Rescheduling normal check.',
                                   name, server['health_status'])
                eventlet.spawn_after(self._normal_report_delay, self._dispatch_server_normal,
                                     payload, attempt_no + 1)
        except Exception:
            self._log.exception('Failed delay dispatch. Payload %s.', payload)
Example #8
    def aggregate_flush(self):
        try:
            if not self._count_data and not self._timing_data:
                return

            counts = '\n'.join(self._generate_counts()) + '\n'
            timings = '\n'.join(self._generate_timings()) + '\n'

            if self.debug:
                if self._count_data:
                    print 'writing counts:'
                    print repr(counts)
                if self._timing_data:
                    print 'writing timings:'
                    print repr(timings)

            csock = socket.socket()
            csock.connect((CARBON_SERVER, CARBON_PORT,))
            try:
                csock.sendall(counts)
                csock.sendall(timings)
            finally:
                csock.close()
                self._clear_data()
        finally:
            eventlet.spawn_after(AGGREGATE_TIMEOUT, self.aggregate_flush)
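The outer try/finally is what keeps this flush loop alive: even if the carbon socket write raises, the next run is still scheduled. A skeleton of the pattern, with AGGREGATE_TIMEOUT assumed to be a small number of seconds:

    import eventlet

    AGGREGATE_TIMEOUT = 10  # assumed value, for illustration

    def periodic_task():
        try:
            pass  # real work goes here; any exception still falls through to finally
        finally:
            eventlet.spawn_after(AGGREGATE_TIMEOUT, periodic_task)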
Example #9
    def prepare(
        self,
        memory_mb,
        databases,
        users,
        device_path=None,
        mount_point=None,
        backup_id=None,
        config_contents=None,
        root_password=None,
    ):
        from trove.instance.models import DBInstance
        from trove.instance.models import InstanceServiceStatus
        from trove.guestagent.models import AgentHeartBeat

        LOG.debug("users... %s" % users)
        LOG.debug("databases... %s" % databases)
        instance_name = DBInstance.find_by(id=self.id).name
        self.create_user(users)
        self.create_database(databases)

        def update_db():
            status = InstanceServiceStatus.find_by(instance_id=self.id)
            if instance_name.endswith("GUEST_ERROR"):
                status.status = rd_instance.ServiceStatuses.FAILED
            else:
                status.status = rd_instance.ServiceStatuses.RUNNING
            status.save()
            AgentHeartBeat.create(instance_id=self.id)

        eventlet.spawn_after(1.0, update_db)
Example #10
    def cast(self, context, msg):
        manager, method = self.get_tm_method(msg['name'])

        def func():
            method(manager, context, *msg['args'], **msg['kwargs'])

        eventlet.spawn_after(0.1, func)
Example #11
def register_trigger_type(trigger_definition, attempt_no=0):
    LOG.debug('Attempt no %s to register trigger %s.', attempt_no, trigger_definition['name'])

    ref = ResourceReference.to_string_reference(pack=trigger_definition['pack'],
                                                name=trigger_definition['name'])
    if _is_triggertype_exists(ref):
        return

    payload = json.dumps(trigger_definition)

    try:
        r = requests.post(url=TRIGGER_TYPE_ENDPOINT, data=payload,
                          headers=HTTP_POST_HEADER, timeout=TIMEOUT)
        if r.status_code == httplib.CREATED:
            LOG.info('Registered trigger %s.', trigger_definition['name'])
        elif r.status_code == httplib.CONFLICT:
            LOG.info('Trigger %s is already registered.', trigger_definition['name'])
        else:
            LOG.error('Seeing status code %s on an attempt to register trigger %s.',
                      r.status_code, trigger_definition['name'])
    except requests.exceptions.ConnectionError:
        if attempt_no < MAX_ATTEMPTS:
            retry_wait = RETRY_WAIT * (attempt_no + 1)
            LOG.debug('    ConnectionError. Will retry in %ss.', retry_wait)
            eventlet.spawn_after(retry_wait, register_trigger_type,
                                 trigger_definition=trigger_definition,
                                 attempt_no=(attempt_no + 1))
        else:
            LOG.warn('Failed to register trigger %s. Exceeded max attempts to register trigger.',
                     trigger_definition['name'])
    except Exception:
        LOG.exception('Failed to register trigger %s.', trigger_definition['name'])
Example #12
        def set_to_confirm_mode():
            self._current_status = "VERIFY_RESIZE"

            def set_to_active():
                self.parent.schedule_simulate_running_server(self.id, 1.5)

            eventlet.spawn_after(1, set_to_active)
Example #13
    def listen(self, sender):
        address = "ipc://run/core-rpc"
        log.info("RPCServer listening on %s", address)
        # Create the ZeroMQ context and REP socket for this server.
        self.context = zmq.Context()
        self.rep = self.context.socket(zmq.REP)
        self.rep.bind(address)
        eventlet.spawn_after(1, self.check_for_incoming_rpc_calls)
Example #14
def _do_register_trigger_type(attempt_no=0):
    LOG.debug('Attempt no %s to register %s.', attempt_no, ACTION_TRIGGER_TYPE['name'])
    try:
        payload = json.dumps(ACTION_TRIGGER_TYPE)
        r = requests.post(TRIGGER_TYPE_ENDPOINT,
                          data=payload,
                          headers=HTTP_POST_HEADER,
                          timeout=TIMEOUT)
        if r.status_code == 201:
            LOG.info('Registered trigger %s.', ACTION_TRIGGER_TYPE['name'])
        elif r.status_code == 409:
            LOG.info('Trigger %s is already registered.', ACTION_TRIGGER_TYPE['name'])
        else:
            LOG.error('Seeing status code %s on an attempt to register trigger %s.',
                      r.status_code, ACTION_TRIGGER_TYPE['name'])
    except requests.exceptions.ConnectionError:
        if attempt_no < MAX_ATTEMPTS:
            retry_wait = RETRY_WAIT * (attempt_no + 1)
            LOG.debug('    ConnectionError. Will retry in %ss.', retry_wait)
            eventlet.spawn_after(retry_wait, _do_register_trigger_type, attempt_no + 1)
        else:
            LOG.warn('Failed to register trigger %s. Exceeded max attempts to register trigger.',
                     ACTION_TRIGGER_TYPE['name'])
    except Exception:
        LOG.exception('Failed to register trigger %s.', ACTION_TRIGGER_TYPE['name'])
Example #15
    def schedule_status(self, new_status, time_from_now):
        """Makes a new status take effect at the given time."""

        def set_status():
            self._current_status = new_status

        eventlet.spawn_after(time_from_now, set_status)
Example #16
    def _app_hook_handler(self, alert_body, hook_headers):

        if alert_body['current_state'] == 'open':

            # handled opened and escalation to downtime immediately.
            payload = {
                'alert': alert_body,
                'header': hook_headers
            }
            self._dispatch_trigger(WEB_APP_ALERT_TRIGGER_REF, payload)

        elif alert_body['current_state'] == 'closed':

            # handled closed and recovered after a delay.
            payload = {
                'alert': alert_body,
                'header': hook_headers
            }
            self._log.info('App alert closed. Delay.')
            eventlet.spawn_after(self._normal_report_delay, self._dispatch_application_normal,
                                 payload)

        elif alert_body['current_state'] == 'acknowledged':

            # ignore canceled or acknowledged
            self._log.info('Ignored alert or alert acknowledged : %s.', alert_body)
Example #17
    def _register_trigger_type(self, trigger_definition, attempt_no=0):
        LOG.debug('Attempt no %s to register trigger %s.', attempt_no, trigger_definition['name'])

        ref = ResourceReference.to_string_reference(pack=trigger_definition['pack'],
                                                    name=trigger_definition['name'])
        if self._is_triggertype_exists(ref):
            return

        payload = json.dumps(trigger_definition)

        try:
            r = requests.post(url=self._trigger_type_endpoint, data=payload,
                              headers=self._http_post_headers, timeout=self._timeout)
            if r.status_code == httplib.CREATED:
                LOG.info('Registered trigger %s.', trigger_definition['name'])
            elif r.status_code == httplib.CONFLICT:
                LOG.info('Trigger %s is already registered.', trigger_definition['name'])
            else:
                LOG.error('Seeing status code %s on an attempt to register trigger %s.',
                          r.status_code, trigger_definition['name'])
        except requests.exceptions.ConnectionError:
            if attempt_no < self._max_attempts:
                retry_wait = self._retry_wait * (attempt_no + 1)
                LOG.debug('    ConnectionError. Will retry in %ss.', retry_wait)
                eventlet.spawn_after(retry_wait, self._register_trigger_type,
                                     trigger_definition=trigger_definition,
                                     attempt_no=(attempt_no + 1))
            else:
                LOG.warn('Failed to register trigger %s. Exceeded max attempts to register trigger.',
                         trigger_definition['name'])
        except Exception:
            LOG.exception('Failed to register trigger %s.', trigger_definition['name'])
Example #18
    def get_next_output(self, timeout=45):
        """Poll for next available output on this console.

        Ideally a purely event-driven scheme would be perfect.  AJAX over HTTP
        is at least one case where we don't have that luxury.
        """
        self.reaper.cancel()
        if self._evt:
            raise Exception('get_next_output is not re-entrant')
        if not self.databuffer:
            self._evt = eventlet.event.Event()
            with eventlet.Timeout(timeout, False):
                self._evt.wait()
            self._evt = None
        if not self.databuffer:
            self.reaper = eventlet.spawn_after(15, self.destroy)
            return ""
        currdata = self.databuffer.popleft()
        if isinstance(currdata, dict):
            self.reaper = eventlet.spawn_after(15, self.destroy)
            return currdata
        retval = currdata
        while self.databuffer and not isinstance(self.databuffer[0], dict):
            retval += self.databuffer.popleft()
        # the client has 15 seconds to make a new request for data before
        # they are given up on
        self.reaper = eventlet.spawn_after(15, self.destroy)
        return retval
Example #19
    def UpdateShares(self, server, shares):
        diff_btc = self.bitHopper.difficulty.get_difficulty()
        diff_nmc = self.bitHopper.difficulty.get_nmc_difficulty()
        diff_scc = self.bitHopper.difficulty.get_scc_difficulty()
        diff_i0c = self.bitHopper.difficulty.get_i0c_difficulty()
        diff_ixc = self.bitHopper.difficulty.get_ixc_difficulty()
        self.servers[server]['api_lag'] = False        
        prev_shares = self.servers[server]['shares']
        self.servers[server]['init'] = True
        if shares == prev_shares:
            time = .10*self.servers[server]['refresh_time']
            self.servers[server]['refresh_time'] += .10*self.servers[server]['refresh_time']
        else:
            self.servers[server]['refresh_time'] -= .10*self.servers[server]['refresh_time']
            time = self.servers[server]['refresh_time']

        if time <= self.servers[server]['refresh_limit']:
            time = self.servers[server]['refresh_limit']
        eventlet.spawn_after(time, self.update_api_server, server)

        try:
            k =  str('{0:d}'.format(int(shares)))
            ghash_duration = '  '
            if self.servers[server]['ghash'] > 0:
                ghash_duration += str('{0:.1f}gh/s '.format( self.servers[server]['ghash'] ))
            if self.servers[server]['duration'] > 0:
                ghash_duration += '\t' + str('{0:d}min.'.format( (self.servers[server]['duration']/60) ))
            k += '\t' + ghash_duration
        except Exception, e:
            self.bitHopper.log_dbg("Error formatting")
            self.bitHopper.log_dbg(e)
            k =  str(shares)
Example #20
def eventlet_schedule_in(secs, target=None, args=(), kwargs=None):
    if target is None:
        def decorator(target):
            return eventlet_schedule_in(secs, target, args=args, kwargs=kwargs)
        return decorator
    if not isinstance(target, eventlet.greenthread.GreenThread):
        eventlet.spawn_after(secs, target, *args, **(kwargs or {}))
    return target
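Because the decorator branch returns eventlet_schedule_in(secs, target, ...), the helper works both as a plain call and as a decorator. A usage sketch:

    @eventlet_schedule_in(5)
    def say_hello():
        print('hello, five seconds later')

    # equivalent to: eventlet.spawn_after(5, say_hello); the decorated name
    # still refers to the original function.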
Example #21
    def download_objs(self, objs, sendfunc):
        objs_to_download = set()
        for obj in objs:
            # self.downloading stores hashes, so test membership on obj.hash
            if obj.hash not in self.downloading:
                self.downloading.add(obj.hash)
                eventlet.spawn_after(10, self.cancel_download, obj.hash)
                objs_to_download.add(obj)
        sendfunc(msgs.Getdata.make(objs_to_download))
Example #22
    def flush_all(self, file, delay=0):
        if self.readonly:
            raise exceptions.UnsupportedCommandError()
        delay = int(delay)
        if delay > 0:
            eventlet.spawn_after(delay, self.store.clear)
        else:
            self.store.clear()
Example #23
    def test_create_wait(self):
        self.success = False
        thread = eventlet.spawn(self.get_messages)
        eventlet.spawn_after(0.2,
            self.backend.create_message, 'a', 'q', 'm', 'test')
        thread.wait()
        self.assertTrue(self.success)
        self.delete_messages()
Example #24
    def receive(self, body, server):
        if server in self.polled:
            self.polled[server].release()
        self.bitHopper.log_dbg('received lp from: ' + server)
        info = self.bitHopper.pool.servers[server]
        if info['role'] in ['mine_nmc', 'disable', 'mine_ixc', 'mine_i0c', 'mine_scc', 'info']:
            return
        if body is None:
            self.bitHopper.log_dbg('error in long pool from: ' + server)
            with self.lock:
                if server not in self.errors:
                    self.errors[server] = 0
                self.errors[server] += 1
            #timeout? Something bizarre?
            if self.errors[server] < 3 or info['role'] == 'mine_deepbit':
                eventlet.sleep(1)
                eventlet.spawn_after(0, self.pull_lp, self.pool.servers[server]['lp_address'], server, False)
            return
        try:
            output = True
            response = json.loads(body)
            work = response['result']
            data = work['data']

            block = data.decode('hex')[0:64]
            block = wordreverse(block)
            block = block.encode('hex')[56:120]
            #block = int(block, 16)

            with self.lock:
                if block not in self.blocks:
                    if bytereverse(block) in self.blocks:
                        block = bytereverse(block)
                    self.bitHopper.log_msg('New Block: ' + str(block))
                    self.bitHopper.log_msg('Block Owner ' + server)
                    self.add_block(block, work, server)

            #Add the lp_penalty if it exists.
            with self.lock:
                offset = self.pool.servers[server].get('lp_penalty','0')
                self.blocks[block][server] = time.time() + float(offset)
                self.bitHopper.log_dbg('EXACT ' + str(server) + ': ' + str(self.blocks[block][server]))
                if self.blocks[block]['_owner'] is None or self.blocks[block][server] < self.blocks[block][self.blocks[block]['_owner']]:
                    self.set_owner(server,block)
                    if self.bitHopper.lpBot is not None:
                        self.bitHopper.lpBot.announce(server, block)

        except Exception, e:
            output = False
            self.bitHopper.log_dbg('Error in Long Pool ' + str(server) + str(body))
            #traceback.print_exc()
            if server not in self.errors:
                self.errors[server] = 0
            with self.lock:
                self.errors[server] += 1
            #timeout? Something bizarre?
            if self.errors[server] > 3 and info['role'] != 'mine_deepbit':
                return
Example #25
    def attach(self, volume_id, server_id, device_path):
        volume = self.get(volume_id)

        if volume._current_status != "available":
            raise Exception("Invalid volume status")

        def finish_attach():
            volume._current_status = "in-use"
        eventlet.spawn_after(1.0, finish_attach)
Example #26
    def detach(self, volume_id):
        volume = self.get(volume_id)

        if volume._current_status != 'in-use':
            raise Exception("Invalid volume status")

        def finish_detach():
            volume._current_status = "available"
        eventlet.spawn_after(1.0, finish_detach)
Example #27
    def reboot(self):
        LOG.debug("Rebooting server %s" % (self.id))

        def set_to_active():
            self._current_status = "ACTIVE"
            self.parent.schedule_simulate_running_server(self.id, 1.5)

        self._current_status = "REBOOT"
        eventlet.spawn_after(1, set_to_active)
Example #28
    def create_backup(self, backup_id):
        from trove.backup.models import Backup, BackupState
        backup = Backup.get_by_id(context=None, backup_id=backup_id)

        def finish_create_backup():
            backup.state = BackupState.COMPLETED
            backup.location = 'http://localhost/path/to/backup'
            backup.save()
        eventlet.spawn_after(1.0, finish_create_backup)
Example #29
def subway(config):
    """
    Run the Subway limits configuration synchronization daemon.

    :param config: The configuration file for Subway.
    """

    # Set up eventlet, first thing...
    eventlet.monkey_patch()

    # Read the configuration file
    conf = ConfigParser.SafeConfigParser()
    conf.read([config])

    # Suck in the subway config itself
    try:
        subway_config = dict(conf.items('config'))
    except ConfigParser.NoSectionError:
        # Use the defaults across the board
        subway_config = {}

    # Demand a "master" section
    if not conf.has_section('master'):
        raise SubwayException("Missing required configuration for the master")

    # OK, let's set up the master
    master = get_database(dict(conf.items('master')))

    # Now set up the slaves...
    slaves = []
    for sect in conf.sections():
        if not sect.startswith('slave:'):
            continue

        # OK, we don't actually care about the slave's name, and we'll
        # only log errors trying to set up the connection to the slave
        try:
            slaves.append(get_database(dict(conf.items(sect))))
        except redis.ConnectionError:
            LOG.exception("Failed to connect to slave %r" %
                          sect[len('slave:'):])

    # Make sure we have at least one slave
    if not slaves:
        raise SubwayException("Missing configuration for slaves")

    # Set up the daemon...
    server = SubwayDaemon(subway_config, master, slaves)

    # Do the initial limits loading, as a thread; we use spawn_after()
    # so we make sure the listening thread gets started first, so we
    # don't miss any reload commands
    eventlet.spawn_after(2.0, server.reload)

    # Now, start listening for messages
    server.listen()
Example #30
    def cast(self, context, method_name, *args, **kwargs):
        manager, method = self._get_tm_method(method_name)

        def func():
            try:
                method(manager, context, *args, **kwargs)
            except Exception:
                LOG.exception("Error running %s", method)

        eventlet.spawn_after(0.1, func)
Example #31
    def create_backup(self, backup_info):
        from trove.backup.models import Backup
        from trove.backup.state import BackupState
        backup = Backup.get_by_id(context=None, backup_id=backup_info['id'])

        def finish_create_backup():
            backup.state = BackupState.COMPLETED
            backup.location = 'http://localhost/path/to/backup'
            backup.checksum = 'fake-md5-sum'
            backup.size = BACKUP_SIZE
            backup.save()

        eventlet.spawn_after(1.0, finish_create_backup)
Example #32
    def schedule_idle_scan(cls):
        def _scan_idle():
            counter = metrics.counter('idle_tunnel')
            counter.clear()
            for name, tunnel in cls._tunnels.iteritems():
                if time.time() - tunnel.updated > cls.active_timeout:
                    tunnel.idle = True
                    counter.inc()
            if counter.get_count():
                logging.debug("scan: {0} of {1} tunnels are idle".format(
                    counter.get_value(), len(cls._tunnels)))
            cls.schedule_idle_scan()

        eventlet.spawn_after(cls.active_timeout, _scan_idle)
Example #33
    def __init__(self, env: GNEnvironment):
        self._lock = Semaphore(value=1)
        self.env = env
        self.to_check = dict()
        self.heartbeat_sids = set()

        self.expire_second = env.config.get(ConfigKeys.TIMEOUT,
                                            domain=ConfigKeys.HEARTBEAT,
                                            default=300)
        self.sleep_time = env.config.get(ConfigKeys.INTERVAL,
                                         domain=ConfigKeys.HEARTBEAT,
                                         default=20)

        eventlet.spawn_after(func=self.loop, seconds=10)
Example #34
def delete_ephemeral_rooms(gn_env: GNEnvironment):
    from activitystreams import parse as as_parser

    if len(gn_env.config) == 0 or gn_env.config.get(ConfigKeys.TESTING, False):
        # assume we're testing
        return

    def delete():
        from dino import utils

        channel_dict = gn_env.db.get_channels()

        for channel_id, *_ in channel_dict.items():
            rooms = gn_env.db.rooms_for_channel(channel_id)

            for room_id, room_info in rooms.items():
                short_id = room_id.split('-')[0]
                room_name = room_info['name']
                logger.debug('checking room %s: %s' % (room_id, room_name))

                users = gn_env.db.users_in_room(room_id)
                if len(users) > 0:
                    logger.debug('[%s] NOT removing room (%s), has %s user(s) in it' % (short_id, room_name, len(users)))
                    continue

                if not room_info['ephemeral']:
                    logger.debug('[%s] NOT removing room (%s), not ephemeral' % (short_id, room_name))
                    continue

                logger.info('[%s] removing ephemeral room (%s)' % (short_id, room_name))

                try:
                    gn_env.db.get_room_name(room_id)
                except NoSuchRoomException:
                    logger.info('[%s] ephemeral room (%s) has already been removed' % (short_id, room_name))
                    continue

                activity = utils.activity_for_remove_room('0', 'server', room_id, room_name, 'empty ephemeral room')

                gn_env.db.remove_room(channel_id, room_id)

                # no need to notify for wio
                if gn_env.node is not None and 'wio' not in gn_env.node:
                    gn_env.out_of_scope_emit(
                        'gn_room_removed', activity, broadcast=True, include_self=True, namespace='/ws')

                gn_env.observer.emit('on_remove_room', (activity, as_parser(activity)))

    eventlet.spawn_after(seconds=30*60, func=delete)
Example #35
    def _server_hook_handler(self, alert_body, hook_headers):
        if alert_body['current_state'] == 'open':

            payload = {'alert': alert_body, 'header': hook_headers}
            self._dispatch_trigger(SERVER_ALERT_TRIGGER_REF, payload)

        elif alert_body['current_state'] == 'closed':

            payload = {'alert': alert_body, 'header': hook_headers}
            self._log.info('Server alert closed. Delay.')
            eventlet.spawn_after(self._normal_report_delay,
                                 self._dispatch_server_normal, payload)

        elif alert_body['current_state'] == 'acknowledged':
            self._log.info('Alert is acknowledged : %s.', alert_body)
Example #36
    def start(self, interval, now=True):
        """Start running pre-set function every interval seconds.
        """
        if interval < 0:
            raise ValueError('interval must be >= 0')

        if self._running:
            self.stop()

        self._running = True
        self._interval = interval
        if now:
            self._self_thread = eventlet.spawn_after(0, self)
        else:
            self._self_thread = eventlet.spawn_after(self._interval, self)
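Note that the object schedules itself (spawn_after(..., self)), so the class is expected to be callable and to re-arm its own timer on each run. A minimal sketch of that shape, with the class name invented for illustration:

    import eventlet

    class SelfRescheduling(object):  # hypothetical name
        def __init__(self, func, interval):
            self._func = func
            self._interval = interval
            self._running = True
            self._self_thread = None

        def __call__(self):
            if self._running:
                self._func()
                # re-arm: schedule the next invocation of this callable
                self._self_thread = eventlet.spawn_after(self._interval, self)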
Example #37
    def _play_on_turn(self, r):
        if r.state.status != model.STATUS_PLAYING:
            return
        if r.state.turn != self.player_idx:
            return

        def play():
            cards = get_ai_move(r.state, self.player_idx)
            r.play(self.player_idx, cards)

        if r.state.is_starting_trick():
            delay = 2.0
        else:
            delay = 0.3
        eventlet.spawn_after(delay, play)
Example #38
def start_collective():
    global follower
    global retrythread
    if follower:
        follower.kill()
        cfm.stop_following()
        follower = None
    try:
        if cfm.cfgstreams:
            cfm.check_quorum()
            # Do not start if we have quorum and are leader
            return
    except exc.DegradedCollective:
        pass
    if leader_init.active:  # do not start trying to connect if we are
        # xmitting data to a follower
        return
    myname = get_myname()
    for member in sorted(list(cfm.list_collective())):
        if member == myname:
            continue
        if cfm.cfgleader is None:
            cfm.stop_following(True)
        ldrcandidate = cfm.get_collective_member(member)['address']
        log.log({'info': 'Performing startup attempt to {0}'.format(
            ldrcandidate), 'subsystem': 'collective'})
        if connect_to_leader(name=myname, leader=ldrcandidate):
            break
    else:
        retrythread = eventlet.spawn_after(30 + random.random(),
                                           start_collective)
Example #39
    def suspect_deadlock_on_link(self, link):
        if len(link.to_adds) + len(link.to_adds_loop) == 0:
            return
        suspecting_start = time()
        self.suspecting_deadlocks[(link.src, link.dst)] = suspecting_start
        link.scheduling_mode = constants.SUSPECTING_LINK
        # self.log.info("can_violate_congestion: %s" % self.can_violate_congestion)
        # self.log.info("segments to be done %s" % self.segments_to_be_done)
        if not self.can_violate_congestion:
            eventlet.spawn_after(constants.SUSPECTING_TIME,
                                 self.suspecting_time_expire,
                                 (link.src, link.dst))
        else:
            self.suspecting_deadlock_for_this_test = True
            eventlet.spawn_after(constants.SUSPECTING_TIME_SKIP_DEADLOCK,
                                 self.skipping_deadlock)
Example #40
    def schedule_relative(
            self,
            duetime: typing.RelativeTime,
            action: typing.ScheduledAction,
            state: Optional[typing.TState] = None) -> typing.Disposable:
        """Schedules an action to be executed after duetime.

        Args:
            duetime: Relative time after which to execute the action.
            action: Action to be executed.
            state: [Optional] state to be given to the action function.

        Returns:
            The disposable object used to cancel the scheduled action
            (best effort).
        """

        seconds = self.to_seconds(duetime)
        if not seconds:
            return self.schedule(action, state=state)

        sad = SingleAssignmentDisposable()

        def interval() -> None:
            sad.disposable = self.invoke_action(action, state=state)

        timer = eventlet.spawn_after(seconds, interval)

        def dispose() -> None:
            timer.kill()

        return CompositeDisposable(sad, Disposable(dispose))
Example #41
    def schedule_relative(self,
                          duetime: typing.RelativeTime,
                          action: typing.ScheduledAction,
                          state: typing.TState = None) -> typing.Disposable:
        """Schedules an action to be executed after duetime.

        Args:
            duetime: Relative time after which to execute the action.
            action: Action to be executed.

        Returns:
            The disposable object used to cancel the scheduled action
            (best effort).
        """

        scheduler = self
        seconds = self.to_seconds(duetime)
        if not seconds:
            return scheduler.schedule(action, state)

        sad = SingleAssignmentDisposable()

        def interval():
            sad.disposable = self.invoke_action(action, state)

        log.debug("timeout: %s", seconds)
        timer = [eventlet.spawn_after(seconds, interval)]

        def dispose():
            # nonlocal timer
            timer[0].kill()

        return CompositeDisposable(sad, Disposable(dispose))
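The single-element timer list here stands in for Python 3's nonlocal, as the commented-out line hints. Since dispose only calls kill() and never rebinds the name, the closure in Example #40 above captures the GreenThread directly, which reads more cleanly:

    timer = eventlet.spawn_after(seconds, interval)

    def dispose() -> None:
        timer.kill()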
Example #42
    def render(self):
        screenful = list(self.render_tail(self.terminal.height))

        sys.stdout.write(self.terminal.move_up * self.moved)

        for idx, line in enumerate(reversed(screenful)):
            # XXX: should really wrap this properly somehow, but
            #      writing out more than the terminal width will mess up
            #      the movement logic
            delta = len(line) - self.terminal.length(line)
            sys.stdout.write(line[:self.terminal.width + delta])
            sys.stdout.write(self.terminal.clear_eol + self.terminal.move_down)
        sys.stdout.write(self.terminal.clear_eol)

        self.moved = len(screenful)
        eventlet.spawn_after(0.5, self.render)
Example #43
    def command_timer(self, params):
        '''<time> <message>
        In <time> seconds, display <message>.
        Send a message after a delay.
        <time> is in seconds.
        If <message> is more than one word, quotes are required.
        
        example: TIMER 5 "hello world!"
        '''
        try:
            timestr, message = params[:2]
            delay = int(timestr)
        except ValueError:
            self.writeerror("Need both a time and a message")
            return
        self.writeresponse("Waiting %d seconds..." % delay)

        if SERVERTYPE == 'green':
            event = gevent.spawn_later(delay, self.writemessage, message)

        if SERVERTYPE == 'eventlet':
            event = eventlet.spawn_after(delay, self.writemessage, message)

        if SERVERTYPE == 'threaded':
            event = threading.Timer(delay, self.writemessage, args=[message])
            event.start()

        # Used by session_end to stop all timer events when the user logs off.
        self.timer_events.append(event)
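All three event objects support cancellation from session_end, though the call differs per backend; a hedged sketch (session_end itself is not shown here):

    def cancel_timer_event(event):
        if SERVERTYPE in ('eventlet', 'threaded'):
            # eventlet's GreenThread and threading.Timer both expose cancel()
            event.cancel()
        elif SERVERTYPE == 'green':
            # a gevent greenlet is stopped with kill()
            event.kill()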
Example #44
    def waitForServiceToStop(
            self,
            service,
            attemptsLeft=20,
            callback=None,
            cbkwargs={}):

        if attemptsLeft > 0:

            if self.services[service]['object'].isActive():

                self.timerThreads.append(
                    eventlet.spawn_after(10,
                                         self.waitForServiceToStop,
                                         service,
                                         attemptsLeft - 1,
                                         callback,
                                         cbkwargs))

            else:

                del self.services[service]

                if callback:
                    callback(**cbkwargs)

        else:
            self.log_error("Unable to stop service %s", service)
Example #45
    def resetTimeout(self):
        if self.watchdogtask is not None:
            self.watchdogtask.cancel()

        def watchdogfun():
            self.state = IdleState()

        self.watchdogtask = eventlet.spawn_after(3, watchdogfun)
Example #46
    def __init__(self):
        self.asyncid = _assign_asyncid(self)
        self.responses = collections.deque()
        self._evt = None
        self.termrelations = []
        self.consoles = set([])
        self.reaper = eventlet.spawn_after(15, self.destroy)
Example #47
    def _fake_cast(self, method_name, **kwargs):
        import eventlet
        from reddwarf.taskmanager.manager import TaskManager
        instance = TaskManager()
        method = getattr(instance, method_name)

        def func():
            try:
                method(self.context, **kwargs)
            except Exception as ex:
                type_, value, tb = sys.exc_info()
                logging.error("Error running async task:")
                logging.error((traceback.format_exception(type_, value, tb)))
                raise type_, value, tb

        eventlet.spawn_after(0, func)
Example #48
    def schedule_periodic(self, period, action, state=None):
        """Schedules a periodic piece of work by dynamically discovering the
        schedulers capabilities.

        Keyword arguments:
        period -- Period for running the work periodically.
        action -- Action to be executed.
        state -- [Optional] Initial state passed to the action upon the first
            iteration.

        Returns the disposable object used to cancel the scheduled recurring
        action (best effort)."""

        scheduler = self
        seconds = self.to_relative(period)/1000.0
        if not seconds:
            return scheduler.schedule(action, state)

        def interval():
            new_state = action(scheduler, state)
            scheduler.schedule_periodic(period, action, new_state)

        log.debug("timeout: %s", seconds)
        timer = [eventlet.spawn_after(seconds, interval)]

        def dispose():
            timer[0].kill()

        return Disposable.create(dispose)
Example #49
    def schedule_relative(self, duetime, action, state=None):
        """Schedules an action to be executed after duetime.

        Keyword arguments:
        duetime -- {timedelta} Relative time after which to execute the action.
        action -- {Function} Action to be executed.

        Returns {Disposable} The disposable object used to cancel the scheduled
        action (best effort)."""

        scheduler = self
        seconds = self.to_relative(duetime)/1000.0
        if not seconds:
            return scheduler.schedule(action, state)

        disposable = SingleAssignmentDisposable()

        def interval():
            disposable.disposable = self.invoke_action(action, state)

        log.debug("timeout: %s", seconds)
        timer = [eventlet.spawn_after(seconds, interval)]

        def dispose():
            # nonlocal timer
            timer[0].kill()

        return CompositeDisposable(disposable, Disposable.create(dispose))
Example #50
    def get_next_output(self, timeout=45):
        """Poll for next available output on this console.

        Ideally a purely event-driven scheme would be perfect.  AJAX over HTTP
        is at least one case where we don't have that luxury.  This function
        will not work if the session was initialized with a data callback
        instead of polling mode.
        """
        self.reaper.cancel()
        # postpone death to be 15 seconds after this would timeout
        self.reaper = eventlet.spawn_after(timeout + 15, self.destroy)
        if self._evt:
            raise Exception('get_next_output is not re-entrant')
        if not self.databuffer:
            self._evt = eventlet.event.Event()
            with eventlet.Timeout(timeout, False):
                self._evt.wait()
            self._evt = None
        if not self.databuffer:
            return ""
        currdata = self.databuffer.popleft()
        if isinstance(currdata, dict):
            return currdata
        retval = currdata
        while self.databuffer and not isinstance(self.databuffer[0], dict):
            retval += self.databuffer.popleft()

        return retval
Example #51
    def __init__(self, node, configmanager, username, datacallback=None,
                 skipreplay=False):
        self.registered = False
        self.tenant = configmanager.tenant
        if not configmanager.is_node(node):
            raise exc.NotFoundException("Invalid node")
        self.username = username
        self.node = node
        self.configmanager = configmanager
        self.connect_session()
        self.registered = True
        self._evt = None
        self.write = self.conshdl.write
        if datacallback is None:
            self.reaper = eventlet.spawn_after(15, self.destroy)
            self.databuffer = collections.deque([])
            self.data_handler = self.got_data
            if not skipreplay:
                self.databuffer.extend(self.conshdl.get_recent())
        else:
            self.data_handler = datacallback
            if not skipreplay:
                for recdata in self.conshdl.get_recent():
                    if recdata:
                        datacallback(recdata)
        self.conshdl.attachsession(self)
Example #52
    def set_flavor():
        if self.name.endswith("_RESIZE_ERROR"):
            self._current_status = "ACTIVE"
            return
        if new_flavor_id is None:
            # Migrations are flavorless flavor resizes.
            # A resize MIGHT change the host, but a migrate
            # deliberately does.
            LOG.debug("Migrating fake instance.")
            eventlet.spawn_after(0.75, change_host)
        else:
            LOG.debug("Resizing fake instance.")
            self.old_flavor_ref = self.flavor_ref
            flavor = self.parent.flavors.get(new_flavor_id)
            self.flavor_ref = flavor.links[0]['href']
        eventlet.spawn_after(1, set_to_confirm_mode)
Example #53
    def extend(self, volume_id, new_size):
        LOG.debug("Resize volume id (%(volumeid)s) to size (%(size)s)" % {
            'volumeid': volume_id,
            'size': new_size
        })
        volume = self.get(volume_id)

        if volume._current_status != 'available':
            raise Exception("Invalid volume status: "
                            "expected 'in-use' but was '%s'" %
                            volume._current_status)

        def finish_resize():
            volume.size = new_size

        eventlet.spawn_after(1.0, finish_resize)
Example #54
def checkI2C():

	#eventlet.sleep(0.2)

	global running_seat_occupied
	if not running_seat_occupied:
		global firstTrigger
		global occupied

		if occupied and firstTrigger:
			#set flags for the i2c events detected
			print "checki2c occupied"
			lowbyte = proxSensor1.readU8(0x5F)
			highbyte = proxSensor1.readU8(0x5E)
			byte1 = (highbyte << 3) | lowbyte

			if byte1 < 300: #anything closer?
				ledDriver.setPWM(UNDER_SEAT_PWM_R, 0, 4095)
				ledDriver.setPWM(UNDER_SEAT_PWM_G, 0, 4095)
				ledDriver.setPWM(UNDER_SEAT_PWM_B, 0, 4095)
				#sleep(10.0)
				print "sending lights high"
				underSeatOffThread = eventlet.spawn_after(10.0, underSeatOff)
				underSeatOffThread.wait()
				firstTrigger = False
Example #55
    def log(self, logdata=None, ltype=None, event=0, eventdata=None):
        if type(logdata) not in (bytes, unicode, dict):
            raise Exception("Unsupported logdata")
        if ltype is None:
            if type(logdata) == dict:
                logdata = json.dumps(logdata)
                ltype = 1
            elif self.isconsole:
                ltype = 2
            else:
                ltype = 0
        if self.closer is not None:
            self.closer.cancel()
            self.closer = None
        timestamp = int(time.time())
        if (len(self.logentries) > 0 and ltype == 2 and event == 0
                and self.logentries[-1][0] == 2
                and self.logentries[-1][1] == timestamp):
            self.logentries[-1][2] += logdata
            if eventdata is not None:
                self.logentries[-1][4] = eventdata
        else:
            self.logentries.append(
                [ltype, timestamp, logdata, event, eventdata])
        if self.buffered:
            if self.writer is None:
                self.writer = eventlet.spawn_after(2, self.writedata)
        else:
            self.writedata()
Example #56
    def request(self,
                key,
                dataindex,
                corrId="",
                callback=None,
                interval=0,
                timer=30,
                args={}):
        msg = d6message.d6msg(key=key,
                              replyTo=self.name,
                              correlationId=corrId,
                              type="req",
                              dataindex=dataindex,
                              body=args)

        corruuid = msg.correlationId
        self.subdata[corruuid] = dataobj.subData(key, dataindex, corruuid,
                                                 callback)

        if interval:
            self.scheduuids.add(corruuid)
            self.schedule(msg, corruuid, interval, callback)
        else:
            self.send(msg)

            if timer:
                self.timerThreads.append(
                    eventlet.spawn_after(timer, self.reqtimeout, corruuid))
Example #57
def try_assimilate(drone, followcount, remote):
    global retrythread
    try:
        remote = connect_to_collective(None, drone, remote)
    except socket.error:
        # Oh well, unable to connect, hopefully the rest will be
        # in order
        return
    tlvdata.send(
        remote, {
            'collective': {
                'operation': 'assimilate',
                'name': get_myname(),
                'followcount': followcount,
                'txcount': cfm._txcount
            }
        })
    tlvdata.recv(remote)  # the banner
    tlvdata.recv(remote)  # authpassed... 0..
    answer = tlvdata.recv(remote)
    if not answer:
        log.log({
            'error':
            'No answer from {0} while trying to assimilate'.format(drone),
            'subsystem':
            'collective'
        })
        return True
    if 'txcount' in answer:
        log.log({
            'info':
            'Deferring to {0} due to target being a better leader'.format(
                drone),
            'subsystem':
            'collective'
        })
        retire_as_leader(drone)
        if not connect_to_leader(None, None, leader=remote.getpeername()[0]):
            if retrythread is None:
                retrythread = eventlet.spawn_after(random.random(),
                                                   start_collective)
        return False
    if 'leader' in answer:
        # Will wait for leader to see about assimilation
        return True
    if 'error' in answer:
        log.log({
            'error':
            'Error encountered while attempting to '
            'assimilate {0}: {1}'.format(drone, answer['error']),
            'subsystem':
            'collective'
        })
        return True
    log.log({
        'info': 'Assimilated {0} into collective'.format(drone),
        'subsystem': 'collective'
    })
    return True
Example #58
    def _schedule_in_memory(self, run_after, scheduled_job):
        green_thread = eventlet.spawn_after(
            run_after,
            self._process_memory_job,
            scheduled_job
        )

        self.memory_jobs[green_thread] = scheduled_job
Example #59
    def _report_stats():
        dump = {}
        for m in dump_metrics():
            dump[m['name']] = m['value']
        for metric in monitored_metrics:
            value = dump.get(metric)
            if value:
                if metric.startswith('collect:'):
                    # metrics starting with "collect:" are
                    # counters that will be reset once reported
                    stats.count(metric.split(':')[-1], value)
                    metric_name = metric.split('_count')[0]
                    counter(metric_name).clear()
                else:
                    stats.value(metric, value)
        logging.debug("metrics reported")
        eventlet.spawn_after(report_interval, _report_stats)
Example #60
  def __init__(self):
    self._shells = {} # Keys are (username, shell_id) tuples. Each user has his/her own set of shell ids.
    self._command_by_short_name = {} # Map each short name to its command (e.g. ["pig", "-l", "/dev/null"])
    self._meta = {} # Map usernames to utils.UserMetadata objects
    self._greenlets_by_hid = {} # Map each Hue Instance ID (HID) to greenlet currently fetching output for that HID.
    self._hids_by_pid = {} # Map each process ID (PID) to the HID whose greenlet is currently doing a "select" on the process's output fd.
    self._greenlets_to_notify = {} # For each PID, maintain a set of greenlets who are also interested in the output from that process, but are not doing the select.
    self._shells_by_fds = {} # Map each file descriptor to the Shell instance whose output it represents.
    self._greenlet_interruptable = {} # For each greenlet, store if it can be safely interrupted.
    self._env_by_short_name = {} # Map each short name to a dictionary which contains the environment for shells of that type.

    self._delegation_token_dir = shell.conf.SHELL_DELEGATION_TOKEN_DIR.get()
    if not os.path.exists(self._delegation_token_dir):
      os.mkdir(self._delegation_token_dir)

    self._parse_configs()
    eventlet.spawn_after(1, self._handle_periodic)