def runrealtime():
    try:
        client = Stomp(CONFIG)
        client.connect()
        # client.subscribe(QUEUE, {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL})
        client.subscribe(QUEUE, {StompSpec.ACK_HEADER: StompSpec.ACK_AUTO})
        frame = None
        while True:
            try:
                frame = client.receiveFrame()
                # body=json.dumps(frame.body)
                body = json.loads(frame.body.decode())[0]
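                # insuranceType arrives as a single-element list; unwrap it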
                insuranceType = body.get("insuranceType", [])[0]
                body['insuranceType'] = insuranceType
                companyId = body.get('companyId', [])
                # insurance company selection
                if len(companyId) == 0 or "2" in companyId:
                    client.send(body=json.dumps(body,
                                                ensure_ascii=False).encode(),
                                destination=EPICC)
                log.info('Got %s' % json.dumps(body, ensure_ascii=False))
                # client.ack(frame)
                body['client'] = client
                getSource(body)
            except Exception as e:
                log.error(e)
                if frame is not None:
                    log.error(frame.body)
                log.error(traceback.format_exc())

    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
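A minimal sketch of the client-individual acknowledgement pattern hinted at by the commented-out subscribe line above. CONFIG and QUEUE stand in for the constants used by runrealtime(), and process() is a hypothetical handler. With ACK_CLIENT_INDIVIDUAL the broker only drops a frame once ack() is called, so a crash mid-processing lets the message be redelivered.

from stompest.config import StompConfig
from stompest.protocol import StompSpec
from stompest.sync import Stomp

def run_with_manual_ack(config, queue):
    client = Stomp(config)
    client.connect()
    # an id header is required for per-subscription acks on STOMP 1.1+
    client.subscribe(queue, {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL,
                             StompSpec.ID_HEADER: '0'})
    while True:
        frame = client.receiveFrame()
        try:
            process(frame.body)   # hypothetical handler
            client.ack(frame)     # acknowledge only after successful processing
        except Exception:
            client.nack(frame)    # ask for redelivery (STOMP 1.1+ only)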
def runrealtime():
    # create the logger before the try block so the outer except can use it
    log = Logger()
    try:
        client = Stomp(CONFIG)
        client.connect()
        client.subscribe(QUEUE, {StompSpec.ACK_HEADER: StompSpec.ACK_AUTO})
        frame = None
        while True:
            try:
                frame = client.receiveFrame()
                body = json.loads(frame.body.decode())
                plateNumber = body['plateNumber']
                # log.info('Got %s' % json.dumps(body,ensure_ascii=False))
                log.info('Got %s' % plateNumber)
                licenseType = body.get('licenseType', "02")
                log.info('Got licenseType %s' % licenseType)
                log.info("start ..............")
                cjbx_start(plateNumber, licenseType)
                log.info("end ..............")
            except Exception as e:
                log.error(e)
                if frame is not None:
                    log.error(frame.body)
                log.error(traceback.format_exc())

    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
Example #3
    def init_listener(self):
        if self.stomp_server is None or self.stomp_port is None:
            self.stomp_server = settings.INDEXER_STOMP_SERVER
            self.stomp_port = int(settings.INDEXER_STOMP_PORT)

        self.listener = Stomp(StompConfig('tcp://%s:%s' % (self.stomp_server, self.stomp_port)))
        self.listener.connect()
        logger.info('Connected to message queue on %s:%i',
                    self.stomp_server, self.stomp_port)
        self.listener.subscribe(settings.INDEXER_STOMP_CHANNEL,
                                {'ack': 'client'})  #  can we use auto-ack ?
        self.last_activity = datetime.now()
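Answering the inline question: auto-ack would work, but the broker treats a message as delivered the moment it is sent, so a crash between receiveFrame() and indexing loses it. The one-line change would be the sketch below (same settings name as above); keeping 'ack': 'client' and acknowledging after receipt, as Example #7 does, is the safer default.

        self.listener.subscribe(settings.INDEXER_STOMP_CHANNEL, {'ack': 'auto'})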
Example #4
def runrealtime():
    try:
        client = Stomp(CONFIG)
        client.connect()
        # client.subscribe(QUEUE, {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL})
        client.subscribe(BATCH_PROCESS_QUEUE,
                         {StompSpec.ACK_HEADER: StompSpec.ACK_AUTO})
        frame = None
        while True:
            try:
                frame = client.receiveFrame()
                body = json.loads(frame.body.decode())
                client.send(body=json.dumps(body, ensure_ascii=False).encode(),
                            destination=se.EPICC_BATCH_REPAIR_QUEUE)
                batch_repair_thread(body, client)
            except Exception as e:
                log.error(e)
                if frame is not None:
                    log.error(frame.body)
                log.error(traceback.format_exc())

    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
Example #5
class MessageClient:
    """
    STOMP sync client
    """
    def __init__(self, stomp_config: StompConfig):
        self.client = Stomp(stomp_config)

    def publish_model_score(self, score_date: date, score_value: float):
        """
        Publishes the score date and value on the message queue
        """
        self.client.connect(headers={'passcode': _MQ_PASS, 'login': _MQ_USER})
        message = 'date={0}\nvalue={1}'.format(score_date,
                                               str(score_value)).encode()
        self.client.send(_MQ_DEST, body=message)
        self.client.disconnect()

    def close(self):
        """
        Closes the session and transport, flushing the active subscription
        """
        self.client.close(flush=True)
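A hedged usage sketch for MessageClient; the broker URL is a placeholder, and _MQ_USER, _MQ_PASS and _MQ_DEST are assumed to be module-level constants defined elsewhere in the source file.

from datetime import date
from stompest.config import StompConfig

mq = MessageClient(StompConfig('tcp://localhost:61613'))  # assumed broker address
mq.publish_model_score(date.today(), 0.87)  # connects, sends, disconnects
mq.close()                                  # flush and close the transport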
Example #6
def runrealtime():
    try:
        client = Stomp(CONFIG)
        client.connect()
        # client.subscribe(QUEUE, {StompSpec.ACK_HEADER: StompSpec.ACK_CLIENT_INDIVIDUAL})
        client.subscribe(BATCH_EPICC_QUEUE, {StompSpec.ACK_HEADER: StompSpec.ACK_AUTO})
        frame = None
        while True:
            try:
                frame = client.receiveFrame()
                body = json.loads(frame.body.decode())
                log.info('Got %s' % json.dumps(body, ensure_ascii=False))
                epicc_batch(body, client)
            except Exception as e:
                log.error(e)
                if frame is not None:
                    log.error(frame.body)
                log.error(traceback.format_exc())

    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
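These consumers relay JSON bodies to other queues via client.send(). If the broker is ActiveMQ, adding a persistent header makes the relayed message survive a broker restart; a hedged sketch (the header is ActiveMQ-specific and not something these examples configure):

                client.send(body=json.dumps(body, ensure_ascii=False).encode(),
                            destination=BATCH_EPICC_QUEUE,
                            headers={'persistent': 'true'})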
Example #7
class Command(BaseCommand):
    """Service that listens for Fedora STOMP updates and processes those objects
       against the configured site indexes."""
    to_index = {}
    # delay before indexing
    # - since an object may be updated by several API calls in sequence, delay a bit
    #   so that hopefully we only index it once for a single group of updates
    # TODO: make delay configurable
    index_delay = 4  # time delay after the *first* modification message before indexing should be done
    index_delta = timedelta(seconds=index_delay)
    index_max_tries = 3  # number of times to try indexing an item, if a recoverable error happens

    stomp_server = None
    stomp_port = None
    listener = None

    # connection error handling/retry settings
    # if we lose the connection to fedora, how long do we wait between attempts to reconnect?
    retry_reconnect_wait = 5
    # how many times do we try to reconnect to fedora before we give up?
    max_reconnect_retries = 5

    idle_reconnect = None

    option_list = BaseCommand.option_list + (
        make_option('--max-reconnect-retries', type='int', dest='max_reconnect_retries',
                    default=max_reconnect_retries,
                    help='How many times to try reconnecting if the connection ' +
                         'is lost (default: %default; -1 for no maximum)'),
        make_option('--retry-reconnect-wait', type='int', dest='retry_reconnect_wait',
                    default=retry_reconnect_wait,
                    help='How many seconds to wait between reconnect attempts if the ' +
                         'connection is lost (default: %default)'),
        make_option('--index-max-tries', type='int', dest='index_max_tries',
                    default=index_max_tries,
                    help='Number of times to attempt indexing an item when a potentially ' +
                         'recoverable error is encountered'),
        make_option('--idle-reconnect', type='int', dest='idle_reconnect',
                    help='Reconnect when there has been no activity for the specified ' +
                         'number of minutes'),
    )

    # flag will be set to True when a SIGINT has been received
    interrupted = False

    # class variables defined in setup
    indexes = []
    last_activity = None
    verbosity = None
    repo = None

    def init_listener(self):
        if self.stomp_server is None or self.stomp_port is None:
            self.stomp_server = settings.INDEXER_STOMP_SERVER
            self.stomp_port = int(settings.INDEXER_STOMP_PORT)

        self.listener = Stomp(StompConfig('tcp://%s:%s' % (self.stomp_server, self.stomp_port)))
        self.listener.connect()
        logger.info('Connected to message queue on %s:%i',
                    self.stomp_server, self.stomp_port)
        self.listener.subscribe(settings.INDEXER_STOMP_CHANNEL,
                                {'ack': 'client'})  #  can we use auto-ack ?
        self.last_activity = datetime.now()

    def init_indexes(self):
        # initialize all indexes configured in django settings
        self.indexes, init_errors = init_configured_indexes()
        if init_errors:
            msg = 'Error loading index configuration for the following sites:\n'
            for site, err in init_errors.iteritems():
                msg += '\t%s:\t%s\n' % (site, err)
            # report once, after collecting all per-site errors
            self.stdout.write(msg + '\n')

        if self.verbosity > self.v_normal:
            self.stdout.write('Indexing the following sites:\n')
            for site, index in self.indexes.iteritems():
                self.stdout.write('\t%s\n%s\n' % (site, index.config_summary()))

    # verbosity option set by django BaseCommand
    v_normal = 1  # 1 = normal, 0 = minimal, 2 = all

    def handle(self, verbosity=v_normal, retry_reconnect_wait=None,
               max_reconnect_retries=None, index_max_tries=None,
               idle_reconnect=None, *args, **options):
        # bind a handler for interrupt signal
        signal.signal(signal.SIGINT, self.interrupt_handler)
        signal.signal(signal.SIGHUP, self.hangup_handler)

        # verbosity should be set by django BaseCommand standard options
        self.verbosity = int(verbosity)

        # override retry/wait default settings if specified
        if retry_reconnect_wait:
            self.retry_reconnect_wait = retry_reconnect_wait
        if max_reconnect_retries:
            self.max_reconnect_retries = max_reconnect_retries
        if index_max_tries:
            self.index_max_tries = index_max_tries

        if idle_reconnect is not None:
            self.idle_reconnect = timedelta(minutes=idle_reconnect)

        # check for required settings

        self.repo = Repository()
        try:
            self.init_listener()
        except StompConnectTimeout:
            # if we can't connect on start-up, bail out
            raise CommandError('Error connecting to %s:%s' % (self.stomp_server, self.stomp_port) +
                               '\nCheck that Fedora is running and that messaging is enabled ' +
                               'and configured correctly')

        # load site index configurations
        self.init_indexes()

        while True:

            # check time since last activity if idle reconnect is configured
            if self.idle_reconnect and \
                   datetime.now() - self.last_activity >= self.idle_reconnect:
                logger.info('Time since last activity has exceeded idle reconnect time [%s]',
                            self.idle_reconnect)
                # disconnect and reconnect the stomp listener
                self.listener.disconnect()
                self.reconnect_listener()


            # if we've received an interrupt, don't check for new messages
            if self.interrupted:
                # if we're interrupted but still have items queued for update,
                # sleep instead of listening to the queue
                if self.to_index:
                    sleep(self.index_delay)

            # no interrupt - normal behavior
            else:
                # check if there is a new message, but time out after the
                # configured index delay so we can process any recently
                # updated objects
                try:
                    data_available = self.listener.canRead(timeout=self.index_delay)

                except StompConnectionError as err:
                    # probably indicates fedora has gone down
                    logger.error('Received Stomp Connection error "%s"',  err)
                    self.reconnect_listener()
                    data_available = False

                except Exception as err:
                    # signals like SIGINT/SIGHUP get propagated to the socket
                    if self.interrupted:
                        pass
                    else:
                        logger.error('Error during Stomp listen: %s', err)
                    data_available = False

                # When Fedora is shut down, canRead returns True but we
                # get an exception on the receiveFrame call - catch that
                # error and try to reconnect
                try:

                    # if there is a new message, process it
                    if data_available:
                        frame = self.listener.receiveFrame()
                        self.last_activity = datetime.now()
                        # NOTE: could make use of message body instead of/in addition to headers
                        # (includes datastream id for modify datastream API calls)
                        pid = frame.headers['pid']
                        method = frame.headers['methodName']
                        logger.debug('Received message: %s %s', method, pid)
                        self.listener.ack(frame)

                        self.process_message(pid, method)

                except Exception as e:
                    # this most likely indicates that Fedora is no longer available;
                    # periodically attempt to reconnect (within some limits)
                    if isinstance(e, StompFrameError):
                        logger.error('Received Stomp frame error "%s"',  e)
                    else:
                        # in some cases, getting a generic Exception "Connection Closed"
                        # when Fedora shuts down
                        logger.error('Error listening to Stomp: %s', e)

                    # wait and try to re-establish the listener
                    # - will either return on success or raise a CommandError if
                    # it can't connect within the specified time/number of retries
                    self.reconnect_listener()

            # process the index queue for any items that need it
            self.process_queue()

            # if we've received an interrupt and there is nothing queued to index,
            # quit
            if self.interrupted and not self.to_index:
                return

    def reconnect_listener(self):
        '''Attempt to reconnect the listener, e.g. if Fedora is
        shutdown.  Waits the configured time between attemps to
        reconnect; will try to reconnect a configured number of times
        before giving up.'''

        # wait the configured time and try to re-establish the listener
        retry_count = 1
        while retry_count <= self.max_reconnect_retries or self.max_reconnect_retries == -1:
            sleep(self.retry_reconnect_wait)
            try:
                self.listener = None
                self.init_listener()
                # if listener init succeeded, return for normal processing
                logger.info('Reconnect attempt %d succeeded', retry_count)
                return

            # if fedora is still not available, attempting to
            # listen will generate a socket error
            except StompError:
                # could be StompConnectTimeout, StompConnectionError, etc.

                try_detail = ''
                if self.max_reconnect_retries != -1:
                    try_detail = 'of %d ' % self.max_reconnect_retries
                logger.error('Reconnect attempt %d %sfailed; waiting %ds before trying again',
                             retry_count, try_detail, self.retry_reconnect_wait)
                retry_count += 1

        # if we reached the max retry without connecting, bail out
        # TODO: better error reporting - should this send an admin email?
        raise CommandError('Failed to reconnect to message queue after %d retries' %
                           (retry_count - 1))

    def process_message(self, pid, method):
        # process an update message from fedora

        # when an object is purged from fedora, remove it from the index
        if method == 'purgeObject':
            # since we don't know which index (if any) this object was indexed in,
            # delete it from all configured indexes
            for site, index in self.indexes.iteritems():
                try:
                    index.delete_item(pid)
                except Exception as e:
                    logging.error("Failed to purge %s (%s): %s",
                                  pid, site, e)

                    # Add a prefix to the detail error message if we
                    # can identify what type of error this is.
                    detail_type = ''
                    if isinstance(e, SolrError):
                        detail_type = 'Solr Error: '
                    action_str = 'Purge: '
                    msg = '%s%s%s' % (detail_type, action_str, e)
                    err = IndexError(object_id=pid, site=site, detail=msg)
                    err.save()
            logger.info('Deleting %s from all configured Solr indexes', pid)
            # commit?

        # ingest, modify object or modify datastream
        else:
            # if the object isn't already in the queue to be indexed, check if it should be
            if pid not in self.to_index:
                # get content models from resource index
                obj_cmodels = list(self.repo.risearch.get_objects('info:fedora/%s' % pid,
                                                                  modelns.hasModel))
                # may include generic content models, but should not be a problem

                # find which configured site(s) index the item
                for site, index in self.indexes.iteritems():
                    if index.indexes_item(obj_cmodels):
                        if pid not in self.to_index:
                            # first site found - create a queue item and add to the list
                            self.to_index[pid] = QueueItem(site)
                        else:
                            # subsequent site - add the site to the existing queue item
                            self.to_index[pid].add_site(site)

    def process_queue(self):
        '''Loop through items that have been queued for indexing; if
        the configured delay time has passed, then attempt to index
        them, and log any indexing errors.'''
        # check if there are any items that should be indexed now
        if self.to_index:
            logger.debug('Objects queued to be indexed: %s',
                         ', '.join(self.to_index.keys()))

            queue_remove = []
            for pid in self.to_index.iterkeys():
                # if we've waited the configured delay time, attempt to index
                if datetime.now() - self.to_index[pid].time >= self.index_delta:
                    sites_to_index = self.to_index[pid].sites_to_index

                    logger.info('Indexing %s in %s',
                                pid, ', '.join(sites_to_index))

                    # a single object could be indexed by multiple sites; index all of them
                    for site in sites_to_index:
                        self.index_item(pid, self.to_index[pid], site)

                    if not self.to_index[pid].sites_to_index:
                        # if all configured sites indexed successfully
                        # or failed and should not be re-indexed,
                        # store pid to be removed from the queue
                        queue_remove.append(pid)


            # clear out any pids that were indexed successfully OR
            # errored from the list of objects still to be indexed
            for pid in queue_remove:
                del self.to_index[pid]

    def index_item(self, pid, queueitem, site):
        '''Index an item in a single configured site index and handle
        any errors, updating the queueitem retry count and marking
        sites as indexed according to success or any errors.

        :param pid: pid for the item to be indexed
        :param queueitem: :class:`QueueItem`
        :param site: name of the site index to use
        '''
        try:
            # tell the site index to index the item - returns True on success
            if self.indexes[site].index_item(pid):
                # mark the site index as complete on the queued item
                self.to_index[pid].site_complete(site)

        except RecoverableIndexError as rie:
            # If the index attempt resulted in error that we
            # can potentially recover from, keep the item in
            # the queue and attempt to index it again.

            # Increase the count of index attempts, so we know when to stop.
            self.to_index[pid].tries += 1

            # quit when we reached the configured number of index attempts
            if self.to_index[pid].tries >= self.index_max_tries:
                logger.error("Failed to index %s (%s) after %d tries: %s",
                              pid, site, self.to_index[pid].tries, rie)

                err = IndexError(object_id=pid, site=site,
                                 detail='Failed to index after %d attempts: %s' % \
                                 (self.to_index[pid].tries, rie))
                err.save()
                # we've hit the index retry limit, so set site as complete on the queue item
                self.to_index[pid].site_complete(site)

            else:
                logger.warning("Recoverable error attempting to index %s (%s), %d tries: %s",
                               pid, site, self.to_index[pid].tries, rie)

                # update the index time - wait the configured index delay before
                # attempting to reindex again
                self.to_index[pid].time = datetime.now()

        except Exception as e:
            logging.error("Failed to index %s (%s): %s",
                          pid, site, e)

            # Add a prefix to the detail error message if we
            # can identify what type of error this is.
            detail_type = ''
            if isinstance(e, SolrError):
                detail_type = 'Solr Error: '
            msg = '%s%s' % (detail_type, e)
            err = IndexError(object_id=pid, site=site,
                             detail=msg)
            err.save()

            # any exception not caught in the recoverable error block
            # should not be attempted again - set site as complete on queue item
            self.to_index[pid].site_complete(site)


    def interrupt_handler(self, signum, frame):
        '''Gracefully handle a SIGINT, if possible.  Reports status if
        main loop is currently part-way through pages for a volume,
        sets a flag so main script loop can exit cleanly, and restores
        the default SIGINT behavior, so that a second interrupt will
        stop the script.
        '''
        if signum == signal.SIGINT:
            # restore default signal handler so a second SIGINT can be used to quit
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            # set interrupt flag so main loop knows to quit as soon as it can
            self.interrupted = True

            if self.verbosity >= self.v_normal:
                # log as well as printing to stdout (for manual run & init.d-type service run)
                msg = 'SIGINT received; stopping'
                self.stdout.write('%s\n' % msg)
                logger.info(msg)

            # report if indexer currently has items queued for indexing
            if self.to_index:
                msg = '\n%d item(s) currently queued for indexing.\n' % len(self.to_index.keys())
                msg += 'Indexer will stop listening for updates and exit after currently queued items have been indexed.\n'
                # log summary information
                logger.info(msg)
                msg += '(Ctrl-C / Interrupt again to quit immediately)\n'
                self.stdout.write(msg)

            self.stdout.flush()

    def hangup_handler(self, signum, frame):
        '''On SIGHUP, reload site index configurations and
        reinitialize connections to Solr.
        '''
        if signum == signal.SIGHUP:
            # it would be even better if we could reload django settings here,
            # but I can't figure out a way to do that...

            if self.verbosity >= self.v_normal:
                # log as well as printing to stdout
                msg = 'SIGHUP received; reloading site index configurations.'
                self.stdout.write(msg)
                logger.info(msg)
            # reload site indexes
            self.init_indexes()
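QueueItem is referenced throughout the Command class but not included in this listing. The sketch below is an interface reconstruction consistent with how the class uses it (QueueItem(site), add_site(), site_complete(), .time, .tries, .sites_to_index), not the original implementation; sites_to_index returns a fresh list so process_queue() can iterate it while index_item() marks sites complete.

from datetime import datetime

class QueueItem(object):
    """Bookkeeping for one object queued for indexing (reconstructed sketch)."""
    def __init__(self, site):
        self.time = datetime.now()   # when the first update message arrived
        self.tries = 0               # index attempts so far
        self._sites = {site: False}  # site name -> indexed/given-up flag

    def add_site(self, site):
        self._sites.setdefault(site, False)

    def site_complete(self, site):
        self._sites[site] = True

    @property
    def sites_to_index(self):
        # fresh list on each access, so callers can iterate while marking complete
        return [site for site, done in self._sites.items() if not done]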
Example #8
def send_score_to_message_queue(date, score):
    client = Stomp(StompConfig('tcp://fmapiclient.cs.ucl.ac.uk:7672', version=StompSpec.VERSION_1_0))
    client.connect(headers={'passcode': 'admin', 'login': '******'})
    # encode to bytes (stompest expects a bytes body on Python 3)
    message = 'date={0}\nvalue={1}'.format(date, str(score)).encode()
    client.send('/queue/PubModelScore.Q', body=message)
    client.disconnect()
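A variant of send_score_to_message_queue that avoids hard-coding credentials; the MQ_USER/MQ_PASS environment variable names are assumptions, not values from the source:

import os
from stompest.config import StompConfig
from stompest.protocol import StompSpec
from stompest.sync import Stomp

def send_score_to_message_queue(date, score):
    client = Stomp(StompConfig('tcp://fmapiclient.cs.ucl.ac.uk:7672',
                               version=StompSpec.VERSION_1_0))
    # credentials come from the environment (assumed variable names)
    client.connect(headers={'login': os.environ['MQ_USER'],
                            'passcode': os.environ['MQ_PASS']})
    client.send('/queue/PubModelScore.Q',
                body='date={0}\nvalue={1}'.format(date, score).encode())
    client.disconnect()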
Example #9
        # (truncated fragment: tail of a function that persists the result)
        mg_update_insert('hebaoinfo', query, BODY)
        log.info(u'Database update succeeded - {0}'.format(appNo))
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())


if __name__ == "__main__":
    try:
        sessBase = r.get("12_COMPANY")
        if not sessBase:
            session = login_ancheng()
        else:
            session = pickle.loads(codecs.decode(sessBase.encode(), "base64"))

        client = Stomp(CONFIG)
        client.connect()

        def hebao_job():
            appno_list = r.smembers(appno_key)
            for appno in appno_list:
                get_hebao(session, appno)

        import logging

        logging.basicConfig()
        from apscheduler.schedulers.blocking import BlockingScheduler

        sched = BlockingScheduler()
        sched.add_job(hebao_job, 'interval', seconds=60 * 10)
        sched.start()
    # assumed closing handler (the listing is truncated); mirrors the error
    # logging used elsewhere in this file
    except Exception as e:
        log.error(e)
        log.error(traceback.format_exc())
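Since BlockingScheduler.start() blocks until the process is interrupted, a hedged variant of the scheduler start-up that catches Ctrl-C and disconnects the module-level STOMP client (an illustration, not from the source):

        try:
            sched.start()
        except (KeyboardInterrupt, SystemExit):
            client.disconnect()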