Example #1
File: http.py Project: westonpace/bowser
class HttpService(object):
    
    def __init__(self):
        self.__async_executor = ThreadPoolExecutor(max_workers=10)
        self.logger = logging.getLogger(__name__)
        self.__http = Http()
    
    def get(self, request):
        return self.make_request(request, 'GET')
    
    def post(self, request):
        return self.make_request(request, 'POST')
    
    def put(self, request):
        return self.make_request(request, 'PUT')
    
    def delete(self, request):
        return self.make_request(request, 'DELETE')
    
    def make_request(self, request, method):
        future = HttpFuture()
        self.__async_executor.submit(self.__do_request, request, method, future)
        return future

    def __do_request(self, request, method, future):
        try:
            uri = request.url + urllib.parse.urlencode(request.parameters)
            headers, content = self.__http.request(uri, method, request.data, request.headers)
            future.fulfill(headers, content)
        except Exception as ex:
            self.logger.exception("Http __do_request attempt failed with exception")
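A minimal sketch (placeholder names, not from the project) of the same submit-and-future pattern using only the standard library: the Future returned by submit() already carries either the worker's result or its exception, whereas the example above only logs on failure and never completes its HttpFuture.

from concurrent.futures import ThreadPoolExecutor

def fetch(url):
    # placeholder worker; a real implementation would perform the HTTP request
    if not url.startswith("http"):
        raise ValueError("bad url")
    return "headers", "content for %s" % url

executor = ThreadPoolExecutor(max_workers=10)
future = executor.submit(fetch, "http://example.com")
headers, content = future.result()  # blocks; re-raises the worker's exception on failure
executor.shutdown()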
Example #2
File: _futures.py Project: delmic/odemis
    def __init__(self):
        # just a big number of workers
        ThreadPoolExecutor.__init__(self, max_workers=100)
        self._queue = collections.deque()  # thread-safe queue of futures
        self._waiting_work = collections.deque()  # tuple (WorkItem, future, set=dependences)

        # (dict id(future) -> set): futures running -> dependences used
        self._sets_in_progress = {}
        self._set_remove = threading.Lock()
Example #3
File: thread.py Project: shirdrn/python
class ThreadedPoolExecutor(PoolExecutor):
    '''
    Pooled executor implementation based on a wrapped
    ThreadPoolExecutor object.
    '''
    def __init__(self, context, max_workers=1):
        super(ThreadedPoolExecutor, self).__init__(context)
        self._pool = ThreadPoolExecutor(max_workers)
    
    def execute(self, task):
        self._pool.submit(task.processor)
Example #4
def test_log_traceback_threaded(caplog):
    @log_traceback
    def f():
        raise Exception()

    e = ThreadPoolExecutor(max_workers=1)
    f = e.submit(f)
    while f.running():
        time.sleep(0.1)

    assert caplog.records()[0].message.endswith(" is about to be started")
    assert caplog.records()[1].message.startswith("Traceback")
    assert caplog.records()[1].message.endswith("Exception\n")
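A sketch (not the project's test; names assumed) of waiting on the future directly instead of polling running(): Future.exception() blocks until the callable finishes and returns the exception it raised, if any.

from concurrent.futures import ThreadPoolExecutor

def boom():
    raise RuntimeError("boom")

with ThreadPoolExecutor(max_workers=1) as executor:
    future = executor.submit(boom)
    error = future.exception()  # blocks until done; None if nothing was raised
    assert isinstance(error, RuntimeError)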
Example #5
 def __init__(self, name, detector, dataflow, emitter,
              ccd, stage, focus, shiftebeam=MTD_MD_UPD, **kwargs):
     """
     shiftebeam (MTD_*): if MTD_EBEAM_SHIFT, will correct the SEM position using beam shift
      (iow, using emitter.shift). If MTD_MD_UPD, it will just update the
      position correction metadata on the SEM images.
     ccd (Optical detector)
     stage (actuator): the sample stage, just to know when re-alignment is needed
     focus (actuator): the _optical_ focuser, just to know when re-alignment is needed
     focuser (actuator): the _e-beam_ focuser, to allow focusing the image
     """
     super(AlignedSEMStream, self).__init__(name, detector, dataflow, emitter, **kwargs)
     self._ccd = ccd
     self._stage = stage
     self._focus = focus
     self._shiftebeam = shiftebeam
     self.calibrated = model.BooleanVA(False)  # whether the calibration has been already done
     self._last_pos = stage.position.value.copy()
     self._last_pos.update(focus.position.value)  # last known position of the stage
     self._shift = (0, 0)  # (float, float): shift to apply in meters
     self._last_shift = (0, 0)  # (float, float): last ebeam shift applied
     # In case initialization takes place in unload position the
     # calibration values are not obtained yet. Thus we avoid to initialize
     # cur_trans before spot alignment takes place.
     self._cur_trans = None
     stage.position.subscribe(self._onMove)
     focus.position.subscribe(self._onMove)
     self._executor = ThreadPoolExecutor(max_workers=1)
     self._beamshift = None
Example #6
File: _live.py Project: delmic/odemis
 def __init__(self, name, detector, dataflow, emitter,
              ccd, stage, focus, shiftebeam=MTD_MD_UPD, **kwargs):
     """
     shiftebeam (MTD_*): if MTD_EBEAM_SHIFT, will correct the SEM position using beam shift
      (iow, using emitter.shift). If MTD_MD_UPD, it will just update the
      position correction metadata on the SEM images.
     ccd (Optical detector)
     stage (actuator): the sample stage, just to know when re-alignment is needed
     focus (actuator): the _optical_ focuser, just to know when re-alignment is needed
     focuser (actuator): the _e-beam_ focuser, to allow focusing the image
     """
     if "acq_type" not in kwargs:
         kwargs["acq_type"] = model.MD_AT_EM
     super(AlignedSEMStream, self).__init__(name, detector, dataflow, emitter, **kwargs)
     self._ccd = ccd
     self._stage = stage
     self._focus = focus
     self._shiftebeam = shiftebeam
     self.calibrated = model.BooleanVA(False)  # whether the calibration has been already done
     self._last_pos = stage.position.value.copy()
     self._last_pos.update(focus.position.value)  # last known position of the stage
     stage.position.subscribe(self._onMove)
     focus.position.subscribe(self._onMove)
     self._executor = ThreadPoolExecutor(max_workers=1)
     self._beamshift = None
Example #7
    def __init__(self):
        # Scene processors.
        self.__sceneController = SceneController.instance()
        self.__methodController = MethodController.instance()

        self.__sceneUpdateThreadPool = ThreadPoolExecutor(max_workers=1)
        self.__sceneExecThreadPool = ThreadPoolExecutor(max_workers=AppConstants.MAX_SCENE_EXEC_THREAD_SIZE)
        self.__sceneExecutionResultThreadPool = ThreadPoolExecutor(max_workers=5)

        # Scene run workers.
        self.__sceneExecLocks = {}
        
        # Favorite edit lock
        self.__fav_lock = threading.Lock()

        # Listeners.
        GroupController.instance().listen_to_group_icon_change(self.__on_group_icon_changed)
        self.__methodController.listen_to_method_status_change(self.__on_method_status_changed)
Example #8
    def __init__(self, backbone, brain=None):
        self._backbone = backbone
        self._brain = brain if brain is not None else Brain()
        self._brain_lock = threading.Lock()
        self._regex_to_response = OrderedDict()
        self._scripts = OrderedDict()

        self._pool = ThreadPoolExecutor(max_workers=4)
        self._futures = []  # list of futures submitted to the pool
        self._stop_loop = threading.Event()
Example #9
File: srv.py Project: natemago/srv
 def __init__(self, server_address, RequestHandlerClass,
              bind_and_activate=True, handlers=[],
              srv_path=".",
              configuration={}):
     HTTPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
     self.handlers = sorted(handlers, key=lambda k: k["weight"])
     self.srv_path = srv_path
     self.configuration = configuration
     self.logger = self.setup_logger()
     self.executor = ThreadPoolExecutor(max_workers=20)
     self.initialize_server()
Example #10
File: __init__.py Project: ifrpl/toddler
    def __init__(self, rabbitmq_url=None, queue=None, routing_key=None,
                 exchange="message", exchange_type="direct", log=None,
                 max_tasks=5, logging=None):
        """

        == Config dict structure (case adjusted to json configuration):
        {
            "rabbit": {
                "url": "apmq://rabbit",
                "queue": "test",
                "routingKey": "example.json"
                "exchange": "message", // optional, default: message
                "exchangeType:" "topic" // optional, default: topic
            }
        }

        :param str rabbitmq_url: optional url to rabbitmq
        :param str queue: name of the queue
        :param str routing_key: routing key for queue
        :param str exchange: name of the exchange
        :param str exchange_type: type of the exchange
        :param dict config: Manager configuration from parsed json config all
                            the above options can be configured from it
        :param logging.Logger log: optional logger to use instead of creating a new one
        :raises exceptions.NotConfigured:
        :return:
        """

        if queue is None:
            raise exceptions.NotConfigured("Missing queue")

        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self._max_tasks = max_tasks  # 2 cores + 1
        self._tasks_number = 0
        self._executor = ThreadPoolExecutor(max_workers=self._max_tasks)
        self._max_tasks_warning_counter = 0

        self._rabbitmq_url = rabbitmq_url
        self._queue = queue
        self._routing_key = routing_key
        self._exchange = exchange
        self._exchange_type = exchange_type

        if log is None:
            from toddler.logging import setup_logging
            if logging is not None:
                self.log = setup_logging(config=logging)
            else:
                self.log = setup_logging()
        else:
            self.log = log
Example #11
 def __init__(self):
     # Rule processors.
     self.__ruleController = RuleController()
     self.__methodController = MethodController()
     self.__triggerController = TriggerController.instance()
     
     self.__ruleUpdateThreadPool = ThreadPoolExecutor(max_workers=1)
     self.__ruleExecThreadPool = ThreadPoolExecutor(max_workers=AppConstants.MAX_RULE_EXEC_THREAD_SIZE)
     
     # Rule run workers.
     self.__ruleExecInfos = {}
     self.__condCallGroup = MethodCallGroup()
     self.__execCallGroup = MethodCallGroup()
     
     # Listeners.
     self.__ruleController.listen_to_rule_status_change(self.__on_rule_status_changed)
     GroupController.instance().listen_to_group_icon_change(self.__on_group_icon_changed)
     self.__methodController.listen_to_method_status_change(self.__on_method_status_changed)
     EventController.instance().listen_to_event_callback(self.__on_method_event_callback)
     self.__triggerController.listen_to_trigger_callback(self.__on_trigger_callback)
Example #12
 def __init__(self):
     self.__lock = threading.Lock()
     self.__serLock = SERLock()
     self.__serController = SceneExecutionResultController()
     self.__serRetryThreadPool = ThreadPoolExecutor(max_workers=3)
     
     def after_ser_deleted(serId):
         self.__serLock.declare_as_deleted(serId) # Report serId is deleted to SERLock instance. 
         self.__broadcast_message__ser_deleted(serId) # Broadcast event to mobile client.
     
     SceneExecutionResultDataHandler.AFTER_RECORD_DELETE = after_ser_deleted
     SceneExecutionResultService.__INSTANCE = self
Example #13
 def __init__(self, max_workers=5):
     '''
     This is multi-threaded caller.
     max_workers - Maximum simultaneous threads.
     async - Set it to True then "submit" will not wait for the response.
     '''
     self.__callId_callbackList = {} # 1-level dict, [callId] -- [callback tasks]
     self.__fs_callbackList = {} # 1-level dict, [fs] = [callback tasks]
     self.__fs_callId = {}
     self.__threadPool = ThreadPoolExecutor(max_workers=max_workers)
     self.__lock = Lock()
     self.__taskId = 0
Example #14
File: srv.py Project: natemago/srv
class DispatcherHTTPServer(HTTPServer):
    def __init__(self, server_address, RequestHandlerClass,
                 bind_and_activate=True, handlers=[],
                 srv_path=".",
                 configuration={}):
        HTTPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
        self.handlers = sorted(handlers, key=lambda k: k["weight"])
        self.srv_path = srv_path
        self.configuration = configuration
        self.logger = self.setup_logger()
        self.executor = ThreadPoolExecutor(max_workers=20)
        self.initialize_server()
        
    def initialize_server(self):
        self.logger.info('Initializing server')
    
    def finish_request(self, request, client_address):
        def async_finish_request(server, request, client_address, logger):
            server.RequestHandlerClass(request, client_address, server, logger)
        # Pass the callable and its arguments separately; calling it inline would run it
        # synchronously in this thread and submit its None return value to the pool.
        self.executor.submit(async_finish_request, self, request, client_address, self.logger)
    
    def setup_logger(self):
        logger = None
        if self.configuration.get('log_config_file') is not None:
            logger = self.get_logger(self.configuration.get('log_config_file'))
        else:
            logger = self.get_default_logger()
        return logger
        
    def get_logger(self, config_file):
        logging.config.fileConfig(config_file)
        return logging.getLogger('srv')
    
    def get_default_logger(self):
        logging.basicConfig(level=logging.INFO)
        return logging.getLogger('srv')
Example #15
def prepareServer(RequestHandlerClass, pipe, threads, timeout):
    '''
    Prepare in a process the request handling.
    '''
    def process(request, address):
        RequestHandlerClass(request, address, None)
        try: request.shutdown(socket.SHUT_WR)
        except socket.error: pass  # some platforms may raise ENOTCONN here
        request.close()
    
    pool = ThreadPoolExecutor(threads)
    while True:
        if not pipe.poll(timeout): break
        else:
            data = pipe.recv()
            if data is None: break
            elif data is True: continue
            
            requestfd, address = data
            request = socket.fromfd(rebuild_handle(requestfd), socket.AF_INET, socket.SOCK_STREAM)
            
            pool.submit(process, request, address)
            
    pool.shutdown(False)
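A minimal sketch of the pool.shutdown(False) call above: the executor stops accepting new submissions immediately, while tasks already submitted are still allowed to finish.

import time
from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=2)
future = pool.submit(time.sleep, 0.1)
pool.shutdown(wait=False)  # returns at once; the pending sleep still runs
future.result()            # blocks until that task completes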
Example #16
 def __init__(self, name, daemon):
     model.Component.__init__(self, name=name, daemon=daemon)
     self.executor = ThreadPoolExecutor(max_workers=1)
     self.number_futures = 0
     self.startAcquire = model.Event() # triggers when the acquisition of .data starts
     self.data = FakeDataFlow(sae=self.startAcquire)
     self.datas = SynchronizableDataFlow()
     
     self.data_count = 0
     self._df = None
     
     # TODO automatically register the property when serializing the Component
     self.prop = model.IntVA(42)
     self.cont = model.FloatContinuous(2.0, [-1, 3.4], unit="C")
     self.enum = model.StringEnumerated("a", set(["a", "c", "bfds"]))
     self.cut = model.IntVA(0, setter=self._setCut)
     self.listval = model.ListVA([2, 65])
Example #17
File: init.py Project: bobiwembley/sen
    def __init__(self):
        self.d = DockerBackend()

        # root widget
        self.mainframe = urwid.Frame(urwid.SolidFill())
        self.buffers = []
        self.footer = Footer(self)

        self.executor = ThreadPoolExecutor(max_workers=4)

        root_widget = urwid.AttrMap(self.mainframe, "root")
        self.main_list_buffer = None  # singleton

        screen = urwid.raw_display.Screen()
        screen.set_terminal_properties(256)
        screen.register_palette(PALLETE)

        super().__init__(root_widget, screen=screen)
        self.handle_mouse = False
        self.current_buffer = None
Example #18
    def __init__(self, address, name, keep_alive, thread_pool_size):
        Server._number += 1

        # Logger actually overridden by subclasses.
        self._log = logging.getLogger("lightstreamer-adapter.server")
        self._exception_handler = None
        self._config = {}
        self._config['address'] = address
        self._config['name'] = "#{}".format(Server._number) if (name is
                                                                None) else name
        self._config['keep_alive'] = max(0, keep_alive) if (keep_alive is not
                                                            None) else 0
        pool = max(0, thread_pool_size) if thread_pool_size is not None else 0
        if pool == 0:
            try:
                self._config['thread_pool_size'] = cpu_count()
            except NotImplementedError:
                self._config['thread_pool_size'] = Server._DEFAULT_POOL_SIZE
        else:
            self._config['thread_pool_size'] = pool

        self._executor = ThreadPoolExecutor(self._config['thread_pool_size'])
        self._server_sock = None
        self._request_receiver = None
Example #19
File: consts.py Project: Liu0330/zufang
from concurrent.futures.thread import ThreadPoolExecutor

import boto3
import qiniu

MAX_READ_SIZE = 64 * 1024

QINIU_ACCESS_KEY = 'KarvlHfUdoG1mZNSfDVS5Vh3nae2jUZumTBHK-PR'
QINIU_SECRET_KEY = 'SFPFkAn5NENhdCMqMe9wd_lxGHAeFR5caXxPTtt7'
QINIU_BUCKET_NAME = 'zufangwang'

AUTH = qiniu.Auth(QINIU_ACCESS_KEY, QINIU_SECRET_KEY)

# AWS3_REGION = 'region_name'
# AWS3_AK = 'access_key'
# AWS3_SK = 'secret_key'
# AWS3_BUCKET = 'bucket_name'
#
# S3 = boto3.client('s3', region_name=AWS3_REGION,
#                   aws_access_key_id=AWS3_AK, aws_secret_access_key=AWS3_SK)

# sysbench ---> a tool for benchmarking overall machine performance
MAX_THREAD_WORKERS = 64
EXECUTOR = ThreadPoolExecutor(max_workers=MAX_THREAD_WORKERS)
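A brief usage sketch for a shared module-level pool like EXECUTOR above (the upload function and its arguments are assumptions, not from the project):

from concurrent.futures.thread import ThreadPoolExecutor

EXECUTOR = ThreadPoolExecutor(max_workers=64)

def upload(bucket, key, data):
    # placeholder for an upload to Qiniu or S3
    return len(data)

future = EXECUTOR.submit(upload, "zufangwang", "photo.jpg", b"...")
size = future.result()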
Example #20
class AlignedSEMStream(SEMStream):
    """
    This is a special SEM stream which automatically first aligns with the
    CCD (using spot alignment) every time the stage position changes.
    Alignment correction can either be done via beam shift (=shift), or
    by just updating the image position.
    """
    def __init__(self, name, detector, dataflow, emitter,
                 ccd, stage, focus, shiftebeam=MTD_MD_UPD, **kwargs):
        """
        shiftebeam (MTD_*): if MTD_EBEAM_SHIFT, will correct the SEM position using beam shift
         (iow, using emitter.shift). If MTD_MD_UPD, it will just update the
         position correction metadata on the SEM images.
        ccd (Optical detector)
        stage (actuator): the sample stage, just to know when re-alignment is needed
        focus (actuator): the _optical_ focuser, just to know when re-alignment is needed
        focuser (actuator): the _e-beam_ focuser, to allow focusing the image
        """
        super(AlignedSEMStream, self).__init__(name, detector, dataflow, emitter, **kwargs)
        self._ccd = ccd
        self._stage = stage
        self._focus = focus
        self._shiftebeam = shiftebeam
        self.calibrated = model.BooleanVA(False)  # whether the calibration has been already done
        self._last_pos = stage.position.value.copy()
        self._last_pos.update(focus.position.value)  # last known position of the stage
        self._shift = (0, 0)  # (float, float): shift to apply in meters
        self._last_shift = (0, 0)  # (float, float): last ebeam shift applied
        # In case initialization takes place in unload position the
        # calibration values are not obtained yet. Thus we avoid to initialize
        # cur_trans before spot alignment takes place.
        self._cur_trans = None
        stage.position.subscribe(self._onMove)
        focus.position.subscribe(self._onMove)
        self._executor = ThreadPoolExecutor(max_workers=1)
        self._beamshift = None

    def _onMove(self, pos):
        """
        Called when the stage moves (changes position)
        pos (dict): new position
        """
        # Check if the position has really changed, as some stages tend to
        # report a "new" position even when no actual move has happened
        logging.debug("Stage location is %s m,m,m", pos)
        if self._last_pos == pos:
            return
        self._last_pos.update(pos)

        # if self.is_active.value:
        self.calibrated.value = False

        # just reset status
        self._setStatus(None)

    # need to override it to support beam shift
    def _applyROI(self):
        """
        Update the scanning area of the SEM according to the roi
        """
        res, shift = self._computeROISettings(self.roi.value)

        if (self._shiftebeam == MTD_EBEAM_SHIFT) and (self._beamshift is not None):
            shift = tuple(s + c for s, c in zip(shift, self._beamshift))

        # always in this order
        self._emitter.resolution.value = res
        self._emitter.shift.value = shift

    def _compensateShift(self):
        """
        Compensate the SEM shift, using either beam shift or metadata update
        """
        # update the correction metadata
        logging.debug("Update metadata for SEM image shift")
        self._detector.updateMetadata({MD_POS_COR: self._shift})

    def _prepare(self):
        """
        Perform calibration if needed
        """
        logging.debug("Preparing stream %s ...", self)
        # actually indicate that preparation has been triggered, don't wait for
        # it to be completed
        self._prepared = True
        f = self._executor.submit(self._DoPrepare)

        # Note that there is no need to call super(). This would only check
        # for an optical path manager which in this case has no effect.

        return f

    def __del__(self):
        self._executor.shutdown(wait=False)

    def _DoPrepare(self):
        # Need to calibrate ?
        if not self.calibrated.value:
            self._setStatus(logging.INFO, u"Automatic SEM alignment in progress…")
            # store current settings
            no_spot_settings = (self._emitter.dwellTime.value,
                                self._emitter.resolution.value)
            # Don't mess up with un/subscribing while doing the calibration
            self._getEmitterVA("dwellTime").unsubscribe(self._onDwellTime)
            self._getEmitterVA("resolution").unsubscribe(self._onResolution)

            shift = (0, 0)
            self._beamshift = None
            try:
                logging.info("Determining the Ebeam center position")
                # TODO Handle cases where current beam shift is larger than
                # current limit. Happens when accel. voltage is changed
                self._emitter.shift.value = (0, 0)
                shift = FindEbeamCenter(self._ccd, self._detector, self._emitter)
                logging.debug("Spot shift is %s m,m", shift)
                self._beamshift = shift
                # Also update the last beam shift in order to be used for stage
                # offset correction in the next stage moves
                self._last_shift = (0.75 * self._last_shift[0] - 0.25 * shift[0],
                                    0.75 * self._last_shift[1] - 0.25 * shift[1])
                cur_trans = self._stage.getMetadata().get(model.MD_POS_COR, (0, 0))
                self._cur_trans = (cur_trans[0] - self._last_shift[0],
                                   cur_trans[1] - self._last_shift[1])
                self._stage.updateMetadata({
                    model.MD_POS_COR: self._cur_trans
                })
                logging.debug("Compensated stage translation %s m,m", self._cur_trans)
                if self._shiftebeam == MTD_EBEAM_SHIFT:
                    # First align using shift
                    self._applyROI()
                    # Then by updating the metadata
                    shift = (0, 0)  # just in case of failure
                    shift = FindEbeamCenter(self._ccd, self._detector, self._emitter)
                elif self._shiftebeam == MTD_MD_UPD:
                    pass
                else:
                    raise NotImplementedError("Unknown shiftbeam method %s" % (self._shiftebeam,))
            except LookupError:
                self._setStatus(logging.WARNING, (u"Automatic SEM alignment unsuccessful", u"Need to focus all streams"))
                # logging.warning("Failed to locate the ebeam center, SEM image will not be aligned")
            except Exception:
                self._setStatus(logging.WARNING, (u"Automatic SEM alignment unsuccessful", u"Need to focus all streams"))
                logging.exception("Failure while looking for the ebeam center")
            else:
                self._setStatus(None)
                logging.info("Aligning SEM image using shift of %s", shift)
                self.calibrated.value = True
            finally:
                # restore hw settings
                (self._emitter.dwellTime.value,
                 self._emitter.resolution.value) = no_spot_settings
                self._getEmitterVA("dwellTime").subscribe(self._onDwellTime)
                self._getEmitterVA("resolution").subscribe(self._onResolution)

            self._shift = shift
            self._compensateShift()
Example #21
import logging
import pickle
from concurrent.futures.thread import ThreadPoolExecutor

import colorlog
from colorlog import ColoredFormatter

import source.pagination as pagination

paginator = pagination

thread_pool = ThreadPoolExecutor(max_workers=2)  # a thread pool

discordCharLimit = 2000

logging.SPAM = 9
logging.addLevelName(logging.SPAM, "SPAM")


def spam(self, message, *args, **kws):
    self._log(logging.SPAM, message, args, **kws)


logging.Logger.spam = spam


def getLog(filename, level=logging.DEBUG) -> logging.Logger:
    """ Sets up logging, to be imported by other files """
    streamHandler = colorlog.StreamHandler()
    streamFormatter = ColoredFormatter(
        "{asctime} {log_color}|| {levelname:^8} || {name:^15s} || {reset}{message}",
Example #22
File: __init__.py Project: ifrpl/toddler
class RabbitManager(BaseManager):
    """Base for managers that connects to rabbit

    """
    def __init__(self, rabbitmq_url=None, queue=None, routing_key=None,
                 exchange="message", exchange_type="direct", log=None,
                 max_tasks=5, logging=None):
        """

        == Config dict structure (case adjusted to json configuration):
        {
            "rabbit": {
                "url": "apmq://rabbit",
                "queue": "test",
                "routingKey": "example.json"
                "exchange": "message", // optional, default: message
                "exchangeType:" "topic" // optional, default: topic
            }
        }

        :param str rabbitmq_url: optional url to rabbitmq
        :param str queue: name of the queue
        :param str routing_key: routing key for queue
        :param str exchange: name of the exchange
        :param str exchange_type: type of the exchange
        :param dict config: Manager configuration from parsed json config all
                            the above options can be configured from it
        :param logging.Logger log: optional logger to use instead of creating a new one
        :raises exceptions.NotConfigured:
        :return:
        """

        if queue is None:
            raise exceptions.NotConfigured("Missing queue")

        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self._max_tasks = max_tasks  # 2 cores + 1
        self._tasks_number = 0
        self._executor = ThreadPoolExecutor(max_workers=self._max_tasks)
        self._max_tasks_warning_counter = 0

        self._rabbitmq_url = rabbitmq_url
        self._queue = queue
        self._routing_key = routing_key
        self._exchange = exchange
        self._exchange_type = exchange_type

        if log is None:
            from toddler.logging import setup_logging
            if logging is not None:
                self.log = setup_logging(config=logging)
            else:
                self.log = setup_logging()
        else:
            self.log = log

    def reconnect(self):
        """Will be run by IOLoop.time if the connection is closed.
        See on_connection_closed method.
        """
        self._connection.ioloop.stop()

        if not self._closing:
            self._connection = self.connect()
            self._connection.ioloop.start()

    @property
    def queue(self):
        return self._queue

    def on_connection_closed(self, connection, reply_code, reply_text):
        """

        :param pika.connection.Connection connection: closed connection ob
        :param int reply_code: reply code if given
        :param str reply_text: reply text if given
        :return:
        """
        self._channel = None
        if self._closing:
            self._connection.ioloop.stop()
        else:
            self.log.warning(
                "Connection closed, will reopen in 5 seconds: (%s) %s",
                reply_code,
                reply_text
            )

            self._connection.add_timeout(5, self.reconnect)

    def on_channel_closed(self, channel, reply_code, reply_text):
        """Invoked when channel has been closed

        :param pika.channel.Channel channel:
        :param int reply_code:
        :param str reply_text:
        :return:
        """
        self.log.info("Channel to rabbit closed.")
        self._connection.close()

    def on_channel_open(self, channel):
        """Invoked when channel has been opened

        :param pika.channel.Channel channel:
        """
        self.log.info("Channel opened")
        self._channel = channel
        self._channel.add_on_close_callback(self.on_channel_closed)
        self.start_consuming()

    def close_channel(self):
        self.log.info("Closing channel")
        self._channel.close()

    def open_channel(self):
        self.log.info("Opening channel")
        self._connection.channel(on_open_callback=self.on_channel_open)

    def on_connection_open(self, connection):

        self.log.info("Connected")
        self._connection = connection
        self._connection.add_on_close_callback(self.on_connection_closed)
        self.open_channel()

    def connect(self):
        """Connects to rabbitmq server, according to config
        :return pika.SelectConnection:
        """
        self.log.info("Connecting to RabbitMQ")
        return pika.BlockingConnection(
            pika.URLParameters(self._rabbitmq_url + "?heartbeat_interval=5"),
            # self.on_connection_open,
            # stop_ioloop_on_close=False

        )
    
    def on_cancel_ok(self, frame):
        """Invoked when locale Basic.Cancel is acknowledged by RabbitMQ

        :param pika.frame.Method frame:
        :return:
        """

        self.log.info("Rabbit acknowledged the cancel of the consumer")
        self.close_channel()

    def on_consumer_cancelled(self, method_frame):
        """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
        receiving messages.

        :param pika.frame.Method method_frame: The Basic.Cancel frame
        :return:
        """
        self.log.info("Consumer was cancelled remotely, shutting down: %r",
                      method_frame)
        if self._channel:
            self._channel.close()

    def acknowledge_message(self, delivery_tag):
        """

        :param delivery_tag:
        :return:
        """
        self.log.info("Acknowledging message %s", delivery_tag)
        self._channel.basic_ack(delivery_tag)
        
    def requeue_message(self, delivery_tag):
        """
        
        :param delivery_tag: 
        :return:
        """
        self.log.info("Requeuing message %s", delivery_tag)
        self._channel.basic_nack(delivery_tag, requeue=True)

    def on_message(self, channel, basic_deliver, properties, body):
        """Invoked when message received from rabbit

        :param pika.channel.Channel channel:
        :param pika.spec.Basic.Deliver basic_deliver:
        :param pika.spec.BasicProperties properties:
        :param str body:
        :return:
        """

        self.log.info("Received messages # %s from %s",
                      basic_deliver.delivery_tag,
                      properties.app_id)
        
        try:
            if self._tasks_number >= self._max_tasks:
                raise RuntimeError("Max tasks limit reached")
            
            self._tasks_number += 1
            
            ftr = self._executor.submit(self.process_task, body)

            def process_done(future: Future):
                nonlocal self
                self._tasks_number -= 1
                if future.cancelled():
                    # process_task ended by cancel
                    self.requeue_message(basic_deliver.delivery_tag)
                else:
                    if future.exception():
                        exception = future.exception()
                        if not isinstance(exception, RequeueMessage):
                            self.log.exception(exception)
                        
                        self.requeue_message(
                            basic_deliver.delivery_tag
                        )
                    else:
                        self.acknowledge_message(basic_deliver.delivery_tag)

            ftr.add_done_callback(process_done)

            return ftr

        except RuntimeError:
            self.requeue_message(basic_deliver.delivery_tag)
            time.sleep(0.5)

        except Exception as e:
            self.log.exception(e)
            self.requeue_message(basic_deliver.delivery_tag)
            time.sleep(10)

    def stop_consuming(self):
        """Send Basic.Cancel to rabbit

        :return:
        """

        if self._channel:
            self.log.info("Stop consuming")
            self._channel.basic_cancel(self.on_cancel_ok, self._consumer_tag)

    def start_consuming(self):
        """Begins to consume messages

        :return:
        """

        self.log.info("Start consuming")

        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
        self._consumer_tag = self._channel.basic_consume(self.on_message,
                                                         self.queue)

        self.run()

    def run(self):
        """Run consumer"""

        self.log.info("Running consumer")
        connection = self.connect()
        """:type: pika.SelectConnection"""

        channel = connection.channel()
        self._channel = channel
        self._connection = connection

        for method_frame, properties, body in channel.consume(self.queue):
            while self._tasks_number >= self._max_tasks:
                time.sleep(0.1)

            self.on_message(channel, method_frame, properties, body)


    def stop(self):
        """Stops consuming service
        :return:
        """

        self.log.info("Stopping")
        self._closing = True
        self.stop_consuming()
        self._executor.shutdown(True)
        # if self._connection is not None:
        #     self._connection.ioloop.start()
        self.log.info("Stopped")

    def __exit__(self, *args, **kwargs):

        self.stop()
        super(RabbitManager, self).__exit__(*args, **kwargs)
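A standard-library-only sketch of the throttling used above (placeholder names): a counter of in-flight tasks gates submission, and a done-callback decrements it, mirroring process_done.

import time
from concurrent.futures import ThreadPoolExecutor

MAX_TASKS = 5
executor = ThreadPoolExecutor(max_workers=MAX_TASKS)
in_flight = 0  # like self._tasks_number; not lock-protected, as in the original

def handle(body):
    time.sleep(0.01)  # stand-in for process_task

def submit(body):
    global in_flight
    while in_flight >= MAX_TASKS:
        time.sleep(0.1)  # same back-off as the consume loop above
    in_flight += 1
    future = executor.submit(handle, body)

    def done(finished):
        global in_flight
        in_flight -= 1  # free a slot when the task completes
        if finished.exception() is not None:
            pass  # requeue or log here, as process_done does

    future.add_done_callback(done)
    return future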
Example #23
 def __init__(self, max_workers, event_reactor):
     ThreadPoolExecutor.__init__(self, max_workers)
     EventReactorMixin.__init__(self, event_reactor)
     event_reactor.register_handler(EventReactor.STOP_ID, self._stop_cb)
     self._task_map = WeakValueDictionary()
Example #24
            # If max aligned number larger than 0, it considered that read is TP.
            return ReadType.TP if max_aligned > 0 else ReadType.FP, max_aligned
        else:
            target_aln = target_aligns[from_target.ref]
            hit_count = 0

            for pos in range(from_target.begin, from_target.end):
                hit_count += 1 if target_aln[pos] else 0

            aligned = hit_count / (from_target.end -
                                   from_target.begin) >= fn_cov

            return ReadType.FN if aligned else ReadType.TN, 0

    with ThreadPoolExecutor(max_workers=thread_num) as executor:
        map_list = executor.map(thread_fun, read_targets)

        progress_cnt = 0
        progress_bar = ProgressBar(maxval=len(read_targets),
                                   fd=sys.stderr).start()
        for result in map_list:
            read_type = result[0]
            if read_type == ReadType.TP:
                true_positive += 1
                aligned_base += result[1]
            elif read_type == ReadType.FP:
                false_positive += 1
            elif read_type == ReadType.TN:
                true_negative += 1
            elif read_type == ReadType.FN:
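The fragment above ends mid-branch; a minimal sketch (placeholder names) of the executor.map pattern it relies on, where results are yielded in input order regardless of which worker finishes first:

from concurrent.futures import ThreadPoolExecutor

def classify(read):
    # stand-in for thread_fun: returns (read_type, aligned_base_count)
    return ("TP" if read % 2 == 0 else "TN", read)

reads = list(range(10))
with ThreadPoolExecutor(max_workers=4) as executor:
    for read_type, aligned in executor.map(classify, reads):
        pass  # aggregate the per-type counters here, as in the loop above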
Example #25
class MyComponent(model.Component):
    """
    A component that does everything
    """
    def __init__(self, name, daemon):
        model.Component.__init__(self, name=name, daemon=daemon)
        self.executor = ThreadPoolExecutor(max_workers=1)
        self.number_futures = 0
        self.startAcquire = model.Event() # triggers when the acquisition of .data starts
        self.data = FakeDataFlow(sae=self.startAcquire)
        self.datas = SynchronizableDataFlow()
        
        self.data_count = 0
        self._df = None
        
        # TODO automatically register the property when serializing the Component
        self.prop = model.IntVA(42)
        self.cont = model.FloatContinuous(2.0, [-1, 3.4], unit="C")
        self.enum = model.StringEnumerated("a", set(["a", "c", "bfds"]))
        self.cut = model.IntVA(0, setter=self._setCut)
        self.listval = model.ListVA([2, 65])

    
    def _setCut(self, value):
        self.data.cut = value
        return self.data.cut
    
    
    @roattribute
    def my_value(self):
        return "ro"
    
    def ping(self):
        """
        Returns (string): pong
        """
        return "pong"
     
    def bad_call(self):
        """
        always raise an exception
        """
        raise MyError
    
    # oneway to ensure that it will be set in a different thread than the call
    @oneway
    def change_prop(self, value):
        """
        set a new value for the VA prop
        """
        self.prop.value = value
    
    @isasync
    def do_long(self, duration=5):
        """
        return a futures.Future
        """
        ft = self.executor.submit(self._long_task, duration)
        ft.add_done_callback(self._on_end_long)
        return ft

    def _long_task(self, duration):
        """
        returns the time it took
        """
        start = time.time()
        time.sleep(duration)
        return (time.time() - start)
    
    def get_number_futures(self):
        return self.number_futures
    
    def set_number_futures(self, value):
        self.number_futures = value
    
    def _on_end_long(self, future):
        self.number_futures += 1

    def sub(self, df):
        self._df = df
        df.subscribe(self.data_receive)
    
    def unsub(self):
        self._df.unsubscribe(self.data_receive)
    
    def data_receive(self, df, data):
        logging.info("Received data of shape %r", data.shape)
        self.data_count += 1
    
    def get_data_count(self):
        return self.data_count
    
    # it'll never be able to answer back if everything goes fine
    @oneway
    def stopServer(self):
        self._pyroDaemon.shutdown()
Example #26
    tokens_dict: Dict[str,
                      List[str]] = tokenizer.tokenize_bert(topic_info_dict)

    train_query, test_query = file_operation.extract_queries_for_bert()

    nltk.download('punkt')
    for topic_id in train_query:
        result_dict[topic_id] = {}
        train_query[topic_id] = nltk.tokenize.sent_tokenize(
            train_query[topic_id])

    vectorizer = Vectorizer()
    for topic_id in train_query:
        topic_id = str(topic_id)
        executor = ThreadPoolExecutor(len(tokens_dict))
        num_of_sentence_query = len(train_query[topic_id])
        i = 0
        ths = []

        temp_bm25_dict = bm25_dict[topic_id]
        sorted_bm25 = list(
            dict(
                sorted(temp_bm25_dict.items(),
                       key=lambda item: item[1],
                       reverse=True)).keys())[:100]

        for doc_id in sorted_bm25:
            num_of_sentence_doc = len(tokens_dict[doc_id])
            all_sentences = train_query[topic_id] + tokens_dict[doc_id]
Example #27
STATIC_DIR = os.path.abspath(os.path.dirname(__file__))
mf_url = 'http://www.91porn.com/v.php?category=mf&viewtype=basic&page=4'
headers = {
    'Referer': 'http://www.91porn.com/index.php',
    'Domain-Name': 'porn9_video_domain_name',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
}
headers_1 = {
    'Referer': 'http://www.91porn.com/index.php',
    'Domain-Name': 'porn9_video_domain_name',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'X-Forwarded-For': '104.216.141.12'
}
url_aria2 = 'http://104.206.144.11:6800/jsonrpc'

executor = ThreadPoolExecutor()
s = requests.Session()
retries = Retry(total=3, backoff_factor=1)
s.mount('http', HTTPAdapter(max_retries=retries))


def get_mf_list(url):
    mf_html = s.get(url, headers=headers_1)
    mf_list = {}
    soup = BeautifulSoup(mf_html.text, 'html.parser')
    for element in soup(text=lambda text: isinstance(text, Comment)):
        element.extract()
    image_channel = soup.find_all('div',
                                  class_='well well-sm videos-text-align')
    for channel in image_channel:
        a = channel.find('a', recursive=False)
Example #28
def create_load_balancer(app_name: str, dependencies=None):
    dependencies = [] if dependencies is None else dependencies
    app_name = app_name.replace(',', '_')

    app = Flask(app_name)
    app.__response_level = ResponseLevel.NORMAL
    app.__dependencies = dependencies
    app.__executor = ThreadPoolExecutor(max_workers=16)

    # check dependencies should run asynchronously
    def check_dependencies(visited: str, dependencies: Iterable[str],
                           executor: _base.Executor) -> bool:
        visited_set = set(visited.split(','))
        filtered_dep = filter(
            lambda x: PORT_APP[int(x.split(':')[1])] not in visited_set,
            dependencies)

        # futures = []
        # for target in filtered_dep:
        #     get_url = f'http://{target}/?from={visited}'
        #     future = executor.submit(lambda: http_get(get_url))
        #     futures.append((get_url, future))
        #
        # for (get_url, future) in futures:
        #     result = future.result()
        #     is_ok = result.status_code == 200
        #     if not is_ok:
        #         return False

        for target in filtered_dep:
            get_url = f'http://{target}/?from={visited}'
            response = http_get(get_url)
            is_ok = response.status_code == 200
            if is_ok:
                return True

        return False

    @app.route('/', methods=['GET'])
    @app.route('/ping', methods=['GET'])
    def ping():
        visited = request.args.get('from')
        updated_visited = current_app.name + ('' if visited is None else
                                              (',' + visited))

        response_level = current_app.__response_level
        if response_level == ResponseLevel.NORMAL:
            dependencies_ok = check_dependencies(updated_visited,
                                                 current_app.__dependencies,
                                                 current_app.__executor)
            message, code = ('OK', 200) if dependencies_ok else ('UNHEALTHY',
                                                                 500)
            return Response(message, code)

        elif response_level == ResponseLevel.TERMINATED:
            return Response('UNHEALTHY', 500)

        else:
            sleep(5.0)
            dependencies_ok = check_dependencies(updated_visited,
                                                 current_app.__dependencies,
                                                 current_app.__executor)
            message, code = ('OK', 200) if dependencies_ok else ('UNHEALTHY',
                                                                 500)
            return Response(message, code)

    @app.route('/response/<level>', methods=['GET'])
    def set_response_level(level):
        try:
            level = int(level)
            response_level = inverse_response_level(level)
            current_app.__response_level = response_level
            return 'OK'
        except ValueError as e:
            return str(e)

    return app
Example #29
def execute_command_in_dependencies(
    command,
    dependencies,
    required_files_filter=None,
    dry_run=False,
    verbose=False,
    continue_on_failure=False,
    here=False,
    jobs=1,
    jobs_unordered=False,
):
    """
    Execute the given command for the given dependencies.

    :param list(unicode) command: The commando to be executed.

    :param list(Dep) dependencies: The list of dependencies for which execute the command.

    :param callable required_files_filter: A list os files required in a dependency root directory
        to execute the command.

    :param bool dry_run: Does all the checks and most output normally but does not actually execute
        the command.

    :param bool verbose: Prints extra information.

    :param bool continue_on_failure: When this is `False` the first command with a non zero return
        code makes the dependency processing to stop and this function returns, when it is `True`
        all dependencies are always processed.

    :param bool here: Does not change the working dir to the root of the dependency when executing
        the command.

    :param int jobs: The number of concurrent jobs to be executed.

    :param bool jobs_unordered: This only makes a difference if jobs > 1, in which case it'll be
        able to run all jobs in parallel, without taking into account any pre-condition for the job
        to run (otherwise, it'll run jobs considering that its pre-requisites are ran first).

    :rtype: list(int)
    :return: The exit code of the commands executed so far (may be smaller than `dependencies` list
        when `continue_on_failure` is false).
    """
    exit_codes = []
    error_messages = []
    initial = [x.name for x in dependencies]
    buffer_output = False
    output_separator = "\n" + "=" * MAX_LINE_LENGTH

    if jobs > 1:
        buffer_output = True
        from concurrent.futures.thread import ThreadPoolExecutor

        executor = ThreadPoolExecutor(max_workers=jobs)
        previously_added_to_batch = set()

        def calculate_next_batch(dependencies):
            next_batch = []
            if jobs_unordered:
                next_batch.extend(dependencies)
                del dependencies[:]
            else:
                msg = []
                for i, dep in reversed(list(enumerate(dependencies))):
                    for depends_on in get_abs_path_to_dep_for_all_deps(
                            dep).values():
                        if depends_on not in previously_added_to_batch:
                            msg.append("{} still depending on: {}".format(
                                dep.name, depends_on.name))
                            break
                    else:
                        next_batch.append(dependencies.pop(i))

                if not next_batch and dependencies:
                    raise AssertionError(
                        "No batch calculated and dependencies still available.\n\n"
                        "Remaining:\n%s\n\nFinished:\n%s\n\nAll:\n%s" % (
                            "\n".join(msg),
                            "\n".join(
                                str(x.name)
                                for x in previously_added_to_batch),
                            "\n".join(initial),
                        ))

            previously_added_to_batch.update(next_batch)
            return next_batch

    else:
        from ._synchronous_executor import SynchronousExecutor

        executor = SynchronousExecutor()

        def calculate_next_batch(dependencies):
            # The next is the first one in the list.
            return [dependencies.pop(0)]

    progress = 0
    total_progress = len(dependencies)
    while len(dependencies) > 0:
        deps = calculate_next_batch(dependencies)
        progress += len(deps)
        dep_to_future = {}
        first = True
        print_str = ", ".join(dep.name for dep in deps)
        for dep in deps:
            if len(deps) == 1 or first:
                click.secho(output_separator,
                            fg="black",
                            bold=True,
                            color=_click_echo_color)

            # Checks before execution.
            if dep.ignored:
                click.secho(dep.name,
                            fg="blue",
                            bold=True,
                            color=_click_echo_color,
                            nl=False)
                click.secho(" ignored", fg="yellow", color=_click_echo_color)
                continue

            if dep.skipped:
                click.secho(dep.name,
                            fg="blue",
                            bold=True,
                            color=_click_echo_color,
                            nl=False)
                click.secho(" skipped", fg="magenta", color=_click_echo_color)
                continue

            if not required_files_filter(dep, quiet=False):
                continue

            formatted_command = format_command(command, dep)

            working_dir = None
            if not here:
                working_dir = dep.abspath

            if len(deps) == 1 or first:
                msg = "%s (%d/%d)" % (print_str, progress, total_progress)
                click.secho(msg, fg="blue", bold=True, color=_click_echo_color)
            if verbose or dry_run:
                command_to_print = " ".join(
                    arg.replace(" ", "\\ ") for arg in formatted_command)
                echo_verbose_msg("executing: " + command_to_print)
                if working_dir:
                    echo_verbose_msg("from:      " + working_dir)

            if not dry_run:
                dep_to_future[dep] = executor.submit(execute,
                                                     formatted_command,
                                                     working_dir,
                                                     buffer_output)

            first = False

        for dep, future in dep_to_future.items():
            try:
                returncode, stdout, stderr, command_time = future.result()
            except Exception as e:
                # Usually should only fail on CancelledException
                returncode = 1
                stdout = ""
                stderr = str(e)
                command_time = 0.0

            exit_codes.append(returncode)

            click.secho(
                "Finished: {} in {:.2f}s".format(dep.name, command_time),
                fg="white",
                bold=False,
                color=_click_echo_color,
            )
            if buffer_output:
                if stdout:
                    click.secho("=== STDOUT ===")

                    if type(stdout) is not str:
                        stdout = stdout.decode("utf-8", errors="replace")

                    click.secho(stdout)

                if stderr:
                    click.secho("=== STDERR ===", fg="red", bold=True)
                    if type(stderr) is not str:
                        stderr = stderr.decode("utf-8", errors="replace")
                    click.secho(stderr, fg="red", bold=True)

            if verbose:
                if jobs > 1:
                    echo_verbose_msg("return code for project {}: {}".format(
                        dep.name, returncode))
                else:
                    echo_verbose_msg("return code: {}".format(returncode))

            if returncode != 0:
                error_msg = "Command failed (project: {})".format(dep.name)
                error_messages.append(error_msg)
                echo_error(error_msg)

                if not continue_on_failure:
                    # Cancel what can be cancelled in case we had a failure.
                    for f in dep_to_future.values():
                        f.cancel()

        if not continue_on_failure:
            keep_on_going = True
            for returncode in exit_codes:
                if returncode != 0:
                    keep_on_going = False
                    break

            if not keep_on_going:
                break

    # If we have errors and we kept on going or executed multiple jobs, print a summary of the
    # errors at the end.
    if continue_on_failure or jobs > 1:
        if error_messages:
            echo_error(output_separator)
            echo_error("A list of all errors follow:")
        for msg in error_messages:
            echo_error(msg)

    return exit_codes
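A short sketch of the cancel-on-failure behaviour above (placeholder job): Future.cancel() only prevents tasks that have not started yet, so running or finished futures are unaffected.

from concurrent.futures import ThreadPoolExecutor

def job(n):
    return 0 if n < 3 else 1  # a non-zero return code simulates a failure

executor = ThreadPoolExecutor(max_workers=2)
futures = [executor.submit(job, n) for n in range(6)]
for future in futures:
    if future.result() != 0:  # first failing job
        for pending in futures:
            pending.cancel()  # no-op for futures already running or done
        break
executor.shutdown()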
Example #30
 def __init__(self):
     self.executor = ThreadPoolExecutor()
     self.aktuels = []
Example #31
class VSphereCheck(AgentCheck):
    __NAMESPACE__ = 'vsphere'

    def __new__(cls, name, init_config, instances):
        # type: (Type[VSphereCheck], str, Dict[str, Any], List[Dict[str, Any]]) -> VSphereCheck
        """For backward compatibility reasons, there are two side-by-side implementations of the VSphereCheck.
        Instantiating this class will return an instance of the legacy integration for existing users and
        an instance of the new implementation for new users."""
        if is_affirmative(instances[0].get('use_legacy_check_version', True)):
            from datadog_checks.vsphere.legacy.vsphere_legacy import VSphereLegacyCheck

            return VSphereLegacyCheck(name, init_config, instances)  # type: ignore
        return super(VSphereCheck, cls).__new__(cls)

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        super(VSphereCheck, self).__init__(*args, **kwargs)
        instance = cast(InstanceConfig, self.instance)
        self._config = VSphereConfig(instance, self.init_config, self.log)

        self.latest_event_query = get_current_datetime()
        self.infrastructure_cache = InfrastructureCache(interval_sec=self._config.refresh_infrastructure_cache_interval)
        self.metrics_metadata_cache = MetricsMetadataCache(
            interval_sec=self._config.refresh_metrics_metadata_cache_interval
        )
        self.api = cast(VSphereAPI, None)
        self.api_rest = cast(VSphereRestAPI, None)
        # Do not override `AgentCheck.hostname`
        self._hostname = None
        self.thread_pool = ThreadPoolExecutor(max_workers=self._config.threads_count)
        self.check_initializations.append(self.initiate_api_connection)

    def initiate_api_connection(self):
        # type: () -> None
        try:
            self.log.debug(
                "Connecting to the vCenter API %s with username %s...", self._config.hostname, self._config.username
            )
            self.api = VSphereAPI(self._config, self.log)
            self.log.debug("Connected")
        except APIConnectionError:
            self.log.error("Cannot authenticate to vCenter API. The check will not run.")
            self.service_check(SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=self._config.base_tags, hostname=None)
            raise

        if self._config.should_collect_tags:
            try:
                version_info = self.api.get_version()
                major_version = int(version_info.version_str[0])

                if major_version >= 7:
                    try:
                        # Try to connect to REST API vSphere v7
                        self.api_rest = VSphereRestAPI(self._config, self.log, False)
                        return
                    except Exception:
                        self.log.debug("REST API of vSphere 7 not detected, falling back to the old API.")
                self.api_rest = VSphereRestAPI(self._config, self.log, True)
            except Exception as e:
                self.log.error("Cannot connect to vCenter REST API. Tags won't be collected. Error: %s", e)

    def refresh_metrics_metadata_cache(self):
        # type: () -> None
        """
        Request the list of counters (metrics) from vSphere and store them in a cache.
        """
        self.log.debug(
            "Refreshing the metrics metadata cache. Collecting all counters metadata for collection_level=%d",
            self._config.collection_level,
        )
        t0 = Timer()
        counters = self.api.get_perf_counter_by_level(self._config.collection_level)
        self.gauge(
            "datadog.vsphere.refresh_metrics_metadata_cache.time",
            t0.total(),
            tags=self._config.base_tags,
            raw=True,
            hostname=self._hostname,
        )
        self.log.debug("Collected %d counters metadata in %.3f seconds.", len(counters), t0.total())

        for mor_type in self._config.collected_resource_types:
            allowed_counters = []
            for c in counters:
                metric_name = format_metric_name(c)
                if metric_name in ALLOWED_METRICS_FOR_MOR[mor_type] and not is_metric_excluded_by_filters(
                    metric_name, mor_type, self._config.metric_filters
                ):
                    allowed_counters.append(c)
            metadata = {c.key: format_metric_name(c) for c in allowed_counters}  # type: Dict[CounterId, MetricName]
            self.metrics_metadata_cache.set_metadata(mor_type, metadata)
            self.log.debug(
                "Set metadata for mor_type %s: %s",
                mor_type,
                metadata,
            )

        # TODO: Later - Understand how much data actually changes between check runs
        # Apparently only when the server restarts?
        # https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.5.html

    def collect_tags(self, infrastructure_data):
        # type: (InfrastructureData) -> ResourceTags
        """
        Fetch all the tags, build tags for each monitored resource, and store all of that into the tags_cache.
        """
        if not self.api_rest:
            return {}

        # In order to be more efficient in tag collection, the infrastructure data is filtered as much as possible.
        # All filters are applied except the ones based on tags of course.
        resource_filters_without_tags = [f for f in self._config.resource_filters if not isinstance(f, TagFilter)]
        filtered_infra_data = {
            mor: props
            for mor, props in iteritems(infrastructure_data)
            if isinstance(mor, tuple(self._config.collected_resource_types))
            and is_resource_collected_by_filters(mor, infrastructure_data, resource_filters_without_tags)
        }

        t0 = Timer()
        mors_list = list(filtered_infra_data.keys())
        try:
            mor_tags = self.api_rest.get_resource_tags_for_mors(mors_list)
        except Exception as e:
            self.log.error("Failed to collect tags: %s", e)
            return {}

        self.gauge(
            'datadog.vsphere.query_tags.time',
            t0.total(),
            tags=self._config.base_tags,
            raw=True,
            hostname=self._hostname,
        )

        return mor_tags

    def refresh_infrastructure_cache(self):
        # type: () -> None
        """Fetch the complete infrastructure, generate tags for each monitored resources and store all of that
        into the infrastructure_cache. It also computes the resource `hostname` property to be used when submitting
        metrics for this mor."""
        self.log.debug("Refreshing the infrastructure cache...")
        t0 = Timer()
        infrastructure_data = self.api.get_infrastructure()
        self.gauge(
            "datadog.vsphere.refresh_infrastructure_cache.time",
            t0.total(),
            tags=self._config.base_tags,
            raw=True,
            hostname=self._hostname,
        )
        self.log.debug("Infrastructure cache refreshed in %.3f seconds.", t0.total())
        self.log.debug("Infrastructure cache: %s", infrastructure_data)

        all_tags = {}
        if self._config.should_collect_tags:
            all_tags = self.collect_tags(infrastructure_data)
        self.infrastructure_cache.set_all_tags(all_tags)

        for mor, properties in iteritems(infrastructure_data):
            if not isinstance(mor, tuple(self._config.collected_resource_types)):
                # Do nothing for the resource types we do not collect
                continue

            mor_name = to_string(properties.get("name", "unknown"))
            mor_type_str = MOR_TYPE_AS_STRING[type(mor)]
            hostname = None
            tags = []

            if isinstance(mor, vim.VirtualMachine):
                power_state = properties.get("runtime.powerState")
                if power_state != vim.VirtualMachinePowerState.poweredOn:
                    # Skipping because the VM is not powered on
                    # TODO: Sometimes VM are "poweredOn" but "disconnected" and thus have no metrics
                    self.log.debug("Skipping VM %s in state %s", mor_name, to_string(power_state))
                    continue

                # Hosts are not considered as parents of the VMs they run, we use the `runtime.host` property
                # to get the name of the ESXi host
                runtime_host = properties.get("runtime.host")
                runtime_host_props = {}  # type: InfrastructureDataItem
                if runtime_host:
                    if runtime_host in infrastructure_data:
                        runtime_host_props = infrastructure_data.get(runtime_host, {})
                    else:
                        self.log.debug("Missing runtime.host details for VM %s", mor_name)
                runtime_hostname = to_string(runtime_host_props.get("name", "unknown"))
                tags.append('vsphere_host:{}'.format(runtime_hostname))

                if self._config.use_guest_hostname:
                    hostname = properties.get("guest.hostName", mor_name)
                else:
                    hostname = mor_name
            elif isinstance(mor, vim.HostSystem):
                hostname = mor_name
            else:
                tags.append('vsphere_{}:{}'.format(mor_type_str, mor_name))

            parent = properties.get('parent')
            runtime_host = properties.get('runtime.host')
            if parent is not None:
                tags.extend(get_tags_recursively(parent, infrastructure_data, self._config))
            if runtime_host is not None:
                tags.extend(
                    get_tags_recursively(
                        runtime_host, infrastructure_data, self._config, include_only=['vsphere_cluster']
                    )
                )
            tags.append('vsphere_type:{}'.format(mor_type_str))

            # Attach tags from fetched attributes.
            tags.extend(properties.get('attributes', []))

            resource_tags = self.infrastructure_cache.get_mor_tags(mor) + tags
            if not is_resource_collected_by_filters(
                mor,
                infrastructure_data,
                self._config.resource_filters,
                resource_tags,
            ):
                # The resource does not match the specified whitelist/blacklist patterns.
                self.log.debug(
                    "Skipping resource not matched by filters. resource=`%s` tags=`%s`", mor_name, resource_tags
                )
                continue

            mor_payload = {"tags": tags}  # type: Dict[str, Any]

            if hostname:
                mor_payload['hostname'] = hostname

            self.infrastructure_cache.set_mor_props(mor, mor_payload)

    def submit_metrics_callback(self, query_results):
        # type: (List[vim.PerformanceManager.EntityMetricBase]) -> None
        """
        Callback of the collection of metrics. This is run in the main thread!

        `query_results` currently contain results of one resource type in practice, but this function is generic
        and can handle results with mixed resource types.
        """

        # `have_instance_value` is used later to avoid collecting aggregated metrics
        # when instance metrics are collected.
        have_instance_value = defaultdict(set)  # type: Dict[Type[vim.ManagedEntity], Set[MetricName]]
        for results_per_mor in query_results:
            resource_type = type(results_per_mor.entity)
            metadata = self.metrics_metadata_cache.get_metadata(resource_type)
            for result in results_per_mor.value:
                if result.id.instance:
                    have_instance_value[resource_type].add(metadata[result.id.counterId])

        for results_per_mor in query_results:
            mor_props = self.infrastructure_cache.get_mor_props(results_per_mor.entity)
            if mor_props is None:
                self.log.debug(
                    "Skipping results for mor %s because the integration is not yet aware of it. If this is a problem"
                    " you can increase the value of 'refresh_infrastructure_cache_interval'.",
                    results_per_mor.entity,
                )
                continue
            self.log.debug(
                "Retrieved mor props for entity %s: %s",
                results_per_mor.entity,
                mor_props,
            )
            resource_type = type(results_per_mor.entity)
            metadata = self.metrics_metadata_cache.get_metadata(resource_type)
            for result in results_per_mor.value:
                metric_name = metadata.get(result.id.counterId)
                if self.log.isEnabledFor(logging.DEBUG):
                    # Use isEnabledFor to avoid unnecessary processing
                    self.log.debug(
                        "Processing metric `%s`: resource_type=`%s`, result=`%s`",
                        metric_name,
                        resource_type,
                        str(result).replace("\n", "\\n"),
                    )
                if not metric_name:
                    # Fail-safe
                    self.log.debug(
                        "Skipping value for counter %s, because the integration doesn't have metadata about it. If this"
                        " is a problem you can increase the value of 'refresh_metrics_metadata_cache_interval'",
                        result.id.counterId,
                    )
                    continue

                if not result.value:
                    self.log.debug("Skipping metric %s because the value is empty", to_string(metric_name))
                    continue

                # Get the most recent value that isn't negative
                valid_values = [v for v in result.value if v >= 0]
                if not valid_values:
                    self.log.debug(
                        "Skipping metric %s because the value returned by vCenter"
                        " is negative (i.e. the metric is not yet available). values: %s",
                        to_string(metric_name),
                        list(result.value),
                    )
                    continue

                tags = []
                if should_collect_per_instance_values(self._config, metric_name, resource_type) and (
                    metric_name in have_instance_value[resource_type]
                ):
                    instance_value = result.id.instance
                    # When collecting per instance values, it's possible that both aggregated metric and per instance
                    # metrics are received. In that case, the metric with no instance value is skipped.
                    if not instance_value:
                        continue
                    instance_tag_key = get_mapped_instance_tag(metric_name)
                    tags.append('{}:{}'.format(instance_tag_key, instance_value))

                vsphere_tags = self.infrastructure_cache.get_mor_tags(results_per_mor.entity)
                mor_tags = mor_props['tags'] + vsphere_tags

                if resource_type in HISTORICAL_RESOURCES:
                    # Tags are attached to the metrics
                    tags.extend(mor_tags)
                    hostname = None
                else:
                    # Tags are (mostly) submitted as external host tags.
                    hostname = to_string(mor_props.get('hostname'))
                    if self._config.excluded_host_tags:
                        tags.extend([t for t in mor_tags if t.split(":", 1)[0] in self._config.excluded_host_tags])

                tags.extend(self._config.base_tags)

                value = valid_values[-1]
                if metric_name in PERCENT_METRICS:
                    # Convert the percentage to a float.
                    value /= 100.0

                self.log.debug(
                    "Submit metric: name=`%s`, value=`%s`, hostname=`%s`, tags=`%s`",
                    metric_name,
                    value,
                    hostname,
                    tags,
                )
                # vSphere "rates" should be submitted as gauges (rate is precomputed).
                self.gauge(to_string(metric_name), value, hostname=hostname, tags=tags)

    def query_metrics_wrapper(self, query_specs):
        # type: (List[vim.PerformanceManager.QuerySpec]) -> List[vim.PerformanceManager.EntityMetricBase]
        """Just an instrumentation wrapper around the VSphereAPI.query_metrics method
        Warning: called in threads
        """
        t0 = Timer()
        metrics_values = self.api.query_metrics(query_specs)
        self.histogram(
            'datadog.vsphere.query_metrics.time',
            t0.total(),
            tags=self._config.base_tags,
            raw=True,
            hostname=self._hostname,
        )
        return metrics_values

    def make_query_specs(self):
        # type: () -> Iterable[List[vim.PerformanceManager.QuerySpec]]
        """
        Build query specs using MORs and metrics metadata.
        """
        server_current_time = self.api.get_current_time()
        self.log.debug("Server current datetime: %s", server_current_time)
        for resource_type in self._config.collected_resource_types:
            mors = self.infrastructure_cache.get_mors(resource_type)
            counters = self.metrics_metadata_cache.get_metadata(resource_type)
            metric_ids = []  # type: List[vim.PerformanceManager.MetricId]
            for counter_key, metric_name in iteritems(counters):
                # PerformanceManager.MetricId `instance` kwarg:
                # - An asterisk (*) to specify all instances of the metric for the specified counterId
                # - Double-quotes ("") to specify aggregated statistics
                # More info https://code.vmware.com/apis/704/vsphere/vim.PerformanceManager.MetricId.html
                if should_collect_per_instance_values(self._config, metric_name, resource_type):
                    instance = "*"
                else:
                    instance = ''

                metric_ids.append(vim.PerformanceManager.MetricId(counterId=counter_key, instance=instance))

            for batch in self.make_batch(mors, metric_ids, resource_type):
                query_specs = []
                for mor, metrics in iteritems(batch):
                    query_spec = vim.PerformanceManager.QuerySpec()  # type: vim.PerformanceManager.QuerySpec
                    query_spec.entity = mor
                    query_spec.metricId = metrics
                    if resource_type in REALTIME_RESOURCES:
                        query_spec.intervalId = REALTIME_METRICS_INTERVAL_ID
                        query_spec.maxSample = 1  # Request a single datapoint
                    else:
                        # We cannot use `maxSample` for historical metrics, let's specify a timewindow that will
                        # contain at least one element
                        query_spec.startTime = server_current_time - dt.timedelta(hours=2)
                    query_specs.append(query_spec)
                if query_specs:
                    yield query_specs

    def collect_metrics_async(self):
        # type: () -> None
        """Run queries in multiple threads and wait for completion."""
        tasks = []  # type: List[Any]
        try:
            for query_specs in self.make_query_specs():
                tasks.append(self.thread_pool.submit(self.query_metrics_wrapper, query_specs))
        except Exception as e:
            self.log.warning("Unable to schedule all metric collection tasks: %s", e)
        finally:
            self.log.debug("Queued all %d tasks, waiting for completion.", len(tasks))
            for future in as_completed(tasks):
                future_exc = future.exception()
                if isinstance(future_exc, vmodl.fault.InvalidArgument):
                    # The query was invalid or the resource does not have values for this metric.
                    continue
                elif future_exc is not None:
                    self.log.warning("A metric collection API call failed with the following error: %s", future_exc)
                    continue

                results = future.result()
                if not results:
                    self.log.debug("A metric collection API call did not return data.")
                    continue

                try:
                    # Callback is called in the main thread
                    self.submit_metrics_callback(results)
                except Exception as e:
                    self.log.exception(
                        "Exception '%s' raised during the submit_metrics_callback. "
                        "Ignoring the error and continuing execution.",
                        e,
                    )

    def make_batch(
        self,
        mors,  # type: Iterable[vim.ManagedEntity]
        metric_ids,  # type: List[vim.PerformanceManager.MetricId]
        resource_type,  # type: Type[vim.ManagedEntity]
    ):  # type: (...) -> Generator[MorBatch, None, None]
        """Iterates over mor and generate batches with a fixed number of metrics to query.
        Querying multiple resource types in the same call is error prone if we query a cluster metric. Indeed,
        cluster metrics result in an unpredictable number of internal metric queries which all count towards
        max_query_metrics. Therefore often collecting a single cluster metric can make the whole call to fail. That's
        why we should never batch cluster metrics with anything else.
        """
        # Safeguard, let's avoid collecting multiple resources in the same call
        mors_filtered = [m for m in mors if isinstance(m, resource_type)]  # type: List[vim.ManagedEntity]

        if resource_type == vim.ClusterComputeResource:
            # Cluster metrics are unpredictable and a single call can max out the limit. Always collect them one by one.
            max_batch_size = 1  # type: float
        elif resource_type in REALTIME_RESOURCES or self._config.max_historical_metrics < 0:
            # Queries are not limited by vCenter
            max_batch_size = self._config.metrics_per_query
        else:
            # Collection is limited by the value of `max_query_metrics`
            if self._config.metrics_per_query < 0:
                max_batch_size = self._config.max_historical_metrics
            else:
                max_batch_size = min(self._config.metrics_per_query, self._config.max_historical_metrics)

        batch = defaultdict(list)  # type: MorBatch
        batch_size = 0
        for m in mors_filtered:
            for metric_id in metric_ids:
                if batch_size == max_batch_size:
                    yield batch
                    batch = defaultdict(list)
                    batch_size = 0
                batch[m].append(metric_id)
                batch_size += 1
        # Do not yield an empty batch
        if batch:
            yield batch

    def submit_external_host_tags(self):
        # type: () -> None
        """Send external host tags to the Datadog backend. This is only useful for a REALTIME instance because
        only VMs and Hosts appear as 'datadog hosts'."""
        external_host_tags = []

        for resource_type in REALTIME_RESOURCES:
            for mor in self.infrastructure_cache.get_mors(resource_type):
                mor_props = self.infrastructure_cache.get_mor_props(mor)
                mor_tags = self.infrastructure_cache.get_mor_tags(mor)
                hostname = mor_props.get('hostname')
                # Safeguard if some mors have a None hostname
                if not hostname:
                    continue

                mor_tags = mor_props['tags'] + mor_tags
                tags = [t for t in mor_tags if t.split(':')[0] not in self._config.excluded_host_tags]
                tags.extend(self._config.base_tags)
                external_host_tags.append((hostname, {self.__NAMESPACE__: tags}))

        if external_host_tags:
            self.set_external_tags(external_host_tags)

    def collect_events(self):
        # type: () -> None
        self.log.debug("Starting events collection (query start time: %s).", self.latest_event_query)
        latest_event_time = None
        collect_start_time = get_current_datetime()
        try:
            t0 = Timer()
            new_events = self.api.get_new_events(start_time=self.latest_event_query)
            self.gauge(
                'datadog.vsphere.collect_events.time',
                t0.total(),
                tags=self._config.base_tags,
                raw=True,
                hostname=self._hostname,
            )
            self.log.debug("Got %s new events from the vCenter event manager", len(new_events))
            event_config = {'collect_vcenter_alarms': True}
            for event in new_events:
                self.log.debug(
                    "Processing event with id:%s, type:%s: msg:%s", event.key, type(event), event.fullFormattedMessage
                )
                normalized_event = VSphereEvent(event, event_config, self._config.base_tags)
                # Can return None if the event is filtered out
                event_payload = normalized_event.get_datadog_payload()
                if event_payload is not None:
                    self.log.debug(
                        "Submit event with id:%s, type:%s: msg:%s", event.key, type(event), event.fullFormattedMessage
                    )
                    self.event(event_payload)
                if latest_event_time is None or event.createdTime > latest_event_time:
                    latest_event_time = event.createdTime
        except Exception as e:
            # Don't get stuck on a failure to fetch an event
            # Ignore them for next pass
            self.log.warning("Unable to fetch Events %s", e)

        if latest_event_time is not None:
            self.latest_event_query = latest_event_time + dt.timedelta(seconds=1)
        else:
            # Let's set `self.latest_event_query` to `collect_start_time` as safeguard in case no events are reported
            # OR something bad happened (which might happen again indefinitely).
            self.latest_event_query = collect_start_time

    def check(self, _):
        # type: (Any) -> None
        self._hostname = datadog_agent.get_hostname()
        # Assert the health of the vCenter API by getting the version, and submit the service_check accordingly
        try:
            version_info = self.api.get_version()
            if self.is_metadata_collection_enabled():
                self.set_metadata('version', version_info.version_str)
        except Exception:
            # Explicitly do not attach any host to the service checks.
            self.log.exception("The vCenter API is not responding. The check will not run.")
            self.service_check(SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=self._config.base_tags, hostname=None)
            raise
        else:
            self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK, tags=self._config.base_tags, hostname=None)

        # Collect and submit events
        if self._config.should_collect_events:
            self.collect_events()

        if self._config.collect_events_only:
            return

        # Update the value of `max_query_metrics` if needed
        if self._config.is_historical():
            try:
                vcenter_max_hist_metrics = self.api.get_max_query_metrics()
                if vcenter_max_hist_metrics < self._config.max_historical_metrics:
                    self.log.warning(
                        "The integration was configured with `max_query_metrics: %d` but your vCenter has a"
                        "limit of %d which is lower. Ignoring your configuration in favor of the vCenter value."
                        "To update the vCenter value, please update the `%s` field",
                        self._config.max_historical_metrics,
                        vcenter_max_hist_metrics,
                        MAX_QUERY_METRICS_OPTION,
                    )
                    self._config.max_historical_metrics = vcenter_max_hist_metrics
            except Exception:
                self._config.max_historical_metrics = DEFAULT_MAX_QUERY_METRICS
                self.log.info(
                    "Could not fetch the value of %s, setting `max_historical_metrics` to %d.",
                    MAX_QUERY_METRICS_OPTION,
                    DEFAULT_MAX_QUERY_METRICS,
                )

        # Refresh the metrics metadata cache
        if self.metrics_metadata_cache.is_expired():
            with self.metrics_metadata_cache.update():
                self.refresh_metrics_metadata_cache()

        # Refresh the infrastructure cache
        if self.infrastructure_cache.is_expired():
            with self.infrastructure_cache.update():
                self.refresh_infrastructure_cache()
            # Submit host tags as soon as we have fresh data
            self.submit_external_host_tags()

        # Submit the number of VMs that are monitored
        for resource_type in self._config.collected_resource_types:
            for mor in self.infrastructure_cache.get_mors(resource_type):
                mor_props = self.infrastructure_cache.get_mor_props(mor)
                # Explicitly do not attach any host to those metrics.
                resource_tags = mor_props.get('tags', [])
                self.count(
                    '{}.count'.format(MOR_TYPE_AS_STRING[resource_type]),
                    1,
                    tags=self._config.base_tags + resource_tags,
                    hostname=None,
                )

        # Creating a thread pool and starting metric collection
        self.log.debug("Starting metric collection in %d threads.", self._config.threads_count)
        self.collect_metrics_async()
        self.log.debug("Metric collection completed.")
예제 #32
0
class AktuelFinder:
    def __init__(self):
        self.exception = False
        self.executor = ThreadPoolExecutor()
        self.markets = {'BİM': BimAktuel, 'A101': A101Aktuel, 'ŞOK': SokAktuel}
        self.aktuel_db = AktuelDB('aktuels')
        self.active_aktuels = {}
        self.still_active_aktuels = {}
        self.expired_aktuels = {}
        self.new_aktuels = {}

    def get_aktuels(self):
        aktuels = []
        threads = {}

        for name, market in self.markets.items():
            obj = market()
            threads[name] = self.executor.submit(obj.get_aktuels)
        for name, thread in threads.items():
            try:
                aktuels += thread.result()
            except requests.exceptions.ConnectionError as e:
                # print(e, 11)
                aktuels += self.aktuel_db.read_aktuel(name)
                self.exception = True

        aktuels = sorted(aktuels, key=lambda k: k['tarih'], reverse=False)
        aktuels = sorted(aktuels, key=lambda k: k['magaza'], reverse=False)

        return aktuels

    def show_summary(self):
        saved_aktuels = self.aktuel_db.read_aktuels()
        aktuels = self.get_aktuels()

        self.active_aktuels = self.get_active_aktuels(saved_aktuels)
        self.still_active_aktuels = {}
        self.expired_aktuels = self.get_expired_aktuels(
            aktuels, self.active_aktuels)
        self.new_aktuels = self.get_new_aktuels(aktuels, self.active_aktuels)

        if self.exception:
            print('')

        if self.active_aktuels:
            print("Chosen campaigns:")
            for key, active_aktuel in self.active_aktuels.items():
                print(active_aktuel['magaza'], active_aktuel['aktuel'])
            print('')
        else:
            pass

        if self.expired_aktuels:
            print("Expired campaigns:")
            for key, expired_aktuel in self.expired_aktuels.items():
                print(
                    str(key) + '.', expired_aktuel['magaza'],
                    expired_aktuel['aktuel'])
            print(
                "* Please enter the IDs of campaigns you want to delete by adding ':' to beginning. (Enter :0 for deleting all of them)\n"
            )
        else:
            pass

        if self.new_aktuels:
            print("New campaigns:")
            for key, new_aktuel in self.new_aktuels.items():
                print(
                    str(key) + '.', new_aktuel['magaza'], new_aktuel['aktuel'])
            print(
                "* Please enter the IDs of campaigns you choose. (Enter 0 for choose all of them)\n"
            )
        else:
            pass

        if not self.expired_aktuels and not self.new_aktuels:
            print("No new campaign.")
            input()

    @staticmethod
    def get_active_aktuels(saved_aktuels):
        active_aktuels = {}
        count = 1

        for aktuel in saved_aktuels:
            if aktuel['durum'] == 'active':
                active_aktuels[count] = aktuel
                count += 1
            else:
                pass

        return active_aktuels

    def get_expired_aktuels(self, aktuels, active_aktuels):
        expired_aktuels = {}
        count = 1
        count0 = 1

        for key, active_aktuel in active_aktuels.items():
            active_market = active_aktuel['magaza']
            active_aktuel_name = active_aktuel['aktuel']

            exist = False
            for aktuel in aktuels:
                if aktuel['magaza'] == active_market and aktuel[
                        'aktuel'] == active_aktuel_name:
                    exist = True
                    break
            if not exist:
                expired_aktuels[count] = active_aktuel
                count += 1
            else:
                self.still_active_aktuels[count0] = active_aktuel
                count0 += 1

        return expired_aktuels

    @staticmethod
    def get_new_aktuels(aktuels, active_aktuels):
        new_aktuels = {}
        count = 1

        for aktuel in aktuels:
            market = aktuel['magaza']
            aktuel_name = aktuel['aktuel']

            exist = False
            for key, active_aktuel in active_aktuels.items():
                if active_aktuel['magaza'] == market and active_aktuel[
                        'aktuel'] == aktuel_name:
                    exist = True
                    break
            if not exist:
                new_aktuels[count] = aktuel
                count += 1
        return new_aktuels

    def command(self):
        user_inputs = []
        if self.expired_aktuels or self.new_aktuels:
            print(
                "* Split every command with ',' character. (Enter '#' for saving the session)\n"
            )
            while not self.command_control(user_inputs, len(self.new_aktuels),
                                           len(self.expired_aktuels)):
                user_inputs = input("Command Line: ")
                user_inputs = self.command_optimizer(user_inputs)
            self.command_execution(user_inputs)
            self.save_aktuels()
        else:
            pass

    @staticmethod
    def command_control(user_inputs, new_max, expired_max):
        try:
            if not user_inputs:
                return False
            for key in user_inputs:
                if key == '#':
                    return True
                elif key[0] == ':':
                    if not key[1:].isnumeric() or int(key[1:]) < 0 or int(
                            key[1:]) > expired_max or expired_max == 0:
                        return False
                elif not key.isnumeric() or int(key) < 0 or int(
                        key) > new_max or new_max == 0:
                    return False
                else:
                    pass
            return True
        except Exception as e:
            print(e, 12)
            return False

    @staticmethod
    def command_optimizer(user_inputs):
        user_inputs = sorted(set(''.join(user_inputs.split()).split(',')))
        for a in user_inputs:
            if not a:
                user_inputs.remove(a)

        return user_inputs

    def command_execution(self, user_inputs):
        for user_input in user_inputs:
            if user_input == '#':
                break
            elif user_input[0] == ':':
                if user_input[1] == '0':
                    self.expired_aktuels = {}
                else:
                    self.expired_aktuels.pop(int(user_input[1:]), None)
            elif user_input == '0':
                for key, new_aktuel in self.new_aktuels.items():
                    new_aktuel['durum'] = 'active'
            else:
                self.new_aktuels[int(user_input)]['durum'] = 'active'

    def save_aktuels(self):
        aktuels = []
        for key, value in self.new_aktuels.items():
            aktuels.append(value)
        for key, value in self.expired_aktuels.items():
            aktuels.append(value)
        for key, value in self.still_active_aktuels.items():
            aktuels.append(value)

        self.aktuel_db.save_aktuels(aktuels)
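A minimal sketch (hypothetical fetchers and cache) of the per-market fan-out in get_aktuels above: submit one task per source, then fall back to saved data when a task raises a connection error.

from concurrent.futures import ThreadPoolExecutor

def fetch_a():
    return ["a1", "a2"]

def fetch_b():
    raise ConnectionError("network down")

CACHE = {"A": ["a_cached"], "B": ["b_cached"]}  # plays the role of aktuel_db

def gather(sources):
    results = []
    with ThreadPoolExecutor() as executor:
        futures = {name: executor.submit(fn) for name, fn in sources.items()}
        for name, future in futures.items():
            try:
                results += future.result()
            except ConnectionError:
                results += CACHE[name]  # same fallback idea as read_aktuel(name)
    return results

print(gather({"A": fetch_a, "B": fetch_b}))  # ['a1', 'a2', 'b_cached']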
예제 #33
0
def _retrieve_results(tasks, max_workers):
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        result_futures = [executor.submit(task.result) for task in tasks]
    return [future.result() for future in result_futures]
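This works because leaving the `with` block calls executor.shutdown(wait=True), so every future is already finished before .result() runs outside the block. An equivalent, explicit expansion (with a dummy task type added for illustration):

from concurrent.futures import ThreadPoolExecutor

class DummyTask:
    def __init__(self, value):
        self.value = value

    def result(self):
        return self.value * 10

def retrieve_results_explicit(tasks, max_workers):
    executor = ThreadPoolExecutor(max_workers=max_workers)
    result_futures = [executor.submit(task.result) for task in tasks]
    executor.shutdown(wait=True)  # what the `with` statement does implicitly on exit
    return [future.result() for future in result_futures]

print(retrieve_results_explicit([DummyTask(1), DummyTask(2)], max_workers=2))  # [10, 20]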
예제 #34
0
파일: app.py 프로젝트: waleedeh/bolt-python
    def __init__(
        self,
        *,
        logger: Optional[logging.Logger] = None,
        # Used in logger
        name: Optional[str] = None,
        # Set True when you run this app on a FaaS platform
        process_before_response: bool = False,
        # Basic Information > Credentials > Signing Secret
        signing_secret: Optional[str] = None,
        # for single-workspace apps
        token: Optional[str] = None,
        token_verification_enabled: bool = True,
        client: Optional[WebClient] = None,
        # for multi-workspace apps
        authorize: Optional[Callable[..., AuthorizeResult]] = None,
        installation_store: Optional[InstallationStore] = None,
        # for v1.0.x compatibility
        installation_store_bot_only: Optional[bool] = None,
        # for the OAuth flow
        oauth_settings: Optional[OAuthSettings] = None,
        oauth_flow: Optional[OAuthFlow] = None,
        # No need to set (the value is used only in response to ssl_check requests)
        verification_token: Optional[str] = None,
    ):
        """Bolt App that provides functionalities to register middleware/listeners

        :param name: The application name that will be used in logging.
            If absent, the source file name will be used instead.
        :param process_before_response: True if this app runs on Function as a Service. (Default: False)
        :param signing_secret: The Signing Secret value used for verifying requests from Slack.
        :param token: The bot access token required only for single-workspace app.
        :param token_verification_enabled: Verifies the validity of the given token if True.
        :param client: The singleton slack_sdk.WebClient instance for this app.
        :param authorize: The function to authorize an incoming request from Slack
            by checking if there is a team/user in the installation data.
        :param installation_store: The module offering save/find operations of installation data
        :param installation_store_bot_only: Use InstallationStore#find_bot if True (Default: False)
        :param oauth_settings: The settings related to Slack app installation flow (OAuth flow)
        :param oauth_flow: Manually instantiated slack_bolt.oauth.OAuthFlow.
            This is always prioritized over oauth_settings.
        :param verification_token: Deprecated verification mechanism.
            This can be used only for ssl_check requests.
        """
        signing_secret = signing_secret or os.environ.get(
            "SLACK_SIGNING_SECRET")
        token = token or os.environ.get("SLACK_BOT_TOKEN")

        self._name: str = name or inspect.stack()[1].filename.split(
            os.path.sep)[-1]
        self._signing_secret: str = signing_secret

        self._verification_token: Optional[
            str] = verification_token or os.environ.get(
                "SLACK_VERIFICATION_TOKEN", None)
        self._framework_logger = logger or get_bolt_logger(App)

        self._token: Optional[str] = token

        if client is not None:
            if not isinstance(client, WebClient):
                raise BoltError(error_client_invalid_type())
            self._client = client
            self._token = client.token
            if token is not None:
                self._framework_logger.warning(
                    warning_client_prioritized_and_token_skipped())
        else:
            self._client = create_web_client(
                token)  # NOTE: the token here can be None

        # --------------------------------------
        # Authorize & OAuthFlow initialization
        # --------------------------------------

        self._authorize: Optional[Authorize] = None
        if authorize is not None:
            if oauth_settings is not None or oauth_flow is not None:
                raise BoltError(error_authorize_conflicts())
            self._authorize = CallableAuthorize(logger=self._framework_logger,
                                                func=authorize)

        self._installation_store: Optional[
            InstallationStore] = installation_store
        if self._installation_store is not None and self._authorize is None:
            self._authorize = InstallationStoreAuthorize(
                installation_store=self._installation_store,
                logger=self._framework_logger,
                bot_only=installation_store_bot_only,
            )

        self._oauth_flow: Optional[OAuthFlow] = None

        if (oauth_settings is None
                and os.environ.get("SLACK_CLIENT_ID") is not None
                and os.environ.get("SLACK_CLIENT_SECRET") is not None):
            # initialize with the default settings
            oauth_settings = OAuthSettings()

        if oauth_flow:
            self._oauth_flow = oauth_flow
            installation_store = select_consistent_installation_store(
                client_id=self._oauth_flow.client_id,
                app_store=self._installation_store,
                oauth_flow_store=self._oauth_flow.settings.installation_store,
                logger=self._framework_logger,
            )
            self._installation_store = installation_store
            self._oauth_flow.settings.installation_store = installation_store

            if self._oauth_flow._client is None:
                self._oauth_flow._client = self._client
            if self._authorize is None:
                self._authorize = self._oauth_flow.settings.authorize
        elif oauth_settings is not None:
            installation_store = select_consistent_installation_store(
                client_id=oauth_settings.client_id,
                app_store=self._installation_store,
                oauth_flow_store=oauth_settings.installation_store,
                logger=self._framework_logger,
            )
            self._installation_store = installation_store
            oauth_settings.installation_store = installation_store
            self._oauth_flow = OAuthFlow(client=self.client,
                                         logger=self.logger,
                                         settings=oauth_settings)
            if self._authorize is None:
                self._authorize = self._oauth_flow.settings.authorize

        if (self._installation_store is not None
                or self._authorize is not None) and self._token is not None:
            self._token = None
            self._framework_logger.warning(warning_token_skipped())

        # after setting bot_only here, __init__ cannot replace authorize function
        if installation_store_bot_only is not None and self._oauth_flow is not None:
            app_bot_only = installation_store_bot_only or False
            oauth_flow_bot_only = self._oauth_flow.settings.installation_store_bot_only
            if app_bot_only != oauth_flow_bot_only:
                self.logger.warning(warning_bot_only_conflicts())
                self._oauth_flow.settings.installation_store_bot_only = app_bot_only
                self._authorize.bot_only = app_bot_only

        # --------------------------------------
        # Middleware Initialization
        # --------------------------------------

        self._middleware_list: List[Union[Callable, Middleware]] = []
        self._listeners: List[Listener] = []

        listener_executor = ThreadPoolExecutor(max_workers=5)
        self._listener_runner = ThreadListenerRunner(
            logger=self._framework_logger,
            process_before_response=process_before_response,
            listener_error_handler=DefaultListenerErrorHandler(
                logger=self._framework_logger),
            listener_executor=listener_executor,
            lazy_listener_runner=ThreadLazyListenerRunner(
                logger=self._framework_logger,
                executor=listener_executor,
            ),
        )

        self._init_middleware_list_done = False
        self._init_middleware_list(
            token_verification_enabled=token_verification_enabled)
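A minimal sketch (the Runner class is a hypothetical stand-in) of the design choice above: a single ThreadPoolExecutor is shared by both the listener runner and the lazy listener runner, so all listener work draws from one bounded pool of five threads.

from concurrent.futures import ThreadPoolExecutor

class Runner:
    def __init__(self, executor):
        self.executor = executor

    def run(self, func, *args):
        return self.executor.submit(func, *args)

shared = ThreadPoolExecutor(max_workers=5)
listener_runner = Runner(shared)
lazy_runner = Runner(shared)

listener_runner.run(print, "listener work").result()
lazy_runner.run(print, "lazy work").result()
shared.shutdown()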
예제 #35
0
def wnms(results, outpath, outfile, iouthresh, savejson=1, nmsname="nms"):
    indexedresults = indexResults(results)
    mergedresults = defaultdict(list)
    for (imageid, objlist) in indexedresults.items():
        for objdict in objlist:
            mergedresults[imageid].append([
                objdict['bbox'][0], objdict['bbox'][1], objdict['bbox'][2],
                objdict['bbox'][3], objdict['score'], objdict['category_id'],
                objdict["number"]
            ])
        objlist = mergedresults[imageid]
        # masxlist=[i[2]*i[3] for i in objlist]
        # max_wh=np.max(masxlist)
        # objlist=[[i[0],i[1],i[2],i[3],i[4]*0.05+i[3]*i[2]*0.95/max_wh,i[5],i[6]] for i in objlist ]
        keep = py_cpu_nms(np.array(objlist), 1)
        outdets = []
        for index in keep:
            outdets.append(objlist[index])
        mergedresults[imageid] = outdets
        if nmsname == "softnms":
            keep = py_cpu_nms(np.array(objlist), 1)
            newdets, keep = soft_nms(np.array(objlist),
                                     iou_thr=iouthresh,
                                     method='linear',
                                     sigma=0.5,
                                     min_score=1e-3)  #'gaussian''linear',
            # keep =py_cpu_softnms(np.array(objlist),thresh=nms_thresh, Nt=0.02, sigma=0.5, method=1)
            outdets = []
            for index in keep:
                outdets.append(objlist[index])
            mergedresults[imageid] = outdets
        elif nmsname == "setnms":
            print(objlist[0])
            print(len(objlist[0]))
            keep = np.array(objlist)[set_cpu_nms(np.array(objlist),
                                                 iouthresh)].tolist()
            mergedresults[imageid] = keep
        elif nmsname == "nms":
            keep = py_cpu_nms(np.array(objlist), 1)
            outdets = []
            for index in keep:
                outdets.append(objlist[index])
            mergedresults[imageid] = outdets
        elif nmsname == False:
            print("no nms")
        else:
            raise ValueError("nmsname must be 'softnms', 'setnms', or 'nms' (or False to skip NMS)")
    savelist = []

    def say2(iss):
        imageid, objlist = iss[0], iss[1]
        templist = []

        for obj in objlist:  #obj [22528, 1270, 24576, 1, 1.0, 4]
            templist.append({
                "image_id": imageid,
                "category_id": int(obj[5]),
                "bbox": obj[:4],
                # "bbox": tlbr2tlwh(obj[:4]),
                "score": obj[4]
            })
        templist = fliter(imageid, objlist[1][5], templist)
        return templist

    executor = ThreadPoolExecutor(max_workers=80)
    func_var = [[file_name, dict_value]
                for file_name, dict_value in mergedresults.items()]
    print("fusion bbox into self'image start ")
    pbar2 = tqdm(total=len(mergedresults), ncols=50)
    for temp in executor.map(say2, func_var):
        # print(temp)
        savelist += temp
        pbar2.update(1)
    pbar2.close()
    # assert len(savelist)==0,f"error{savelist} error"
    if savejson:
        assert isinstance(savelist[0],
                          dict), f"results must be dicts, got {savelist[0]}"
        # if  not isinstance(savelist[0], dict):
        #     raise f"the  results must is not {savelist[0]}"
        # print(savelist[0]['category_id'])
        outfile = outfile[:-5].replace(
            "all", f"{savelist[1]['category_id']}") + ".json"
        with open(os.path.join(outpath, outfile), 'w') as f:
            dict_str = json.dumps(savelist, indent=2)
            f.write(dict_str)
            print(
                f"save ***{len(savelist)} results*** json :{os.path.join(outpath, outfile)}"
            )
    return savelist
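A minimal sketch (hypothetical work function; assumes tqdm is installed, as in the original) of the executor.map plus progress-bar pattern used in wnms above.

from concurrent.futures import ThreadPoolExecutor
from tqdm import tqdm

def process(item):
    return item * item

items = list(range(10))
results = []
with ThreadPoolExecutor(max_workers=8) as executor:
    progress = tqdm(total=len(items), ncols=50)
    # map() yields results in the order of the input iterable
    for value in executor.map(process, items):
        results.append(value)
        progress.update(1)
    progress.close()
print(results)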
예제 #36
0
class TriggerController:


    __INSTANCE = None
    __LOCK = threading.Lock()
    
    
    @staticmethod
    def instance():
        with TriggerController.__LOCK:
            if TriggerController.__INSTANCE is None:
                TriggerController()  # __init__ registers the new instance as the singleton
            return TriggerController.__INSTANCE

    def __init__(self):
        TriggerController.__INSTANCE = self
        
        self.__registeredRuleIds = set([])
        self.__threadPool = ThreadPoolExecutor(max_workers=1)
        
    def listen_to_trigger_callback(self, aFunc):
        '''
        aFunc:Function - Parameters (ruleId:Integer)
        '''
        ControllerModule.ON_TRIGGER_CALLBACK = aFunc
        
    def register_listener(self, ruleId, triggerDict):
        self.unregister_listener(ruleId)

        triggerType = triggerDict["type"]
        triggerParsedValue = triggerDict.get("parsedValue")
        
        #=======================================================================
        # Register trigger to scheduler
        #=======================================================================
        if triggerType == AppConstants.TRIGGER_TYPE_INTERVAL:

            def __signal_scheduler_add_interval_job(ruleId, seconds, minutes, hours):

                kwargs = {k:v for k, v in {"seconds":seconds, "minutes":minutes, "hours":hours}.items() if v > 0}

                try:
                    SchedulerService.add_interval_job(jobName=str(ruleId),
                                                      kbxTargetAppId=AppInfo.get_app_id(),
                                                      kbxTargetMethod="on_trigger_callback",
                                                      kbxTargetModule="controller_module",
                                                      kbxTargetParams={"ruleId":ruleId},
                                                      store=False,
                                                      **kwargs)

                    self.__registeredRuleIds.add(ruleId)

                except SystemException as e:
                    Logger.log_debug(e)

            self.__threadPool.submit(__signal_scheduler_add_interval_job, ruleId, **triggerParsedValue)

        elif triggerType == AppConstants.TRIGGER_TYPE_TIME:

            def __signal_scheduler_add_cron_job(ruleId, hour, minute):
                try:
                    SchedulerService.add_cron_job(jobName=str(ruleId),
                                                  kbxTargetAppId=AppInfo.get_app_id(),
                                                  kbxTargetMethod="on_trigger_callback",
                                                  kbxTargetModule="controller_module",
                                                  kbxTargetParams={"ruleId":ruleId},
                                                  store=False,
                                                  hour=str(hour),
                                                  minute=str(minute))

                    self.__registeredRuleIds.add(ruleId)

                except SystemException as e:
                    Logger.log_debug(e)

            self.__threadPool.submit(__signal_scheduler_add_cron_job, ruleId, **triggerParsedValue)

    def unregister_listener(self, ruleId):
        
        def __signal_scheduler_remove_task(ruleId):
            try:
                SchedulerService.remove_job(str(ruleId))
                self.__registeredRuleIds.remove(ruleId)
            except SystemException as e:
                Logger.log_debug(e)
                
        if ruleId in self.__registeredRuleIds:
            self.__threadPool.submit(__signal_scheduler_remove_task, ruleId)

    def parse_to_trigger_dto(self, trigger):
        #=======================================================================
        # Check if all required keys must exists
        #=======================================================================
        if "type" not in trigger:
            raise AutomationException(11703, "'type' must exists")

        #=======================================================================
        # Check 'type' against allowed values
        #=======================================================================
        triggerType = trigger["type"]
        if triggerType not in (AppConstants.TRIGGER_TYPE_EVENT, AppConstants.TRIGGER_TYPE_INTERVAL, AppConstants.TRIGGER_TYPE_TIME):
            raise AutomationException(11703, "'type' has invalid value")

        #=======================================================================
        # Compute 'parsedValue'
        #=======================================================================
        triggerValue = trigger.get("value")
        if triggerType == AppConstants.TRIGGER_TYPE_INTERVAL:
            triggerValue = ValueParser.get_number(triggerValue) # this must be an integer (seconds)
            if isinstance(triggerValue, int):
                if triggerValue > 0:
                    seconds = triggerValue % 60
                    minutes = math.floor(triggerValue / 60) % 60
                    hours = math.floor((triggerValue / 3600)) % 24

                    trigger["parsedValue"] = {"seconds":seconds, "minutes":minutes, "hours":hours}

                else:
                    raise AutomationException(11703, "'value' in 'trigger' must be larger than 0")
            else:
                raise AutomationException(11703, "'value' in 'trigger' must be a number in seconds")

        elif triggerType == AppConstants.TRIGGER_TYPE_TIME:
            triggerValue = ValueParser.get_string(triggerValue) # this must be in (HH, MM)
            if isinstance(triggerValue, str):
                triggerValue = triggerValue.split(":")

                if len(triggerValue) == 2:
                    #=======================================================
                    # Validate hour
                    #=======================================================
                    hour = ValueParser.get_number(triggerValue[0])

                    if hour is None or not 0 <= hour <= 23:
                        raise AutomationException(11703, "'HH is ranged from 00 - 23'")

                    #=======================================================
                    # Validate minute
                    #=======================================================
                    minute = ValueParser.get_number(triggerValue[1])

                    if minute is None or not 0 <= minute <= 59:
                        raise AutomationException(11703, "MM is ranged from 00 - 59")

                    trigger["parsedValue"] = {"hour":hour, "minute":minute}

                else:
                    raise AutomationException(11703, "'value' in 'trigger' must be in HH:MM format")
            else:
                raise AutomationException(11703, "'value' in 'trigger' must be string in HH:MM format")
        
        else:
            trigger["parsedValue"] = None

        return trigger
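A minimal sketch of the single-worker pattern TriggerController uses above: a ThreadPoolExecutor(max_workers=1) acts as a serial work queue, so scheduler calls run one at a time, in submission order, off the caller's thread (the class and method names here are hypothetical).

from concurrent.futures import ThreadPoolExecutor

class SerialWorker:
    def __init__(self):
        self._pool = ThreadPoolExecutor(max_workers=1)

    def enqueue(self, func, *args, **kwargs):
        # Returns a Future; tasks never overlap because there is only one worker thread.
        return self._pool.submit(func, *args, **kwargs)

worker = SerialWorker()
worker.enqueue(print, "first")
worker.enqueue(print, "second").result()  # "first" is guaranteed to have run already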
예제 #37
0
파일: views.py 프로젝트: and-sm/testgr
import json

from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from loader.methods.nose2 import Nose2Loader
from loader.methods.pytest import PytestLoader
from concurrent.futures.thread import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=1)


@csrf_exempt
def loader(request):
    if request.method != 'POST':
        return HttpResponse(status=403)

    body_unicode = request.body.decode('utf-8')
    try:
        data = json.loads(body_unicode)
    except json.JSONDecodeError:
        return HttpResponse(status=400)

    loaders = {
        '1': Nose2Loader,
        '2': PytestLoader,
    }

    current_loader = loaders.get(data.get('fw'))
    if current_loader is None:
        return HttpResponse(status=400)
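The excerpt above stops before the module-level pool is used. A generic sketch (hypothetical handler, not from the testgr project) of the idea behind a single-worker module-level pool in a request handler: queue the slow work and return immediately.

from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=1)

def process_payload(payload):
    print("processing", payload)  # slow work happens off the request thread

def handle_request(payload):
    pool.submit(process_payload, payload)  # fire-and-forget
    return "accepted"

print(handle_request({"fw": "1"}))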
예제 #38
0
    def run(cls, Obj):
        TestBase.process_now = 0

        def try_run(*args, **kargs):
            try:
                return Obj.test(*args, **kargs)
            except Exception as e:
                rprint(e)
                print_exception(e)

        test_if_same = set()
        result_zusammen = dict()
        hs = []
        for i in cls.ins:
            if (i.target.ip + i.target.ports) in test_if_same: continue
            if '/' in i.target.ports:
                i.target.port = i.target.ports.split("/")[0].strip()
            else:
                i.target.port = i.target.ports.strip()
            test_if_same.add(i.target.ip + i.target.ports)
            hs.append(i.target)
        #hs = [i.target for i in cls.ins]
        process_len = len(hs)
        if hasattr(Obj, '__name__'):
            cls.log("use :", Obj.__name__)
        if hasattr(Obj, "mode"):
            if Obj.mode == "thread":
                thread = 7
                if hasattr(Obj, 'thread'):
                    thread = int(Obj.thread)
                if hasattr(Obj, 'timeout'):
                    timeout = Obj.timeout
                else:
                    timeout = 12
                gprint("set mode : %s" % Obj.mode)
                gprint("set thread : %d" % thread)
                gprint("set timeout : %d" % timeout)
                with ThreadPoolExecutor(max_workers=thread) as exe:
                    if not hasattr(Obj, 'callback'):
                        if hasattr(Obj, 'log') and Obj.log == 'simple':

                            callback = lambda x: gprint(
                                x, "\nfinish done | %s" % colored(
                                    "-" * 5 + '\n', 'blue'))
                        else:
                            callback = lambda x: TestBase.process_add(
                                process_len)
                    else:
                        callback = Obj.callback

                    def callback_out(future, url=''):
                        try:
                            r = future.result(timeout=timeout)
                            result_zusammen[url] = r
                            callback(r)
                        except futures.TimeoutError:
                            rprint('timeout:', url)

                    for h in hs:
                        future = exe.submit(try_run, h)
                        future.add_done_callback(
                            partial(callback_out, url=h.ip))

                if 'has' in Obj.__name__ or 'if' in Obj.__name__:
                    Tprint(result_zusammen, color='green', attrs=['bold'])
        else:
            res = try_run(hs)
            if res:
                cls.log(res)
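A minimal sketch (hypothetical scan function) of the submit plus add_done_callback(partial(...)) pattern used in run() above. Note that a done-callback receives an already-finished Future, so result() returns immediately there; the timeout passed in callback_out is effectively never hit from inside the callback.

from concurrent.futures import ThreadPoolExecutor
from functools import partial

def scan(host):
    # Stand-in for Obj.test(target)
    return "%s: ok" % host

def on_done(future, url=""):
    # The future is already finished when a done-callback runs.
    print(future.result(), "| reported for", url)

with ThreadPoolExecutor(max_workers=2) as exe:
    for host in ("10.0.0.1", "10.0.0.2"):
        future = exe.submit(scan, host)
        future.add_done_callback(partial(on_done, url=host))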
예제 #39
0
                Msg.reply_message(random.choice(tempChatBot[chat[2:]]))
            elif balas:
                Msg.reply_message(balas)
            else:
                Msg.reply_message(
                    random.choice([
                        "aku ndak Bisa jawab", "bilang apa tadi ?",
                        "gimana ya", "gak ngerti", "mana saya tau",
                        "maksud kamu apa bro"
                    ]))
        else:
            Msg.reply_message('Mau Nanya apa ?')


if __name__ == '__main__':

    if 'pickle.txt' in os.listdir('.'):
        driver.set_local_storage(pickle.loads(open("pickle.txt", "rb").read()))
        driver.connect()
        while True:
            if driver.is_logged_in():
                with ThreadPoolExecutor(max_workers=2) as executor:
                    executor.submit(main)
    else:
        while True:
            if driver.is_logged_in():
                open("pickle.txt",
                     "wb").write(pickle.dumps(driver.get_local_storage()))
                with ThreadPoolExecutor(max_workers=2) as executor:
                    executor.submit(main)
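One thing to note about the loop above: exiting the `with` block calls shutdown(wait=True), so each iteration blocks until main() returns, and a pool with a single submitted task adds no concurrency. A small illustration with a stand-in main():

from concurrent.futures import ThreadPoolExecutor
import time

def main():
    time.sleep(0.1)
    return "done"

start = time.time()
with ThreadPoolExecutor(max_workers=2) as executor:
    executor.submit(main)  # the with-exit waits for this future to finish
print("blocked for about %.1f seconds" % (time.time() - start))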
예제 #40
0
class RuleService:


    def __init__(self):
        # Rule processors.
        self.__ruleController = RuleController()
        self.__methodController = MethodController()
        self.__triggerController = TriggerController.instance()
        
        self.__ruleUpdateThreadPool = ThreadPoolExecutor(max_workers=1)
        self.__ruleExecThreadPool = ThreadPoolExecutor(max_workers=AppConstants.MAX_RULE_EXEC_THREAD_SIZE)
        
        # Rule run workers.
        self.__ruleExecInfos = {}
        self.__condCallGroup = MethodCallGroup()
        self.__execCallGroup = MethodCallGroup()
        
        # Listeners.
        self.__ruleController.listen_to_rule_status_change(self.__on_rule_status_changed)
        GroupController.instance().listen_to_group_icon_change(self.__on_group_icon_changed)
        self.__methodController.listen_to_method_status_change(self.__on_method_status_changed)
        EventController.instance().listen_to_event_callback(self.__on_method_event_callback)
        self.__triggerController.listen_to_trigger_callback(self.__on_trigger_callback)
        
    def __on_rule_status_changed(self, ruleId, oldEnabled, newEnabled, oldStatusProcessed, newStatusProcessed):
        '''
        Trigger Source: RuleController --> This
        Callback when a rule is re-enabled OR statusProcessed changed to "updated".
        '''
        if newEnabled == True and newStatusProcessed == AppConstants.RULE_STATUS_UPDATED:
            if oldEnabled != newEnabled or oldStatusProcessed != newStatusProcessed:
                self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, checkCondition=True)
        
    def __on_group_icon_changed(self, kbxGroupId):
        '''
        Trigger Source: GroupController --> This
        Callback when kbxGroupIcon changed.
        '''
        ruleIdsFromCond = self.__ruleController.list_rule_ids_which_has_kbx_group_id_as_condition(kbxGroupId)
        ruleIdsFromExec = self.__ruleController.list_rule_ids_which_has_kbx_group_id_as_execution(kbxGroupId)
            
        # Broadcast rules updated messages.
        for ruleId in set(ruleIdsFromCond + ruleIdsFromExec):
            self.__broadcast_message__rule_updated(ruleId)

    def __on_method_status_changed(self, kbxMethodId, oldKBXMethodStatus, newKBXMethodStatus):
        '''
        Trigger Source: MethodController --> This
        Callback when kbxMethodStatus changed.
        '''
        if oldKBXMethodStatus != newKBXMethodStatus:
            ruleIdsFromCond = self.__ruleController.list_rule_ids_which_has_kbx_method_id_as_condition(kbxMethodId)
            ruleIdsFromExec = self.__ruleController.list_rule_ids_which_has_kbx_method_id_as_execution(kbxMethodId)
                
#             # Executes rules with conditions affected.
#             if newKBXMethodStatus == SharedMethod.METHOD_STATUS_ACTIVE:
#                 for ruleId in ruleIdsFromCond:
#                     self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, checkCondition=True)
            
            # Broadcast rules updated messages.
            for ruleId in set(ruleIdsFromCond + ruleIdsFromExec):
                self.__broadcast_message__rule_updated(ruleId)
                
    def __on_method_event_callback(self, kbxMethodId, eventTag, eventData):
        '''
        Trigger Source: EventController --> MethodController --> This
        Callback when a method that has an event broadcasts that event.
        '''
        ruleIds = self.__ruleController.list_rule_ids_which_has_kbx_method_id_as_condition(kbxMethodId)
        for ruleId in ruleIds:
            self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, 
                                             checkCondition=True, eventTag=eventTag, 
                                             eventData=eventData, eventMethodId=kbxMethodId)
        
    def __on_trigger_callback(self, ruleId):
        '''
        Trigger Source: TriggerController --> This
        Callback when a rule is triggered.
        '''
        self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, checkCondition=True)
        
    def set_rule(self, trigger, condition, execution, ruleId=None, ruleName=None, ruleProtected=False, enabled=True):
        '''
        Create/Edit(with ruleId provided) an existing rule.
        
        trigger:Dictionary
        condition:List
        execution:List
        ruleId:Integer <Optional>
        ruleName:String <Optional>
        ruleProtected:Boolean <Optional>
        enabled:Boolean
        
        Returns "ruleId"
        '''
        def process_method_list(methodList):
            #===================================================================
            # Basic type validation
            #===================================================================
            if not isinstance(methodList, list):
                Logger.log_error("RuleService.set_rule: 'condition' and 'execution' must be type of list.")
                Logger.log_debug("type:", type(methodList), "value:", methodList)
                raise AutomationException(11704, "List is required for both 'condition' and 'execution'")

            #===================================================================
            # Check allowed size, raise error if exceeded.
            #===================================================================
            methodListLen = len(methodList)
            if methodListLen > AppConstants.MAX_METHOD_SIZE:
                Logger.log_error("RuleService.set_rule: 'condition' and 'execution' cannot have more than", AppConstants.MAX_METHOD_SIZE, "items respectively.")
                raise AutomationException(11705, "Only a maximum of " + \
                                          str(AppConstants.MAX_METHOD_SIZE) + \
                                          " items is allowed for each 'condition' and 'execution' - given size " + \
                                          str(methodListLen),
                                          lambda text: str(AppConstants.MAX_METHOD_SIZE).join(text.split(":max_item_size:")))

            #===================================================================
            # Check if all kbxMethodIds are valid and all kbxMethodParams are list
            #===================================================================
            idValidator = NumberValidator(isRequired=True, decimalPoint=False)
            if not all([idValidator.is_valid(eachMethod["kbxMethodId"])
                        and isinstance(eachMethod["kbxMethodParams"], list)
                        for eachMethod in methodList]):
                raise AutomationException(11704, "'condition' and 'execution' have incorrect data structure.")

            #===================================================================
            # Check if all kbxParamName and kbxParamCurrentValue exists
            #===================================================================
            paramNameValidator = StringValidator(isRequired=True)
            for eachMethod in methodList:
                methodArgs = eachMethod["kbxMethodParams"]
                for methodArg in methodArgs:
                    if not paramNameValidator.is_valid(methodArg[AppConstants.ARG_NAME]):
                        raise AutomationException(11704, "'condition' and 'execution' have invalid params structure")

                    if not AppConstants.ARG_CURRENT_VALUE in methodArg:
                        methodArg[AppConstants.ARG_CURRENT_VALUE] = None
            
            return methodList

        #=======================================================================
        # Data structure validations
        #=======================================================================
        ruleId = NumberValidator(isRequired=False, decimalPoint=False).get_value(ruleId)
        triggerDTO = self.__triggerController.parse_to_trigger_dto(trigger)
        condition = process_method_list(condition)
        execution = process_method_list(execution)
        
        #=======================================================================
        # Add to database
        #=======================================================================
        if Util.is_empty(ruleId):
            # Validate_max_rule_size
            if self.__ruleController.count() >= AppConstants.MAX_RULE_SIZE:
                raise AutomationException(11706, 
                                          "Total amount of rules cannot be more than " + str(AppConstants.MAX_RULE_SIZE),
                                          lambda text: str(AppConstants.MAX_RULE_SIZE).join(text.split(":max_rule_size:")))
            ruleId = self.__ruleController.generate_id(ruleName)
            rule = {}
        elif self.__ruleController.has(ruleId):
            ruleFromDB = self.__ruleController.get(ruleId)
            rule = dict(ruleFromDB)
            self.__check_rule_process_status(ruleId)
            self.__ruleController.change_to_updating(ruleId, ruleName)
        else:
            raise AutomationException(11704, "Rule ID provided not found - " + str(ruleId))

        #=======================================================================
        # Broadcast message: starts to update rule.
        #=======================================================================
        self.__broadcast_message__rule_update_started(ruleId, ruleName)

        #=======================================================================
        # Set basic information of the rule
        #=======================================================================
        rule["ruleId"] = ruleId
        rule["ruleName"] = ruleName
        rule["ruleProtected"] = ruleProtected
        rule["trigger"] = triggerDTO
        rule["enabled"] = enabled
        
        rule["condition"] = condition
        rule["execution"] = execution

        #=======================================================================
        # Update rule
        #=======================================================================
        def __update_rule(rule):
            try:
                # Fire rule update start event
                ruleId = rule["ruleId"]
                
                # Add methods to subscribe list
                methodIds = [kbxMethod["kbxMethodId"] for kbxMethod in rule["condition"] + rule["execution"]]
                self.__methodController.add(methodIds)
                
                # Update "rule" base table
                self.__ruleController.update(rule)
                self.__ruleController.commit()
            
            except Exception as e:
                self.__ruleController.rollback()
                self.__broadcast_message__rule_update_failed(ruleId, ruleName)
                Logger.log_error("RuleService __update_rule failed:", e, "-- rolledback")
            else:
                self.__triggerController.register_listener(ruleId, rule["trigger"])
                
                # Process for Timer Module
                TimerModule.delete_scheduler(ruleId)
                
                timerModuleHandlers = {TimerModule.METHOD_ID_DATE_TIME_RANGE:TimerModule.handle_date_time_range,
                                       TimerModule.METHOD_ID_DAY_OF_WEEK:TimerModule.handle_dow,
                                       TimerModule.METHOD_ID_TIME_RANGE:TimerModule.handle_time_range}
                
                for kbxMethod in rule["condition"]:
                    kbxMethodId = kbxMethod["kbxMethodId"]
                    timerModuleHandler = timerModuleHandlers.get(kbxMethodId, None)
                    if timerModuleHandler is not None:
                        timerModuleHandler(ruleId, kbxMethod["kbxMethodParams"])
                    
                # Broadcast message: completed updating a rule
                self.__broadcast_message__rule_updated(ruleId)

        #=======================================================================
        # Submit the remaining processing to a worker thread and return immediately (for performance).
        #=======================================================================
        self.__ruleUpdateThreadPool.submit(__update_rule, rule)

    def delete_rule(self, ruleId):
        self.__check_rule_process_status(ruleId)
        try:
            self.__ruleController.delete(ruleId)
            self.__ruleController.commit()
        except Exception as e:
            self.__ruleController.rollback()
            Logger.log_error("RuleService delete_rule ex:", e, "-- rolled back")
        else:
            self.__broadcast_message__rule_deleted(ruleId)
            self.__triggerController.unregister_listener(ruleId)
            TimerModule.delete_scheduler(ruleId)

    def trigger_rule(self, ruleId, checkCondition=False):
        '''
        self.__check_rule_process_status(ruleId) <-- Check again in self.__trigger_rule_implementation.
        '''
        self.__trigger_rule_implementation(ruleId=ruleId, checkCondition=checkCondition)
    
    def enable_rule(self, ruleId, enabled):
        self.__check_rule_process_status(ruleId)
        try:
            self.__ruleController.enable(ruleId, enabled)
            self.__ruleController.commit()
        except Exception as e:
            self.__ruleController.rollback()
            Logger.log_error("RuleService enable_rule ex:", e, "-- rolled back")
        else:
            self.__broadcast_message__rule_updated(ruleId)

    def get_rule(self, ruleId, language=AppInfo.DEFAULT_API_LANGUAGE):
        try:
            rule = self.__ruleController.get_detail(ruleId)
        except:
            raise AutomationException(11702, "Rule ID provided not found - " + str(ruleId))
        
        kbxMethods = list(rule["condition"]) + list(rule["execution"])
        
        # -------------- Compile lists of kbxMethod and group IDs contains in this rule.
        kbxMethodIdsToList = {}
        kbxGroupIdsToList = set([])
        
        for kbxMethod in kbxMethods:
            # Variables
            kbxMethodId = kbxMethod["kbxMethodId"]
            kbxMethodAppId = kbxMethod["kbxMethodAppId"]
            kbxMethodStatus = kbxMethod["kbxMethodStatus"]
            kbxGroupId = kbxMethod["kbxGroupId"]
            kbxGroupStatus = kbxMethod["kbxGroupStatus"]
            
            if kbxMethodStatus != -1 and kbxMethodAppId is not None:
                kbxMethodIdsToList.setdefault(kbxMethodAppId, set([]))
                kbxMethodIdsToList[kbxMethodAppId].add(kbxMethodId)
            if kbxGroupId is not None and kbxGroupStatus != -1:
                kbxGroupIdsToList.add(kbxGroupId)
                
        # -------------- Get methods and groups based on requested language.
        kbxMethodIdsListed = {}
        kbxGroupIdsListed = {}
                
        for kbxMethodAppId, kbxMethodIds in kbxMethodIdsToList.items():
            kbxMethodIdsListed[kbxMethodAppId] = SharedMethodWrapper.list_shared_methods_by_app_id(kbxMethodAppId, 
                                                                                                   list(kbxMethodIds), 
                                                                                                   language=language)
            
        groupList = SharedMethodWrapper.list_shared_method_groups(kbxGroupId=kbxGroupIdsToList, language=language)
        for row in groupList:
            kbxGroupIdsListed[row["kbxGroupId"]] = row
        
        # -------------- Set method and group data into rule.
        for kbxMethod in kbxMethods:
            # Variables
            kbxMethodId = kbxMethod["kbxMethodId"]
            kbxMethodAppId = kbxMethod["kbxMethodAppId"]
            kbxMethodStatus = kbxMethod["kbxMethodStatus"]
            kbxGroupId = kbxMethod["kbxGroupId"]
            kbxGroupStatus = kbxMethod["kbxGroupStatus"]
            
            if kbxMethodStatus != -1 and kbxMethodAppId is not None:
                kbxMethodParamsWithCurrentValue = {kbxMethodParam["kbxParamName"]:kbxMethodParam["kbxParamCurrentValue"] \
                                                   for kbxMethodParam in kbxMethod["kbxMethodParams"]}
                kbxMethodWithDetails = kbxMethodIdsListed[kbxMethodAppId][kbxMethodId]
                if kbxMethodWithDetails is not None:
                    kbxMethodParamsWithDetails = kbxMethodWithDetails["kbxMethodParams"]
                    kbxMethodParamsWithDetails = copy.deepcopy(kbxMethodParamsWithDetails)
                    
                    for kbxMethodParam in kbxMethodParamsWithDetails:
                        kbxMethodParam["kbxParamCurrentValue"] = kbxMethodParamsWithCurrentValue.get(kbxMethodParam["kbxParamName"], None)
                    
                    kbxMethod["kbxMethodParams"] = kbxMethodParamsWithDetails
                    kbxMethod["kbxMethodHasEvent"] = not Util.is_empty(kbxMethodWithDetails.get("kbxMethodEvent", None)) \
                                                        and not Util.is_empty(kbxMethodWithDetails.get("kbxMethodIdentifier", None))
                    kbxMethod["kbxMethodLabel"] = kbxMethodWithDetails.get("kbxMethodLabel")
                    kbxMethod["kbxMethodDesc"] = kbxMethodWithDetails.get("kbxMethodDesc")
                
                else:
                    kbxMethod["atDebugMethod"] = "Unable to get shared method, caused by a method which never register itself on this bootup."
                    
            else:
                kbxMethod["kbxMethodHasEvent"] = False
                
            if kbxGroupId is not None and kbxGroupStatus != -1:
                try:
                    kbxMethod["kbxGroupLabel"] = kbxGroupIdsListed[kbxGroupId]["kbxGroupLabel"]
                    kbxMethod["kbxGroupDesc"] = kbxGroupIdsListed[kbxGroupId]["kbxGroupDesc"]
                except:
                    kbxMethod["atDebugGroup"] = "Unable to get shared method group, caused by a group which never register itself on this bootup."
                    
        return rule

    def list_rules(self, offset=0, limit=20):
        return self.__ruleController.list(offset, limit), \
                self.__ruleController.count()
                
    def run_all_enabled_rules(self):
        '''
        Add the following 2 lines of code at AutomationModuleWrapper.py - start(),
        after the last statement, to enable running all rules on bootup.
        # Logger.log_info("Attempts to execute all enabled rules ...")
        # self.__ruleService.run_all_enabled_rules()
        '''
        ruleIds = self.__ruleController.list_rule_ids_which_are_enabled()
        for ruleId in ruleIds:
            self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, checkCondition=True)
                
    def __check_rule_process_status(self, ruleId):
        try:
            statusProcessed = self.__ruleController.get_status_processed(ruleId)
            if statusProcessed != AppConstants.RULE_STATUS_UPDATED:
                raise AutomationException(11703, "edit/delete/execute is not allowed while a rule update is in progress")
        except:
            raise AutomationException(11702, "Rule ID provided not found - " + str(ruleId))
        
    def __broadcast_message__rule_update_started(self, ruleId, ruleName=None):
        eventTag = AppConstants.EVENT_RULE_UPDATE_STARTED
        eventData = {"ruleId":ruleId, "newRuleName":ruleName}
        
        self.__broadcast_message(eventTag, eventData)
        Logger.log_info("Rule Start Update:", ruleName)

    def __broadcast_message__rule_updated(self, ruleId):
        try:
            rule = self.__ruleController.get_summary(ruleId)
        except Exception as e:
            Logger.log_error("RuleService.__broadcast_message__rule_updated get_summary ex:", e)
            return

        eventTag = AppConstants.EVENT_RULE_UPDATED
        eventData = rule
        
        self.__broadcast_message(eventTag, eventData)
        Logger.log_info("Rule Updated:", rule["ruleName"])
        
    def __broadcast_message__rule_update_failed(self, ruleId, ruleName=None):
        '''
        ruleName - For debugging purpose.
        '''
        try:
            rule = self.__ruleController.get_summary(ruleId)
        except Exception:
            rule = None
        
        eventTag = AppConstants.EVENT_RULE_UPDATE_FAILED
        eventData = {"ruleId": ruleId, "oldRuleSummary":rule}

        self.__broadcast_message(eventTag, eventData)
        Logger.log_info("Rule Update Failed:", ruleName)
        
    def __broadcast_message__rule_deleted(self, ruleId):
        eventTag = AppConstants.EVENT_RULE_DELETED
        eventData = {"ruleId": ruleId}

        self.__broadcast_message(eventTag, eventData)
        Logger.log_info("Rule Deleted: Id -", ruleId)
            
    def __broadcast_message(self, eventTag, eventData):
        eventData = json.dumps(eventData, cls=AutomationJSONEncoder)
        Application.send_web_server_event(eventTag, eventData)
        
    def __trigger_rule_implementation(self, ruleId, checkCondition=False, eventTag=None, eventData=None, eventMethodId=None):
        '''
        Triggers a rule by given ruleId.
        '''
        Logger.log_info("trigger rule id:", ruleId)
        
        # Check if rule is "updated" AND enabled.
        statusProcessed, enabled = self.__ruleController.get_status_processed_and_enabled(ruleId)
        if statusProcessed != AppConstants.RULE_STATUS_UPDATED or enabled != True:
            return
        
        self.__ruleExecInfos.setdefault(ruleId, RuleExecInfo())
        
        ruleExecInfo = self.__ruleExecInfos.get(ruleId)
        ruleExecInfo.increase_trigger_count()
        triggerCountInThisSession = ruleExecInfo.get_trigger_count()
        
        with ruleExecInfo.get_rlock():
            #=======================================================================
            # Check conditions
            #=======================================================================
            if checkCondition is True:
                # Check if we should proceed (stop if there is another pending request on the same ruleId).
                if triggerCountInThisSession != ruleExecInfo.get_trigger_count():
                    return
                
                methodListToCheck = deque()
                result = self.__ruleController.list_conditions(ruleId)
                methodCheckingTime = int(time.time())
                for row in result:
                    if row["kbxMethodStatus"] not in (SharedMethod.METHOD_STATUS_ACTIVE, SharedMethod.METHOD_STATUS_INACTIVE):
                        return
                    else:
                        methodArgs = row["kbxMethodParams"]
    
                        kwargs = {methodArg[AppConstants.ARG_NAME]:methodArg[AppConstants.ARG_CURRENT_VALUE] for methodArg in methodArgs}
                        
                        if eventTag is not None and eventMethodId == row["kbxMethodId"]:
                            kwargs[AppConstants.KEY_CONDITION_EVENT_TAG] = eventTag
                            kwargs[AppConstants.KEY_CONDITION_EVENT_DATA] = eventData
                            
                        if AppInfo.REQUEST_KEY_LANGUAGE not in kwargs:
                            kwargs[AppInfo.REQUEST_KEY_LANGUAGE] = AppInfo.DEFAULT_API_LANGUAGE
                            
                        kwargs["kbxMethodName"] = row["kbxMethodName"]
                        kwargs["kbxModuleName"] = row["kbxModuleName"]
                        kwargs["kbxGroupId"] = row["kbxGroupId"]
                        kwargs["kbxMethodAppId"] = row["kbxMethodAppId"]
                        
                        # Update ruleId if it is required by the method
                        if "ruleId" in kwargs:
                            kwargs["ruleId"] = str(ruleId)
                        
                        callId = hash(str(kwargs)) # Generate condition checking ID
                        kwargs[AppConstants.KEY_CONDITION_TIMESTAMP] = methodCheckingTime  # added after callId is generated so the timestamp does not change the id
                        methodListToCheck.append({"callId":callId,
                                                  "callFn":SharedMethod.call,
                                                  "callKwargs":kwargs})
                    
                #===============================================================
                # Submit all conditions for checking
                #===============================================================
                methodListToCheckLen = len(methodListToCheck)
                if methodListToCheckLen > 0:
                    ruleExecResult = RuleExecResult(methodListToCheckLen)
                    
                    for methodItem in methodListToCheck:
                        self.__condCallGroup.submit(callbackFn=self.__on_method_call_complete, ruleExecResult=ruleExecResult, **methodItem)
                    
                    result = ruleExecResult.wait(40.0)
                    
                    if result is False or ruleExecResult.get_result() is False:
                        return # Failed at condition checking.
            
                # Clear cache
                del(methodListToCheck)
                del(methodCheckingTime)
                del(methodListToCheckLen)
                
                # Check if we should proceed (stop if there is another pending request on the same ruleId).
                if triggerCountInThisSession != ruleExecInfo.get_trigger_count():
                    return

            #=======================================================================
            # Execute executions
            #=======================================================================
            methodListToExec = deque()
            result = self.__ruleController.list_executions(ruleId)
            methodExecTime = int(time.time())
            for row in result:
                if row["kbxMethodStatus"] not in (SharedMethod.METHOD_STATUS_ACTIVE, SharedMethod.METHOD_STATUS_INACTIVE):
                    continue
                else:
                    methodArgs = row["kbxMethodParams"]
                    kwargs = {methodArg[AppConstants.ARG_NAME]:methodArg[AppConstants.ARG_CURRENT_VALUE] for methodArg in methodArgs}
                    if AppInfo.REQUEST_KEY_LANGUAGE not in kwargs:
                        kwargs[AppInfo.REQUEST_KEY_LANGUAGE] = AppInfo.DEFAULT_API_LANGUAGE
                    
                    kwargs["kbxMethodName"] = row["kbxMethodName"]
                    kwargs["kbxModuleName"] = row["kbxModuleName"]
                    kwargs["kbxGroupId"] = row["kbxGroupId"]
                    kwargs["kbxMethodAppId"] = row["kbxMethodAppId"]
                    
                    # Update ruleId if it is required by the method
                    if "ruleId" in kwargs:
                        kwargs["ruleId"] = str(ruleId)
                    
                    callId = hash(str(kwargs)) # Generate execution id
                    kwargs[AppConstants.KEY_ACTION_TIMESTAMP] = methodExecTime
                    methodListToExec.append({"callId":callId,
                                             "callFn":SharedMethod.call,
                                             "callKwargs":kwargs})

            #===============================================================
            # Submit all methods for executions
            #===============================================================
            methodListToExecLen = len(methodListToExec)
            if methodListToExecLen > 0:
                ruleExecResult = RuleExecResult(methodListToExecLen)
                
                for methodItem in methodListToExec:
                    self.__execCallGroup.submit(callbackFn=self.__on_method_call_complete, ruleExecResult=ruleExecResult, **methodItem)
                
                result = ruleExecResult.wait(30.0)
                
                return
            
    def __on_method_call_complete(self, checkingId, result, ruleExecResult):
        '''
        When rule/execution checking is completed.
        '''
        ruleExecResult.set_result(result)
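
RuleService uses two pools: __ruleUpdateThreadPool with max_workers=1 so database updates are applied strictly in submission order while set_rule returns immediately, and __ruleExecThreadPool sized by MAX_RULE_EXEC_THREAD_SIZE for concurrent rule execution. A minimal sketch of the single-worker "write-behind" half of that design, with illustrative names that are not part of RuleService:

from concurrent.futures import ThreadPoolExecutor

class WriteBehindStore:
    """Accepts writes immediately; a single worker applies them in order."""

    def __init__(self):
        # max_workers=1 guarantees updates are applied strictly in submission order
        self._update_pool = ThreadPoolExecutor(max_workers=1)
        self._data = {}

    def set(self, key, value):
        # returns at once; the actual write happens on the worker thread
        return self._update_pool.submit(self._apply, key, value)

    def _apply(self, key, value):
        self._data[key] = value
        return key

    def close(self):
        self._update_pool.shutdown(wait=True)

store = WriteBehindStore()
f = store.set("ruleId", 42)
f.result()   # block only if the caller actually needs confirmation
store.close()

The same trade-off as in set_rule applies: callers get a fast return, and failures surface later through the future or, as in RuleService, through a broadcast "update failed" message.
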
예제 #41
0
def main():
    with ThreadPoolExecutor(max_workers=THREAD_COUNT) as executor:
        executor.map(run, range(len(battles_to_run)))
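
THREAD_COUNT, run and battles_to_run are module-level names outside this snippet. A self-contained sketch of the same executor.map pattern with stand-in values; map preserves input order and lazily re-raises worker exceptions when the result iterator is consumed:

from concurrent.futures import ThreadPoolExecutor

THREAD_COUNT = 4                       # stand-in for the module-level constant
battles_to_run = ["a-vs-b", "c-vs-d"]  # stand-in for the real battle list

def run(index):
    # placeholder for the real battle runner
    return "finished %s" % battles_to_run[index]

def main():
    with ThreadPoolExecutor(max_workers=THREAD_COUNT) as executor:
        # results come back in submission order
        for outcome in executor.map(run, range(len(battles_to_run))):
            print(outcome)

if __name__ == "__main__":
    main()
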
예제 #42
0
class OpticalPathManager(object):
    """
    The purpose of this module is setting the physical components contained in
    the optical path of a SPARC system to the right position/configuration with
    respect to the mode given.
    """
    def __init__(self, microscope):
        """
        microscope (Microscope): the whole microscope component, thus it can
            handle all the components needed
        """
        self.microscope = microscope
        self._graph = affectsGraph(self.microscope)

        # Use subset for modes guessed
        if microscope.role == "sparc2":
            self._modes = copy.deepcopy(SPARC2_MODES)
        elif microscope.role in ("sparc-simplex", "sparc"):
            self._modes = copy.deepcopy(SPARC_MODES)
        else:
            raise NotImplementedError("Microscope role '%s' unsupported" % (microscope.role,))

        # keep a list of already accessed components, to avoid creating new proxies
        # every time the mode changes
        self._known_comps = dict()  # str (role) -> component

        # All the actuators in the microscope, to cache proxies to them
        self._actuators = []
        for comp in model.getComponents():
            if hasattr(comp, 'axes') and isinstance(comp.axes, dict):
                self._actuators.append(comp)

        # last known axes position
        self._stored = {}
        self._last_mode = None  # previous mode that was set
        # Removes modes which are not supported by the current microscope
        for m, (det, conf) in list(self._modes.items()):  # copy: entries may be deleted below
            try:
                comp = self._getComponent(det)
            except LookupError:
                logging.debug("Removing mode %s, which is not supported", m)
                del self._modes[m]

        # Create the guess information out of the mode
        # TODO: just make it a dict comprole -> mode
        self.guessed = self._modes.copy()
        # No stream should ever imply alignment mode
        for m in ALIGN_MODES:
            try:
                del self.guessed[m]
            except KeyError:
                pass  # Mode to delete is just not there

        # Handle different focus for chamber-view (in SPARCv2)
        if "chamber-view" in self._modes:
            self._focus_in_chamber_view = None
            self._focus_out_chamber_view = None
            # Check whether the focus affects the chamber view
            self._chamber_view_own_focus = False
            try:
                chamb_det = self._getComponent(self._modes["chamber-view"][0])
                focus = self._getComponent("focus")
                if self.affects(focus.name, chamb_det.name):
                    self._chamber_view_own_focus = True
            except LookupError:
                pass
            if not self._chamber_view_own_focus:
                logging.debug("No focus component affecting chamber")

        try:
            spec = self._getComponent("spectrometer")
        except LookupError:
            spec = None
        if self.microscope.role == "sparc2" and spec:
            # Remove the moves that don't affect the detector
            # TODO: do this for _all_ modes
            for mode in ('spectral', 'monochromator'):
                if mode in self._modes:
                    det_role = self._modes[mode][0]
                    det = self._getComponent(det_role)
                    modeconf = self._modes[mode][1]
                    for act_role in list(modeconf.keys()):  # copy: entries may be deleted below
                        try:
                            act = self._getComponent(act_role)
                        except LookupError:
                            # TODO: just remove that move too?
                            logging.debug("Failed to find component %s, skipping it", act_role)
                            continue
                        if not self.affects(act.name, det.name):
                            logging.debug("Actuator %s doesn't affect %s, so removing it from mode %s",
                                          act_role, det_role, mode)
                            del modeconf[act_role]

        # will take care of executing setPath asynchronously
        self._executor = ThreadPoolExecutor(max_workers=1)

    def __del__(self):
        logging.debug("Ending path manager")

        # Restore the spectrometer focus, so that on next start, this value will
        # be used again as "out of chamber view".
        if self._chamber_view_own_focus and self._last_mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            if self._focus_out_chamber_view is not None:
                logging.debug("Restoring focus from before coming to chamber view to %s",
                              self._focus_out_chamber_view)
                try:
                    focus_comp.moveAbsSync(self._focus_out_chamber_view)
                except IOError as e:
                    logging.info("Actuator move failed giving the error %s", e)

        self._executor.shutdown(wait=False)

    def _getComponent(self, role):
        """
        same as model.getComponent, but optimised by caching the result
        return Component
        raise LookupError: if no component found
        """
        try:
            comp = self._known_comps[role]
        except LookupError:
            comp = model.getComponent(role=role)
            self._known_comps[role] = comp

        return comp

    @isasync
    def setPath(self, mode):
        """
        Just a wrapper of _doSetPath
        """
        f = self._executor.submit(self._doSetPath, mode)

        return f

    def _doSetPath(self, path):
        """
        Given a particular mode it sets all the necessary components of the
        optical path (found through the microscope component) to the
        corresponding positions.
        path (stream.Stream or str): The stream or the optical path mode
        raises:
                ValueError if the given mode does not exist
                IOError if a detector is missing
        """
        if isinstance(path, stream.Stream):
            mode = self.guessMode(path)
            if mode not in self._modes:
                raise ValueError("Mode '%s' does not exist" % (mode,))
            target = self.getStreamDetector(path)  # target detector
        else:
            mode = path
            if mode not in self._modes:
                raise ValueError("Mode '%s' does not exist" % (mode,))
            comp_role = self._modes[mode][0]
            comp = self._getComponent(comp_role)
            target = comp.name

        logging.debug("Going to optical path '%s', with target detector %s.", mode, target)

        fmoves = []  # moves in progress

        # Restore the spectrometer focus before any other move, as (on the SR193),
        # the value is grating/output dependent
        if self._chamber_view_own_focus and self._last_mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            self._focus_in_chamber_view = focus_comp.position.value.copy()
            if self._focus_out_chamber_view is not None:
                logging.debug("Restoring focus from before coming to chamber view to %s",
                              self._focus_out_chamber_view)
                fmoves.append(focus_comp.moveAbs(self._focus_out_chamber_view))

        modeconf = self._modes[mode][1]
        for comp_role, conf in modeconf.items():
            # Try to access the component needed
            try:
                comp = self._getComponent(comp_role)
            except LookupError:
                logging.debug("Failed to find component %s, skipping it", comp_role)
                continue

            mv = {}
            for axis, pos in conf.items():
                if axis == "power":
                    if model.hasVA(comp, "power"):
                        try:
                            if pos == 'on':
                                comp.power.value = comp.power.range[1]
                            else:
                                comp.power.value = comp.power.range[0]
                            logging.debug("Updating power of comp %s to %f", comp.name, comp.power.value)
                        except AttributeError:
                            logging.debug("Could not retrieve power range of %s component", comp_role)
                    continue
                if isinstance(pos, str) and pos.startswith("MD:"):
                    pos = self.mdToValue(comp, pos[3:])[axis]
                if axis in comp.axes:
                    if axis == "band":
                        # Handle the filter wheel in a special way. Search
                        # for the key that corresponds to the value, most probably
                        # to the 'pass-through'
                        choices = comp.axes[axis].choices
                        for key, value in choices.items():
                            if value == pos:
                                pos = key
                                # Just to store current band in order to restore
                                # it once we leave this mode
                                if self._last_mode not in ALIGN_MODES:
                                    self._stored[axis] = comp.position.value[axis]
                                break
                        else:
                            logging.debug("Choice %s is not present in %s axis", pos, axis)
                            continue
                    elif axis == "grating":
                        # If mirror is to be used but not found in grating
                        # choices, then we use zero order. In case of
                        # GRATING_NOT_MIRROR we either use the last known
                        # grating or the first grating that is not mirror.
                        choices = comp.axes[axis].choices
                        if pos == "mirror":
                            # Store current grating (if we use one at the moment)
                            # to restore it once we use a normal grating again
                            if choices[comp.position.value[axis]] != "mirror":
                                self._stored[axis] = comp.position.value[axis]
                                self._stored['wavelength'] = comp.position.value['wavelength']
                            # Use the special "mirror" grating, if it exists
                            for key, value in choices.items():
                                if value == "mirror":
                                    pos = key
                                    break
                            else:
                                # Fallback to zero order (aka "low-quality mirror")
                                axis = 'wavelength'
                                pos = 0
                        elif pos == GRATING_NOT_MIRROR:
                            if choices[comp.position.value[axis]] == "mirror":
                                # if there is a grating stored use this one
                                # otherwise find the non-mirror grating
                                if axis in self._stored:
                                    pos = self._stored[axis]
                                else:
                                    pos = self.findNonMirror(choices)
                                if 'wavelength' in self._stored:
                                    mv['wavelength'] = self._stored['wavelength']
                            else:
                                pos = comp.position.value[axis]  # no change
                            try:
                                del self._stored[axis]
                            except KeyError:
                                pass
                            try:
                                del self._stored['wavelength']
                            except KeyError:
                                pass
                        else:
                            logging.debug("Using grating position as-is: '%s'", pos)
                            pass  # use pos as-is
                    elif axis == "slit-in":
                        if self._last_mode not in ALIGN_MODES:
                            # TODO: save also the component
                            self._stored[axis] = comp.position.value[axis]
                    elif hasattr(comp.axes[axis], "choices") and isinstance(comp.axes[axis].choices, dict):
                        choices = comp.axes[axis].choices
                        for key, value in choices.items():
                            if value == pos:
                                pos = key
                                break
                    mv[axis] = pos
                else:
                    logging.debug("Not moving axis %s.%s as it is not present", comp_role, axis)

            try:
                fmoves.append(comp.moveAbs(mv))
            except AttributeError:
                logging.debug("%s not an actuator", comp_role)

        # Now take care of the selectors based on the target detector
        fmoves.extend(self.selectorsToPath(target))

        # If we are about to leave alignment modes, restore values
        if self._last_mode in ALIGN_MODES and mode not in ALIGN_MODES:
            if 'band' in self._stored:
                try:
                    flter = self._getComponent("filter")
                    fmoves.append(flter.moveAbs({"band": self._stored['band']}))
                except LookupError:
                    logging.debug("No filter component available")
            if 'slit-in' in self._stored:
                try:
                    spectrograph = self._getComponent("spectrograph")
                    fmoves.append(spectrograph.moveAbs({"slit-in": self._stored['slit-in']}))
                except LookupError:
                    logging.debug("No spectrograph component available")

        # Save last mode
        self._last_mode = mode

        # wait for all the moves to be completed
        for f in fmoves:
            try:
                f.result()
            except IOError as e:
                logging.warning("Actuator move failed giving the error %s", e)

        # When going to chamber view, store the current focus position, and
        # restore the special focus position for chamber, after _really_ all
        # the other moves have finished, because the grating/output selector
        # moves affects the current position of the focus.
        if self._chamber_view_own_focus and mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            self._focus_out_chamber_view = focus_comp.position.value.copy()
            if self._focus_in_chamber_view is not None:
                logging.debug("Restoring focus from previous chamber view to %s",
                              self._focus_in_chamber_view)
                try:
                    focus_comp.moveAbsSync(self._focus_in_chamber_view)
                except IOError as e:
                    logging.warning("Actuator move failed giving the error %s", e)

    def selectorsToPath(self, target):
        """
        Sets the selectors so the optical path leads to the target component
        (usually a detector).
        target (str): component name
        return (list of futures)
        """
        fmoves = []
        for comp in self._actuators:
            # TODO: pre-cache this as comp/target -> axis/pos

            # TODO: extend the path computation to "for every actuator which _affects_
            # the target, move it if its position is known, and update path to that actuator"?
            # Eg, this would improve path computation on SPARCv2 with fiber aligner
            mv = {}
            for an, ad in comp.axes.items():
                if hasattr(ad, "choices") and isinstance(ad.choices, dict):
                    for pos, value in ad.choices.items():
                        if target in value:
                            # set the position so it points to the target
                            mv[an] = pos

            comp_md = comp.getMetadata()
            if target in comp_md.get(model.MD_FAV_POS_ACTIVE_DEST, {}):
                mv.update(comp_md[model.MD_FAV_POS_ACTIVE])
            elif target in comp_md.get(model.MD_FAV_POS_DEACTIVE_DEST, {}):
                mv.update(comp_md[model.MD_FAV_POS_DEACTIVE])

            if mv:
                logging.debug("Move %s added so %s targets to %s", mv, comp.name, target)
                fmoves.append(comp.moveAbs(mv))
                # make sure this component is also on the optical path
                fmoves.extend(self.selectorsToPath(comp.name))

        return fmoves

    def guessMode(self, guess_stream):
        """
        Given a stream and by checking its components (e.g. role of detector)
        guesses and returns the corresponding optical path mode.
        guess_stream (object): The given optical stream
        returns (str): Mode estimated
        raises:
                LookupError if no mode can be inferred for the given stream
                IOError if given object is not a stream
        """
        if not isinstance(guess_stream, stream.Stream):
            raise IOError("Given object is not a stream")

        # Handle multiple detector streams
        if isinstance(guess_stream, stream.MultipleDetectorStream):
            for st in guess_stream.streams:
                try:
                    return self.guessMode(st)
                except LookupError:
                    pass
        else:
            for mode, conf in self.guessed.items():
                if conf[0] == guess_stream.detector.role:
                    return mode
        # In case no mode was found yet
        raise LookupError("No mode can be inferred for the given stream")

    def getStreamDetector(self, path_stream):
        """
        Given a stream find the detector.
        path_stream (object): The given stream
        returns (str): detector name
        raises:
                IOError if given object is not a stream
                LookupError: if stream has no detector
        """
        if not isinstance(path_stream, stream.Stream):
            raise IOError("Given object is not a stream")

        # Handle multiple detector streams
        if isinstance(path_stream, stream.MultipleDetectorStream):
            dets = []
            for st in path_stream.streams:
                try:
                    # Prefer the detectors which have a role in the mode, as it's much
                    # more likely to be the optical detector
                    # TODO: handle setting multiple optical paths? => return all the detectors
                    role = st.detector.role
                    name = st.detector.name
                    for conf in self.guessed.values():
                        if conf[0] == role:
                            return name
                    dets.append(name)
                except AttributeError:
                    pass
            if dets:
                logging.warning("No detector on stream %s has a known optical role", path_stream.name.value)
                return dets[0]
        else:
            try:
                return path_stream.detector.name
            except AttributeError:
                pass  # will raise error just after

        raise LookupError("Failed to find a detector on stream %s" % (path_stream.name.value))

    def findNonMirror(self, choices):
        """
        Given a dict of choices finds the one with value different than "mirror"
        """
        for key, value in choices.items():
            if value != "mirror":
                return key
        else:
            raise ValueError("Cannot find grating value in given choices")

    def mdToValue(self, comp, md_name):
        """
        Just retrieves the "md_name" metadata from component "comp"
        """
        md = comp.getMetadata()
        try:
            return md[md_name]  # md.get() would never raise, making the except unreachable
        except KeyError:
            raise KeyError("Metadata %s does not exist in component %s" % (md_name, comp.name))

    def affects(self, affecting, affected):
        """
        Returns True if "affecting" component affects -directly of indirectly-
        the "affected" component
        """
        path = self.findPath(affecting, affected)
        if path is None:
            return False
        else:
            return True

    def findPath(self, node1, node2, path=[]):
        """
        Find any path between node1 and node2 (may not be shortest)
        """
        path = path + [node1]
        if node1 == node2:
            return path
        if node1 not in self._graph:
            return None
        for node in self._graph[node1]:
            if node not in path:
                new_path = self.findPath(node, node2, path)
                if new_path:
                    return new_path
        return None
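
setPath above only wraps _doSetPath in a single-worker executor so that optical-path changes run asynchronously but never overlap. A reduced sketch of that wrapper pattern without any odemis specifics (the @isasync decorator and the real hardware moves are omitted; names here are illustrative):

from concurrent.futures import ThreadPoolExecutor

class PathManager:
    def __init__(self):
        # one worker: mode changes are queued and applied strictly one at a time
        self._executor = ThreadPoolExecutor(max_workers=1)

    def set_path(self, mode):
        # returns a Future immediately; callers may wait with .result() or add callbacks
        return self._executor.submit(self._do_set_path, mode)

    def _do_set_path(self, mode):
        # placeholder for the real component moves
        return "now in mode %r" % mode

    def close(self):
        self._executor.shutdown(wait=True)

pm = PathManager()
future = pm.set_path("chamber-view")
print(future.result())
pm.close()

Using a single worker gives the same guarantee the original relies on: even if several streams request a path change at once, the moves are serialized.
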
예제 #43
0
    def submit(self, fn, *args, **kwargs):
        if isinstance(fn, Task):
            self._task_map[id(fn)] = fn

        return ThreadPoolExecutor.submit(self, fn, *args, **kwargs)
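The snippet above is a method of a ThreadPoolExecutor subclass that remembers Task instances by id before delegating to the base submit. A hedged, self-contained version of that idea; the Task class here is a stand-in, not the original one:

from concurrent.futures import ThreadPoolExecutor

class Task:
    """Stand-in for the original Task type; only needs to be callable."""
    def __init__(self, name):
        self.name = name

    def __call__(self):
        return "ran %s" % self.name

class TrackingExecutor(ThreadPoolExecutor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._task_map = {}

    def submit(self, fn, *args, **kwargs):
        # remember Task objects so they can be looked up later (e.g. for cancellation)
        if isinstance(fn, Task):
            self._task_map[id(fn)] = fn
        return super().submit(fn, *args, **kwargs)

with TrackingExecutor(max_workers=2) as exe:
    print(exe.submit(Task("demo")).result())
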
예제 #44
0
def main():
    with ThreadPoolExecutor(max_workers=8) as executor:
        while True:
            task = []
            chatTextObject = driver.get_unread()
            for chatObject in chatTextObject:
                for TextObject in chatObject.messages:
                    if TextObject.type == 'chat':
                        if set(TextObject.content.lower().split(' ')) & set(
                                kasar):  # you can enable the Anti Toxic filter here
                            if '@g.us' in TextObject.chat_id:
                                try:
                                    ksr = Kasar(TextObject.chat_id)
                                    ksr.add_check_kick(chatObject.chat,
                                                       TextObject)
                                except Exception as e:
                                    pass  # ignore errors from the anti-toxic kick
                        elif TextObject.content.split(
                        )[0] in FullCommand or TextObject.content.split(
                                '|')[0] in FullCommand:
                            executor.submit(replyCommand, (TextObject),
                                            (chatObject.chat))
                        elif TextObject.content.split(
                        )[0] != '#' and '@g.us' != TextObject.chat_id:
                            if '@c.us' in TextObject.chat_id:
                                Mc = chatBot(TextObject.content)
                                Mc.max_()
                                balas = Mc.balas()
                                if tempChatBot.get(TextObject.content):
                                    TextObject.reply_message(
                                        random.choice(
                                            tempChatBot[TextObject.content]))
                                elif balas:
                                    TextObject.reply_message(balas)
                                else:
                                    TextObject.reply_message(
                                        random.choice([
                                            "aku ndak Bisa jawab",
                                            "bilang apa tadi ?", "gimana ya",
                                            "gak ngerti", "mana saya tau",
                                            "maksud kamu apa bro"
                                        ]))
                    elif TextObject.type == 'image':
                        executor.submit(recImageReplyCommand, (TextObject),
                                        (chatObject.chat))
                    elif TextObject.type == 'vcard':
                        masuk = 'SELAMAT DATANG :\n'
                        for i in TextObject.contacts:
                            for u in re.findall('waid\=(.*?):', i.decode()):
                                try:
                                    chatObject.chat.add_participant_group(
                                        '*****@*****.**' % (u))
                                    masuk += '-@%s' % (u)
                                except Exception:
                                    TextObject.reply_message(
                                        'Menambahkan %s Sukses' % (u))
                        try:
                            driver.wapi_functions.sendMessageWithMentions(
                                chatObject.chat.id, masuk, '')
                        except Exception:
                            pass  # ignore failures when sending the mention message
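
The message loop above polls driver.get_unread() and hands each command or image message to the pool with executor.submit, so slow handlers do not block polling. A stripped-down sketch of that dispatch shape with a fake message source; everything here is illustrative and the real WebWhatsAPI objects are not used:

import time
from concurrent.futures import ThreadPoolExecutor

def handle_text(message):
    # placeholder for replyCommand(...)
    print("text handled:", message)

def handle_image(message):
    # placeholder for recImageReplyCommand(...)
    print("image handled:", message)

def get_unread():
    # placeholder for driver.get_unread(); yields (type, payload) pairs
    return [("chat", "#menu"), ("image", "photo-123")]

def main_loop(iterations=3):
    with ThreadPoolExecutor(max_workers=8) as executor:
        for _ in range(iterations):
            for kind, payload in get_unread():
                if kind == "chat":
                    executor.submit(handle_text, payload)
                elif kind == "image":
                    executor.submit(handle_image, payload)
            time.sleep(1)  # poll interval instead of a tight loop

main_loop()
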
예제 #45
0
import asyncio
import itertools
import re
import time
from concurrent.futures.thread import ThreadPoolExecutor
from functools import partial
from io import BytesIO

import aiohttp
from PIL import Image

IMG_PATTERN = re.compile(r'url\(\/\/(live.staticflickr\.com.+\.jpg)\)')
thread_pool = ThreadPoolExecutor()


def flatten(iterable):
    return list(itertools.chain.from_iterable(iterable))


async def fetch_html(url, session):
    response = await session.get(url, allow_redirects=False)

    if response.status != 200:
        return ''

    return await response.text()


async def find_images_in_page(url, session):
    html = await fetch_html(url, session)
예제 #46
0
def stop_orphaned_threads():
    # make sure we shut down any orphaned threads and create a new Executor for each test
    PostgresStatementSamples.executor.shutdown(wait=True)
    PostgresStatementSamples.executor = ThreadPoolExecutor()
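
stop_orphaned_threads resets a class-level executor between tests. A hedged sketch of wiring that cleanup into pytest as an autouse fixture; pytest is assumed, and SharedWorker stands in for any class (like PostgresStatementSamples) that keeps one shared executor:

from concurrent.futures import ThreadPoolExecutor

import pytest

class SharedWorker:
    # stand-in for a class holding a shared, module-wide executor
    executor = ThreadPoolExecutor()

@pytest.fixture(autouse=True)
def fresh_executor():
    yield
    # after each test: drain leftover work, then give the next test a clean pool
    SharedWorker.executor.shutdown(wait=True)
    SharedWorker.executor = ThreadPoolExecutor()

def test_submits_work():
    assert SharedWorker.executor.submit(lambda: 1 + 1).result() == 2
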
예제 #47
0
class BaseRequestHandler(RequestHandler):

    executor = ThreadPoolExecutor(max_workers=server_configs["max_workers"])

    def set_default_headers(self):
        self.set_header('Content-Type', 'application/json')
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Expose-Headers", "*")
        self.set_header("Access-Control-Allow-Credentials", "false")
        self.set_header("Access-Control-Allow-Headers", "x-requested-with")
        self.set_header("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
        self.set_header('Content-Type', 'application/json')

    def response(self, responseObject):

        if isinstance(responseObject, Success):
            self.write(responseObject)

        if isinstance(responseObject, Panic):
            self.write_error(responseObject)

        if isinstance(responseObject, str):
            self.set_header('Content-Type', 'text/html; charset=UTF-8')
            self.write(responseObject)

    def write(self, chunk):

        if self._finished:
            raise RuntimeError("Cannot write() after finish()")

        if not isinstance(chunk, (bytes, unicode_type, dict, Success)):
            message = "write() only accepts bytes, unicode, dict, Success objects"
            if isinstance(chunk, list):
                message += ". Lists not accepted for security reasons; see " + \
                    "http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.write"
            raise TypeError(message)

        if isinstance(chunk, dict):
            logger.error("its dict")
            chunk = escape.json_encode(chunk)
            self.set_header("Content-Type", "application/json; charset=UTF-8")

        if isinstance(chunk, Success):
            chunk = escape.json_encode(chunk.reason)
            self.set_header("Content-Type", "application/json; charset=UTF-8")

        chunk = escape.utf8(chunk)
        self._write_buffer.append(chunk)
        self.finish()

    def write_error(self, error, **kwargs):

        self.set_status(error.status_code)
        self.set_header('Content-Type', 'application/json')
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # in debug mode, try to send a traceback
            lines = [l for l in traceback.format_exception(*kwargs["exc_info"])]
            
            self.finish(json.dumps({
                'error': {
                    'message': error.reason,
                    'traceback': lines,
                }
            }))
        else:
            self.finish(json.dumps({
                'error': {
                    'message': error.reason,
                }
            }))
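
The class-level executor attribute above is the hook that Tornado's run_on_executor decorator looks for when offloading blocking work. A small sketch of a handler using that pattern; routing, server_configs and the blocking call are assumptions, not part of the original handler:

import time

from concurrent.futures import ThreadPoolExecutor

from tornado import gen
from tornado.concurrent import run_on_executor
from tornado.web import RequestHandler

class BlockingWorkHandler(RequestHandler):
    # run_on_executor submits the decorated method to this class-level pool
    executor = ThreadPoolExecutor(max_workers=4)

    @run_on_executor
    def slow_lookup(self, key):
        time.sleep(1)  # stand-in for a blocking database or HTTP call
        return {"key": key}

    @gen.coroutine
    def get(self):
        result = yield self.slow_lookup(self.get_argument("key", "demo"))
        self.write(result)
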
예제 #48
0
music_url = "https://www.iesdouyin.com/web/api/v2/music/list/aweme/"  # music list endpoint
music_info_url = "https://www.iesdouyin.com/web/api/v2/music/info/"  # music detail endpoint

hd = {
    'authority': 'aweme.snssdk.com',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, '
                  'like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1'
}

THREADS = 2
# number of items per page
PAGE_NUM = 10

# TODO: add multithreaded downloading later
# 10 threads
pool = ThreadPoolExecutor(10)

HEADERS = {
    'authority': 'aweme.snssdk.com',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'zh-CN,zh;q=0.9',
    'pragma': 'no-cache',
    'x-requested-with': 'XMLHttpRequest',
    'accept': 'application/json',
    'cache-control': 'no-cache',
    'upgrade-insecure-requests': '1',
    'user-agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) "
                  "Version/11.0 Mobile/15A372 Safari/604.1",
}

Example #49
0
    def __init__(self, microscope):
        """
        microscope (Microscope): the whole microscope component, thus it can
            handle all the components needed
        """
        self.microscope = microscope
        self._graph = affectsGraph(self.microscope)

        # Use subset for modes guessed
        if microscope.role == "sparc2":
            self._modes = copy.deepcopy(SPARC2_MODES)
        elif microscope.role in ("sparc-simplex", "sparc"):
            self._modes = copy.deepcopy(SPARC_MODES)
        else:
            raise NotImplementedError("Microscope role '%s' unsupported" % (microscope.role,))

        # keep a list of already accessed components, to avoid creating new proxies
        # every time the mode changes
        self._known_comps = dict()  # str (role) -> component

        # All the actuators in the microscope, to cache proxies to them
        self._actuators = []
        for comp in model.getComponents():
            if hasattr(comp, 'axes') and isinstance(comp.axes, dict):
                self._actuators.append(comp)

        # last known axes position
        self._stored = {}
        self._last_mode = None  # previous mode that was set
        # Removes modes which are not supported by the current microscope
        # (iterate over a copy, because entries may be deleted)
        for m, (det, conf) in list(self._modes.items()):
            try:
                comp = self._getComponent(det)
            except LookupError:
                logging.debug("Removing mode %s, which is not supported", m)
                del self._modes[m]

        # Create the guess information out of the mode
        # TODO: just make it a dict comprole -> mode
        self.guessed = self._modes.copy()
        # No stream should ever imply alignment mode
        for m in ALIGN_MODES:
            try:
                del self.guessed[m]
            except KeyError:
                pass  # Mode to delete is just not there

        # Handle different focus for chamber-view (in SPARCv2)
        if "chamber-view" in self._modes:
            self._focus_in_chamber_view = None
            self._focus_out_chamber_view = None
            # Check whether the focus affects the chamber view
            self._chamber_view_own_focus = False
            try:
                chamb_det = self._getComponent(self._modes["chamber-view"][0])
                focus = self._getComponent("focus")
                if self.affects(focus.name, chamb_det.name):
                    self._chamber_view_own_focus = True
            except LookupError:
                pass
            if not self._chamber_view_own_focus:
                logging.debug("No focus component affecting chamber")

        try:
            spec = self._getComponent("spectrometer")
        except LookupError:
            spec = None
        if self.microscope.role == "sparc2" and spec:
            # Remove the moves that don't affect the detector
            # TODO: do this for _all_ modes
            for mode in ('spectral', 'monochromator'):
                if mode in self._modes:
                    det_role = self._modes[mode][0]
                    det = self._getComponent(det_role)
                    modeconf = self._modes[mode][1]
                    for act_role in list(modeconf.keys()):  # copy, since entries may be deleted below
                        try:
                            act = self._getComponent(act_role)
                        except LookupError:
                            # TODO: just remove that move too?
                            logging.debug("Failed to find component %s, skipping it", act_role)
                            continue
                        if not self.affects(act.name, det.name):
                            logging.debug("Actuator %s doesn't affect %s, so removing it from mode %s",
                                          act_role, det_role, mode)
                            del modeconf[act_role]

        # will take care of executing setPath asynchronously
        self._executor = ThreadPoolExecutor(max_workers=1)
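
The closing comment is the key detail: with max_workers=1 the executor runs submitted calls strictly one at a time, in submission order, which is an easy way to serialize operations such as optical path changes. A quick stand-alone illustration, not part of the snippet (set_path_stub and the mode names are made up):

import time
from concurrent.futures import ThreadPoolExecutor

serializer = ThreadPoolExecutor(max_workers=1)


def set_path_stub(mode):
    # Pretend to move some hardware; with a single worker these calls never overlap.
    time.sleep(0.1)
    print("switched to", mode)


for mode in ("ar", "spectral", "mirror-align"):
    serializer.submit(set_path_stub, mode)

serializer.shutdown(wait=True)  # block until the queued "moves" have all run
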
 def _retrieve_results(tasks: List[AwsQuantumTask],
                       max_workers: int) -> List[AwsQuantumTask]:
     with ThreadPoolExecutor(max_workers=max_workers) as executor:
         result_futures = [executor.submit(task.result) for task in tasks]
     return [future.result() for future in result_futures]
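
The helper above works because leaving the with ThreadPoolExecutor(...) block calls shutdown(wait=True), so every submitted task has finished by the time result() is read and nothing blocks unexpectedly. The same submit-then-collect pattern with plain callables, as a self-contained sketch (gather_results and the lambda jobs are illustrative):

from concurrent.futures import ThreadPoolExecutor


def gather_results(callables, max_workers=4):
    # Exiting the context manager waits for every submitted future to finish.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [executor.submit(fn) for fn in callables]
    # All futures are done here, so result() returns immediately
    # (or re-raises whatever exception the callable raised).
    return [f.result() for f in futures]


if __name__ == "__main__":
    jobs = [lambda i=i: i * i for i in range(5)]
    print(gather_results(jobs))  # [0, 1, 4, 9, 16]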
Example #51
0
                end = time.time()
                time_list.append((end-start)*1000)
                self.request += 1
                self.data_sent += getsizeof(self.message.encode())
                time.sleep(1/REQUESTS)



        finally:
            self.sock.close()

        print("Client information-", self.name, "sent", self.data_sent,
              "bytes, and made", self.request, "requests\n")

if __name__ == "__main__":
    with ThreadPoolExecutor(CLIENTS) as executor:
        for CLIENT in range(CLIENTS):
            executor.submit(
                clients.append(Client(SERVER_IP, PORT_NUMBER, MESSAGE, REQUESTS, CLIENT)))

    with ThreadPoolExecutor(CLIENTS) as executor:
        for client in clients:

            executor.submit(client.run())
    total_requests_sent = 0
    for c in clients:
        total_requests_sent += c.request
    print("Created", str(len(clients)),"clients")
    print("Total requests sent:", str(total_requests_sent))
    print("Average time of getting response: " + str(mean(time_list)) + " ms" + "\n")
Example #52
0
class DBMAsyncJob(object):
    """
    Runs Async Jobs
    """

    # Set an arbitrarily high limit so that dbm async jobs (which aren't CPU bound) don't
    # get artificially limited by the default max_workers count. Note that since threads are
    # created lazily, it's safe to set a high maximum.
    executor = ThreadPoolExecutor(100000)

    def __init__(
            self,
            check,
            config_host=None,
            min_collection_interval=15,
            dbms="TODO",
            rate_limit=1,
            run_sync=False,
            enabled=True,
            expected_db_exceptions=(),
            shutdown_callback=None,
            job_name=None,
    ):
        self._check = check
        self._config_host = config_host
        self._min_collection_interval = min_collection_interval
        # map[dbname -> psycopg connection]
        self._log = get_check_logger()
        self._job_loop_future = None
        self._cancel_event = threading.Event()
        self._tags = None
        self._tags_no_db = None
        self._run_sync = None
        self._db_hostname = None
        self._last_check_run = 0
        self._shutdown_callback = shutdown_callback
        self._dbms = dbms
        self._rate_limiter = ConstantRateLimiter(rate_limit)
        self._run_sync = run_sync
        self._enabled = enabled
        self._expected_db_exceptions = expected_db_exceptions
        self._job_name = job_name

    def cancel(self):
        self._cancel_event.set()

    def run_job_loop(self, tags):
        """
        Start the job loop, either synchronously or on the shared executor.

        :param tags: list of str tags attached to the job's metrics
        """
        if not self._enabled:
            self._log.debug("[job=%s] Job not enabled.", self._job_name)
            return
        if not self._db_hostname:
            self._db_hostname = resolve_db_host(self._config_host)
        self._tags = tags
        self._tags_str = ','.join(self._tags)
        self._job_tags = self._tags + ["job:{}".format(self._job_name)]
        self._job_tags_str = ','.join(self._job_tags)
        self._last_check_run = time.time()
        if self._run_sync or is_affirmative(
                os.environ.get('DBM_THREADED_JOB_RUN_SYNC', "false")):
            self._log.debug("Running threaded job synchronously. job=%s",
                            self._job_name)
            self._run_job_rate_limited()
        elif self._job_loop_future is None or not self._job_loop_future.running(
        ):
            self._job_loop_future = DBMAsyncJob.executor.submit(self._job_loop)
        else:
            self._log.debug("Job loop already running. job=%s", self._job_name)

    def _job_loop(self):
        try:
            self._log.info("[%s] Starting job loop", self._job_tags_str)
            while True:
                if self._cancel_event.is_set():
                    self._log.info("[%s] Job loop cancelled",
                                   self._job_tags_str)
                    self._check.count("dd.{}.async_job.cancel".format(
                        self._dbms),
                                      1,
                                      tags=self._job_tags,
                                      raw=True)
                    break
                if time.time(
                ) - self._last_check_run > self._min_collection_interval * 2:
                    self._log.info(
                        "[%s] Job loop stopping due to check inactivity",
                        self._job_tags_str)
                    self._check.count("dd.{}.async_job.inactive_stop".format(
                        self._dbms),
                                      1,
                                      tags=self._job_tags,
                                      raw=True)
                    break
                self._run_job_rate_limited()
        except Exception as e:
            if self._cancel_event.is_set():
                # cancelling can cause exceptions if the connection is closed in the middle of the check run;
                # in this case we still want to report it as a cancellation instead of a crash
                self._log.debug("[%s] Job loop error after cancel: %s",
                                self._job_tags_str, e)
                self._log.info("[%s] Job loop cancelled", self._job_tags_str)
                self._check.count("dd.{}.async_job.cancel".format(self._dbms),
                                  1,
                                  tags=self._job_tags,
                                  raw=True)
            elif isinstance(e, self._expected_db_exceptions):
                self._log.warning(
                    "[%s] Job loop database error: %s",
                    self._job_tags_str,
                    e,
                    exc_info=self._log.getEffectiveLevel() == logging.DEBUG,
                )
                self._check.count(
                    "dd.{}.async_job.error".format(self._dbms),
                    1,
                    tags=self._job_tags +
                    ["error:database-{}".format(type(e))],
                    raw=True,
                )
            else:
                self._log.exception("[%s] Job loop crash", self._job_tags_str)
                self._check.count(
                    "dd.{}.async_job.error".format(self._dbms),
                    1,
                    tags=self._job_tags + ["error:crash-{}".format(type(e))],
                    raw=True,
                )
        finally:
            self._log.info("[%s] Shutting down job loop", self._job_tags_str)
            if self._shutdown_callback:
                self._shutdown_callback()

    def _set_rate_limit(self, rate_limit):
        if self._rate_limiter.rate_limit_s != rate_limit:
            self._rate_limiter = ConstantRateLimiter(rate_limit)

    def _run_job_rate_limited(self):
        self._run_job_traced()
        self._rate_limiter.sleep()

    @_traced_dbm_async_job_method
    def _run_job_traced(self):
        return self.run_job()

    def run_job(self):
        raise NotImplementedError()
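
The comment on DBMAsyncJob.executor relies on ThreadPoolExecutor creating worker threads lazily: a huge max_workers costs nothing until jobs are actually submitted. A small stand-alone check of that behaviour (not part of the snippet above):

import threading
from concurrent.futures import ThreadPoolExecutor

pool = ThreadPoolExecutor(max_workers=100000)  # same idea as DBMAsyncJob.executor

before = threading.active_count()
futures = [pool.submit(lambda: None) for _ in range(5)]
for f in futures:
    f.result()
after = threading.active_count()

# Only a handful of worker threads were created, despite the huge max_workers.
print("threads before={}, after={}".format(before, after))
pool.shutdown()
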
'''
Created on Jan 8, 2019

@author: balasubramanyas
'''

from concurrent.futures.thread import ThreadPoolExecutor


def printData(x):
    return x + 2


if __name__ == '__main__':
    values = [1, 2, 3, 4]
    executor = ThreadPoolExecutor(2)
    # Submit method
    print("Executor.submit() : ")
    submitresultData = {executor.submit(printData, i): i for i in values}
    for res in submitresultData:
        print(res.result())

    # Map method
    print("Executor.map() : ")
    mapresultData = executor.map(printData, values)
    for res in mapresultData:
        print(res)
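
The loop over submitresultData prints results in submission order; when completion order matters, concurrent.futures.as_completed is the usual companion to submit(). A small variant of the same demo (slow_square is illustrative):

from concurrent.futures import ThreadPoolExecutor, as_completed


def slow_square(x):
    return x * x


if __name__ == '__main__':
    values = [1, 2, 3, 4]
    # The context manager shuts the pool down when the block exits.
    with ThreadPoolExecutor(2) as executor:
        futures = {executor.submit(slow_square, v): v for v in values}
        for future in as_completed(futures):
            # Futures arrive here in completion order, not submission order.
            print(futures[future], "->", future.result())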
Example #54
0
    def refresh_batch(self, plot_paths: List[Path],
                      plot_directories: Set[Path]) -> PlotRefreshResult:
        start_time: float = time.time()
        result: PlotRefreshResult = PlotRefreshResult(
            processed=len(plot_paths))
        counter_lock = threading.Lock()

        log.debug(
            f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}"
        )

        if self.match_str is not None:
            log.info(
                f'Only loading plots that contain "{self.match_str}" in the file or directory name'
            )

        def process_file(file_path: Path) -> Optional[PlotInfo]:
            if not self._refreshing_enabled:
                return None
            filename_str = str(file_path)
            if self.match_str is not None and self.match_str not in filename_str:
                return None
            if (file_path in self.failed_to_open_filenames and
                (time.time() - self.failed_to_open_filenames[file_path]) <
                    self.refresh_parameter.retry_invalid_seconds):
                # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
                return None

            if file_path in self.plots:
                return self.plots[file_path]

            entry: Optional[Tuple[str,
                                  Set[str]]] = self.plot_filename_paths.get(
                                      file_path.name)
            if entry is not None:
                loaded_parent, duplicates = entry
                if str(file_path.parent) in duplicates:
                    log.debug(f"Skip duplicated plot {str(file_path)}")
                    return None
            try:
                if not file_path.exists():
                    return None

                prover = DiskProver(str(file_path))

                log.debug(f"process_file {str(file_path)}")

                expected_size = _expected_plot_size(
                    prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = file_path.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                if prover.get_size(
                ) >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    return None

                cache_entry = self.cache.get(prover.get_id())
                if cache_entry is None:
                    (
                        pool_public_key_or_puzzle_hash,
                        farmer_public_key,
                        local_master_sk,
                    ) = parse_plot_info(prover.get_memo())

                    # Only use plots that have the correct keys associated with them
                    if farmer_public_key not in self.farmer_public_keys:
                        log.warning(
                            f"Plot {file_path} has a farmer public key that is not in the farmer's pk list."
                        )
                        self.no_key_filenames.add(file_path)
                        if not self.open_no_key_filenames:
                            return None

                    pool_public_key: Optional[G1Element] = None
                    pool_contract_puzzle_hash: Optional[bytes32] = None
                    if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                        pool_public_key = pool_public_key_or_puzzle_hash
                    else:
                        assert isinstance(pool_public_key_or_puzzle_hash,
                                          bytes32)
                        pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

                    if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                        log.warning(
                            f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list."
                        )
                        self.no_key_filenames.add(file_path)
                        if not self.open_no_key_filenames:
                            return None

                    # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
                    # the current plot from that list if it's in there, since we passed the key checks above.
                    if file_path in self.no_key_filenames:
                        self.no_key_filenames.remove(file_path)

                    local_sk = master_sk_to_local_sk(local_master_sk)

                    plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                        local_sk.get_g1(), farmer_public_key,
                        pool_contract_puzzle_hash is not None)

                    cache_entry = CacheEntry(pool_public_key,
                                             pool_contract_puzzle_hash,
                                             plot_public_key)
                    self.cache.update(prover.get_id(), cache_entry)

                with self.plot_filename_paths_lock:
                    paths: Optional[Tuple[
                        str, Set[str]]] = self.plot_filename_paths.get(
                            file_path.name)
                    if paths is None:
                        paths = (str(Path(prover.get_filename()).parent),
                                 set())
                        self.plot_filename_paths[file_path.name] = paths
                    else:
                        paths[1].add(str(Path(prover.get_filename()).parent))
                        log.warning(
                            f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}."
                        )
                        return None

                new_plot_info: PlotInfo = PlotInfo(
                    prover,
                    cache_entry.pool_public_key,
                    cache_entry.pool_contract_puzzle_hash,
                    cache_entry.plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )

                with counter_lock:
                    result.loaded.append(new_plot_info)

                if file_path in self.failed_to_open_filenames:
                    del self.failed_to_open_filenames[file_path]

            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {file_path}. {e} {tb}")
                self.failed_to_open_filenames[file_path] = int(time.time())
                return None
            log.info(
                f"Found plot {file_path} of size {new_plot_info.prover.get_size()}"
            )
            return new_plot_info

        with self, ThreadPoolExecutor() as executor:
            plots_refreshed: Dict[Path, PlotInfo] = {}
            for new_plot in executor.map(process_file, plot_paths):
                if new_plot is not None:
                    plots_refreshed[Path(
                        new_plot.prover.get_filename())] = new_plot
            self.plots.update(plots_refreshed)

        result.duration = time.time() - start_time

        self.log.debug(
            f"refresh_batch: loaded {len(result.loaded)}, "
            f"removed {len(result.removed)}, processed {result.processed}, "
            f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
            f"duration: {result.duration:.2f} seconds")
        return result
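
refresh_batch fans process_file out over the pool with executor.map, which yields results in the same order as plot_paths, so the caller can simply skip the None entries for files that were filtered out or failed to load. The same map-and-filter pattern in isolation (load_one and the example paths are illustrative):

from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Optional


def load_one(path: Path) -> Optional[str]:
    # Return None for entries to skip, a value for entries to keep.
    if path.suffix != ".plot":
        return None
    return path.stem


paths = [Path("a.plot"), Path("b.txt"), Path("c.plot")]
with ThreadPoolExecutor() as executor:
    loaded = {p: name for p, name in zip(paths, executor.map(load_one, paths))
              if name is not None}
print(loaded)  # keeps a.plot and c.plot, drops b.txt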
Example #55
0
File: http.py Project: westonpace/bowser
 def __init__(self):
     self.__async_executor = ThreadPoolExecutor(max_workers=10)
     self.logger = logging.getLogger(__name__)
     self.__http = Http()
Example #56
0
    parser.add_argument("--use_cache", type=str, required=True)
    parser.add_argument("--images", type=str, required=True)
    parser.add_argument("--results", type=str, required=True)

    args = parser.parse_args()
    images = Path(args.images).glob("*.jpg")

    face_det = fm.FaceMatch(args.fid_m)
    human_det = hd.CocoDetectorAPI(path_to_ckpt=args.hd_m)

    experiment = ef.BlurExperiments(face_det, human_det)

    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(0)

    for image in images:

        image_name = Path(image).resolve().stem
        full_image_path = args.results + "/{}_{}_data.pkl".format(image_name, args.blur_kernel)

        if not (args.use_cache == 'y' and os.path.exists(full_image_path)):
            process_image(image_name, image, args, experiment, args.results)


if __name__ == "__main__":

    pool = ThreadPool(4)
    executor = ThreadPoolExecutor(max_workers=8)
    main_method()
Example #57
0
 def __init__(self):
     TriggerController.__INSTANCE = self
     
     self.__registeredRuleIds = set([])
     self.__threadPool = ThreadPoolExecutor(max_workers=1)
Example #59
0
File: _futures.py Project: delmic/odemis
 def __init__(self, max_workers):
     ThreadPoolExecutor.__init__(self, max_workers)
     self._queue = collections.deque() # thread-safe queue of futures
class SnowflakeChunkDownloader(object):
    """Large Result set chunk downloader class."""
    def _pre_init(
        self,
        chunks,
        connection,
        cursor,
        qrmk,
        chunk_headers,
        query_result_format="JSON",
        prefetch_threads=DEFAULT_CLIENT_PREFETCH_THREADS,
    ):
        self._query_result_format = query_result_format

        self._downloader_error = None

        self._connection = connection
        self._cursor = cursor
        self._qrmk = qrmk
        self._chunk_headers = chunk_headers

        self._chunk_size = len(chunks)
        self._chunks = {}
        self._chunk_cond = Condition()

        self._effective_threads = min(prefetch_threads, self._chunk_size)
        if self._effective_threads < 1:
            self._effective_threads = 1

        for idx, chunk in enumerate(chunks):
            logger.debug("queued chunk %d: rowCount=%s", idx,
                         chunk["rowCount"])
            self._chunks[idx] = SnowflakeChunk(
                url=chunk["url"],
                result_data=None,
                ready=False,
                row_count=int(chunk["rowCount"]),
            )

        logger.debug(
            "prefetch threads: %s, "
            "number of chunks: %s, "
            "effective threads: %s",
            prefetch_threads,
            self._chunk_size,
            self._effective_threads,
        )

        self._pool = ThreadPoolExecutor(self._effective_threads)

        self._downloading_chunks_lock = Lock()
        self._total_millis_downloading_chunks = 0
        self._total_millis_parsing_chunks = 0

        self._next_chunk_to_consume = 0

    def __init__(
        self,
        chunks,
        connection,
        cursor,
        qrmk,
        chunk_headers,
        query_result_format="JSON",
        prefetch_threads=DEFAULT_CLIENT_PREFETCH_THREADS,
    ):
        self._pre_init(
            chunks,
            connection,
            cursor,
            qrmk,
            chunk_headers,
            query_result_format=query_result_format,
            prefetch_threads=prefetch_threads,
        )
        logger.debug("Chunk Downloader in memory")
        for idx in range(self._effective_threads):
            self._pool.submit(self._download_chunk, idx)
        self._next_chunk_to_download = self._effective_threads

    def _download_chunk(self, idx):
        """Downloads a chunk asynchronously."""
        logger.debug("downloading chunk %s/%s", idx + 1, self._chunk_size)
        headers = {}
        if self._chunk_headers is not None:
            headers = self._chunk_headers
            logger.debug("use chunk headers from result")
        elif self._qrmk is not None:
            headers[SSE_C_ALGORITHM] = SSE_C_AES
            headers[SSE_C_KEY] = self._qrmk

        last_error = None
        backoff = DecorrelateJitterBackoff(1, 16)
        sleep_timer = 1
        for retry in range(10):
            try:
                logger.debug(
                    "started getting the result set %s: %s",
                    idx + 1,
                    self._chunks[idx].url,
                )
                result_data = self._fetch_chunk(self._chunks[idx].url, headers)
                logger.debug(
                    "finished getting the result set %s: %s",
                    idx + 1,
                    self._chunks[idx].url,
                )

                if isinstance(result_data, ResultIterWithTimings):
                    metrics = result_data.get_timings()
                    with self._downloading_chunks_lock:
                        self._total_millis_downloading_chunks += metrics[
                            ResultIterWithTimings.DOWNLOAD]
                        self._total_millis_parsing_chunks += metrics[
                            ResultIterWithTimings.PARSE]

                with self._chunk_cond:
                    self._chunks[idx] = self._chunks[idx]._replace(
                        result_data=result_data, ready=True)
                    self._chunk_cond.notify_all()
                    logger.debug("added chunk %s/%s to a chunk list.", idx + 1,
                                 self._chunk_size)
                break
            except Exception as e:
                last_error = e
                sleep_timer = backoff.next_sleep(1, sleep_timer)
                logger.exception(
                    "Failed to fetch the large result set chunk %s/%s for the %s th time, backing off for %s s",
                    idx + 1,
                    self._chunk_size,
                    retry + 1,
                    sleep_timer,
                )
                time.sleep(sleep_timer)
        else:
            self._downloader_error = last_error

    def next_chunk(self):
        """Gets the next chunk if ready."""
        logger.debug("next_chunk_to_consume={next_chunk_to_consume}, "
                     "next_chunk_to_download={next_chunk_to_download}, "
                     "total_chunks={total_chunks}".format(
                         next_chunk_to_consume=self._next_chunk_to_consume + 1,
                         next_chunk_to_download=self._next_chunk_to_download +
                         1,
                         total_chunks=self._chunk_size,
                     ))
        if self._next_chunk_to_consume > 0:
            # clean up the previously fetched data
            n = self._next_chunk_to_consume - 1
            self._chunks[n] = self._chunks[n]._replace(result_data=None,
                                                       ready=False)

            if self._next_chunk_to_download < self._chunk_size:
                self._pool.submit(self._download_chunk,
                                  self._next_chunk_to_download)
                self._next_chunk_to_download += 1

        if self._downloader_error is not None:
            raise self._downloader_error

        for attempt in range(MAX_RETRY_DOWNLOAD):
            logger.debug(
                "waiting for chunk %s/%s"
                " in %s/%s download attempt",
                self._next_chunk_to_consume + 1,
                self._chunk_size,
                attempt + 1,
                MAX_RETRY_DOWNLOAD,
            )
            done = False
            for wait_counter in range(MAX_WAIT):
                with self._chunk_cond:
                    if self._downloader_error:
                        raise self._downloader_error
                    if self._chunks[self._next_chunk_to_consume].ready:
                        done = True
                        break
                    logger.debug(
                        "chunk %s/%s is NOT ready to consume"
                        " in %s/%s(s)",
                        self._next_chunk_to_consume + 1,
                        self._chunk_size,
                        (wait_counter + 1) * WAIT_TIME_IN_SECONDS,
                        MAX_WAIT * WAIT_TIME_IN_SECONDS,
                    )
                    self._chunk_cond.wait(WAIT_TIME_IN_SECONDS)
            else:
                logger.debug(
                    "chunk %s/%s is still NOT ready. Restarting chunk "
                    "downloader threads",
                    self._next_chunk_to_consume + 1,
                    self._chunk_size,
                )
                self._pool.shutdown(wait=False)  # terminate the thread pool
                self._pool = ThreadPoolExecutor(self._effective_threads)
                for idx0 in range(self._effective_threads):
                    idx = idx0 + self._next_chunk_to_consume
                    self._pool.submit(self._download_chunk, idx)
            if done:
                break
        else:
            Error.errorhandler_wrapper(
                self._connection,
                self._cursor,
                OperationalError,
                {
                    "msg": "The result set chunk download fails or hang for "
                    "unknown reason.",
                    "errno": ER_CHUNK_DOWNLOAD_FAILED,
                },
            )
        logger.debug(
            "chunk %s/%s is ready to consume",
            self._next_chunk_to_consume + 1,
            self._chunk_size,
        )

        ret = self._chunks[self._next_chunk_to_consume]
        self._next_chunk_to_consume += 1
        return ret

    def terminate(self):
        """Terminates downloading the chunks."""
        if hasattr(self, u"_pool") and self._pool is not None:
            self._pool.shutdown()
            self._pool = None

    def __del__(self):
        try:
            self.terminate()
        except Exception:
            # ignore all errors in the destructor
            pass

    def _fetch_chunk(self, url, headers):
        """Fetch the chunk from S3."""
        handler = (JsonBinaryHandler(
            is_raw_binary_iterator=True) if self._query_result_format == "json"
                   else ArrowBinaryHandler(self._cursor, self._connection))

        return self._connection.rest.fetch(
            "get",
            url,
            headers,
            timeout=DEFAULT_REQUEST_TIMEOUT,
            is_raw_binary=True,
            binary_data_handler=handler,
        )
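
SnowflakeChunkDownloader keeps a bounded number of chunks in flight: __init__ seeds the pool with _effective_threads downloads, and next_chunk submits one more download each time a chunk is consumed. The same sliding-window prefetch in a stripped-down form (download and the chunk count are illustrative):

from concurrent.futures import ThreadPoolExecutor


def download(idx):
    return "chunk-{}".format(idx)  # stand-in for the real HTTP fetch


chunks = list(range(10))
window = 3  # how many downloads may run ahead of the consumer

with ThreadPoolExecutor(max_workers=window) as pool:
    futures = [pool.submit(download, i) for i in range(min(window, len(chunks)))]
    next_to_download = len(futures)
    for next_to_consume in range(len(chunks)):
        data = futures[next_to_consume].result()  # blocks until that chunk is ready
        if next_to_download < len(chunks):
            futures.append(pool.submit(download, next_to_download))
            next_to_download += 1
        print(data)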