Example #1
0
 def __init__(self, files):
     """Bounded image-loading queue fed by a background daemon thread.

     files -- sequence of image sources consumed by self.load_img.
     """
     Queue.__init__(self, 64)  # cap buffered images at 64
     self.files = files
     # Set the flag BEFORE starting the worker: the original assigned it
     # after start(), so load_img could observe a missing attribute.
     self.finished = False
     self.worker = Thread(target=self.load_img)
     # setDaemon() is deprecated since Python 3.10; assign the attribute.
     self.worker.daemon = True
     self.worker.start()
Example #2
0
    def __init__(self, path, maxsize=0, chunksize=100, temp_subdir=False):
        """Open (or create) a file-backed persistent queue rooted at *path*.

        path -- directory holding the chunk files and the info file.
        maxsize -- bound passed to the synchronous in-memory base queue.
        chunksize -- number of items stored per chunk file.
        temp_subdir -- when True, keep temp files in a subdirectory of *path*.
        """
        self.path = path
        # temp_subdir is used for overriding temp file location, by
        # explicitly indicating that it must be a subdirectory of the
        # path used for persisting the elements. Reference:
        # https://github.com/balena/python-pqueue/issues/1
        self.path_temp = None
        if temp_subdir:
            self.path_temp = os.path.join(path, TEMP_SUBDIRECTORY)
            if not os.path.exists(self.path_temp):
                os.makedirs(self.path_temp)

        self.chunksize = chunksize
        SyncQ.__init__(self, maxsize)
        # Recover persisted head/tail positions from the info file.
        self.info = self._loadinfo()
        # truncate head case it contains garbage
        hnum, hcnt, hoffset = self.info['head']
        headfn = self._qfile(hnum)
        if os.path.exists(headfn):
            if hoffset < os.path.getsize(headfn):
                _truncate(headfn, hoffset)
        # let the head file open
        self.headf = self._openchunk(hnum, 'ab+')
        # let the tail file open
        tnum, _, toffset = self.info['tail']
        self.tailf = self._openchunk(tnum)
        # resume reading where the previous session stopped
        self.tailf.seek(toffset)
        # update unfinished tasks with the current number of enqueued tasks
        self.unfinished_tasks = self.info['size']
        # optimize info file updates
        self.update_info = True
Example #3
0
 def __init__(self):
     """Create an IterableQueue and register it with the shell processor.

     Queue is an old-style class, so super() cannot be used; the base
     initializer is invoked directly instead.
     """
     Queue.__init__(self)
     ShellProcessor.connections.append(self)
     _logger.debug("Registering a client for processing")
    def __init__(self):
        """Initialize the thread-pool queue for URL download tasks.

        Pool size, timeout and part count are read from the global
        configuration; the queue capacity is threadpoolsize + 5.
        """

        # list of spawned threads
        self._threads = []
        # list of url tasks
        self._tasks = []
        self._cfg = objects.config
        # Maximum number of threads spawned
        self._numthreads = self._cfg.threadpoolsize
        self._timeout = self._cfg.timeout

        # Last thread report time
        self._ltrt = 0.0
        # Local buffer
        self.buffer = []
        # Data dictionary for multi-part downloads
        # Keys are URLs and value is the data
        self._multipartdata = {}
        # Status of URLs being downloaded in
        # multipart. Keys are URLs
        self._multipartstatus = {}
        # Flag that is set when one of the threads
        # in a multipart download fails
        self._multiparterror = False
        # Number of parts
        self._parts = self._cfg.numparts
        # Condition object
        self._cond = threading.Condition(threading.Lock())
        # Condition object for waiting on end condition
        self._endcond = threading.Condition(threading.Lock())
        # Capacity leaves a little slack beyond the worker count.
        Queue.__init__(self, self._numthreads + 5)
Example #5
0
    def __init__(self):
        """Initialize the download thread-pool queue.

        Configuration (pool size, timeout, parts, flush mode) comes from
        the registered 'config' object.
        """

        # list of spawned threads
        self._threads = []
        # list of url tasks
        self._tasks = []
        self._cfg = GetObject('config')
        # Maximum number of threads spawned
        self._numthreads = self._cfg.threadpoolsize
        self._timeout = self._cfg.timeout

        # Last thread report time
        self._ltrt = 0.0
        # Local buffer
        self.buffer = []
        # Data dictionary for multi-part downloads
        # Keys are URLs and value is the data
        self._multipartdata = {}
        # Status of URLs being downloaded in
        # multipart. Keys are URLs
        self._multipartstatus = {}
        # Number of parts
        self._parts = self._cfg.numparts
        # Data mode
        # 0 => Flush data
        # 1 => keep data in memory (default)
        # This mode is perpetuated to connector objects
        # and reader objects belonging to connectors. It
        # is not an attribute of this class
        self._datamode = 1
        if self._cfg.flushdata: self._datamode = 0
        # Condition object
        self._cond = threading.Condition(threading.Lock())
        # Capacity leaves a little slack beyond the worker count.
        Queue.__init__(self, self._numthreads + 5)
Example #6
0
    def __init__(self):
        """Initialize the thread-pool queue from the global configuration.

        Pool size, timeout and the number of download parts come from the
        registered 'config' object; capacity is threadpoolsize + 5.
        """

        # list of spawned threads
        self.__threads = []
        # list of url tasks
        self.__tasks = []

        cfg = GetObject('config')
        # Maximum number of threads spawned
        self.__numthreads = cfg.threadpoolsize
        self.__timeout = cfg.timeout

        # Last thread report time
        self._ltrt = 0.0
        # Local buffer
        self.buffer = []
        # Data dictionary for multi-part downloads
        # Keys are URLs and value is the data
        self.__multipartdata = {}
        # Status of URLs being downloaded in
        # multipart. Keys are URLs
        self.__multipartstatus = {}
        # Number of parts: reuse the config object fetched above instead
        # of performing a second GetObject('config') registry lookup.
        self.__parts = cfg.numparts
        # Condition object
        self._cond = threading.Condition(threading.Lock())
        Queue.__init__(self, self.__numthreads + 5)
Example #7
0
    def __init__(self):
        """Initialize the thread-pool queue for URL download tasks.

        Reads pool size, timeout and part count from the global
        configuration; also prepares multipart bookkeeping and the
        optional monitor hook used with hget.
        """

        # list of spawned threads
        self._threads = []
        # list of url tasks
        self._tasks = []
        self._cfg = objects.config
        # Maximum number of threads spawned
        self._numthreads = self._cfg.threadpoolsize
        self._timeout = self._cfg.timeout

        # Last thread report time
        self._ltrt = 0.0
        # Local buffer
        self.buffer = []
        # Data dictionary for multi-part downloads
        # Keys are URLs and value is the data
        self._multipartdata = {}
        # Status of URLs being downloaded in
        # multipart. Keys are URLs
        self._multipartstatus = {}
        # Flag that is set when one of the threads
        # in a multipart download fails
        self._multiparterror = False
        # Number of parts
        self._parts = self._cfg.numparts
        # Condition object
        self._cond = threading.Condition(threading.Lock())
        # Condition object for waiting on end condition
        self._endcond = threading.Condition(threading.Lock())
        # Monitor object, used with hget
        self._monitor = None

        Queue.__init__(self, self._numthreads + 5)
Example #8
0
 def __init__(self, maxsize=0, default_priority=1):
     """Multi-priority queue.

     maxsize -- soft cap on the total number of buffered items.
     default_priority -- priority used when the caller supplies none.
     """
     Queue.__init__(self, 1)
     # One sub-queue per priority level, indexed for fast lookup.
     self._queues = []
     self._queue_index = {}
     self._contains = 0
     self._max_buffer_size = maxsize
     self._default_priority = default_priority
Example #9
0
 def __init__(self, *args):
     """Initialize the queue and detect legacy (<= 2.2) Python runtimes."""
     _Queue.__init__(self, *args)
     # Interpreters newer than 2.2 expose the modern Queue internals.
     modern = PYTHON_VERSION[MAJOR] > 2 or PYTHON_VERSION[MINOR] > 2
     self.__legacy = not modern
Example #10
0
 def __init__(self, maxsize=0, default_priority=1):
     """Set up the priority-bucket bookkeeping for this queue."""
     Queue.__init__(self, 1)
     self._contains = 0
     self._max_buffer_size = maxsize
     self._default_priority = default_priority
     # Per-priority sub-queues and the index that maps into them.
     self._queues = []
     self._queue_index = {}
Example #11
0
 def __init__(self, *args):
     """Forward *args to the base queue and record the legacy-runtime flag."""
     _Queue.__init__(self, *args)
     # Legacy means Python 2.2 or older (De Morgan of the modern check).
     if PYTHON_VERSION[MAJOR] <= 2 and PYTHON_VERSION[MINOR] <= 2:
         self.__legacy = True
     else:
         self.__legacy = False
     return
Example #12
0
 def __init__(self, url, name="default"):
     """Crawl queue seeded with *url* and backed by persistent storage."""
     Queue.__init__(self)
     self.readCount = 0
     self.lock = Lock()
     # Persist the seed URL, then hydrate the queue from storage.
     self.storage = Storage(name)
     self.storage.putQueue(url)
     self.loadFromStorage()
Example #13
0
    def __init__(self, maxsize, itemLimit):
        """Bounded queue with a hard item limit and progress counters."""
        Queue.__init__(self, maxsize)
        self.itemLimit = itemLimit
        # Counters: items seen, handed out, and finished.
        self.itemCount = 0
        self.processed = 0
        self.completed = 0
Example #14
0
 def __init__(self, maxsize):
     """Create a pool of *maxsize* daemon worker threads fed by this queue.

     maxsize -- number of workers and the queue capacity; must be positive.
     """
     assert maxsize > 0, 'maxsize > 0 required for Pool class'
     Queue.__init__(self, maxsize)
     for i in range(maxsize):
         thread = Thread(target=self._worker)
         # setDaemon() is deprecated since Python 3.10; workers die with
         # the main thread via the daemon attribute.
         thread.daemon = True
         thread.start()
Example #15
0
    def __init__(self):
        """Initialize the download thread-pool queue.

        Pool size, timeout, part count and data-flush mode come from
        the registered 'config' object.
        """

        # list of spawned threads
        self._threads = []
        # list of url tasks
        self._tasks = []
        self._cfg = GetObject('config')
        # Maximum number of threads spawned
        self._numthreads = self._cfg.threadpoolsize
        self._timeout = self._cfg.timeout

        # Last thread report time
        self._ltrt = 0.0
        # Local buffer
        self.buffer = []
        # Data dictionary for multi-part downloads
        # Keys are URLs and value is the data
        self._multipartdata = {}
        # Status of URLs being downloaded in
        # multipart. Keys are URLs
        self._multipartstatus = {}
        # Number of parts
        self._parts = self._cfg.numparts
        # Data mode
        # 0 => Flush data
        # 1 => keep data in memory (default)
        # This mode is perpetuated to connector objects
        # and reader objects belonging to connectors. It
        # is not an attribute of this class
        self._datamode = 1
        if self._cfg.flushdata: self._datamode = 0
        # Condition object
        self._cond = threading.Condition(threading.Lock())
        # Capacity leaves a little slack beyond the worker count.
        Queue.__init__(self, self._numthreads + 5)
Example #16
0
    def __init__(self):
        """Queue whose consumers and producers can be interrupted on demand."""
        # Queue is an old-style class, so super().__init__() is not usable;
        # the base initializer is called directly instead.
        Queue.__init__(self)

        self._interrupt_consumers = False
        self._interrupt_producers = False
Example #17
0
    def __init__(self):
        """Initialize the thread-pool queue from the global configuration.

        Pool size, timeout and part count come from the registered
        'config' object; capacity is threadpoolsize + 5.
        """

        # list of spawned threads
        self.__threads = []
        # list of url tasks
        self.__tasks = []

        cfg = GetObject('config')
        # Maximum number of threads spawned
        self.__numthreads = cfg.threadpoolsize
        self.__timeout = cfg.timeout

        # Last thread report time
        self._ltrt = 0.0
        # Local buffer
        self.buffer = []
        # Data dictionary for multi-part downloads
        # Keys are URLs and value is the data
        self.__multipartdata = {}
        # Status of URLs being downloaded in
        # multipart. Keys are URLs
        self.__multipartstatus = {}
        # Number of parts: reuse the config object fetched above instead
        # of performing a second GetObject('config') registry lookup.
        self.__parts = cfg.numparts
        # Condition object
        self._cond = threading.Condition(threading.Lock())
        Queue.__init__(self, self.__numthreads + 5)
Example #18
0
    def __init__(self, store_path):
        """URL queue filtered by white/black lists, with a checker at *store_path*."""
        Queue.__init__(self)
        self.store_path = store_path
        # Allow/deny filters applied to candidate entries.
        self.whitelist = WhiteList()
        self.blacklist = BlackList()
        self.checker = Checker(store_path)
Example #19
0
    def __init__(self, store_path):
        """Initialize the queue, its allow/deny lists and its checker."""
        Queue.__init__(self)
        self.store_path = store_path
        self.checker = Checker(store_path)
        self.whitelist = WhiteList()
        self.blacklist = BlackList()
Example #20
0
 def __init__(self):
    """Work queue tracking per-module counts, with its own loader and lock."""
    Queue.__init__(self)
    self.display = None
    self.countLock = threading.Lock()
    self.count = 0
    # Map of module name -> outstanding item count.
    self.moduleMapCount = {}
    self.loader = loader.loader()
Example #21
0
 def __init__(self, maxsize=0, num_queues=None, weights=None, allow_resize=False, auto_recycle=0xFFFFFFF):
     """ Create a new MultiQueue.

     maxsize -- maximum number of elements in any queue
     num_queues -- initial number of queues
     weights -- list of positive integers with the weight of each queue, bigger is more important
     allow_resize -- False by default is a num_queues or a weights list, only can be True in other cases
     auto_recycle -- cycle-counter ceiling after which internal counters restart
     """
     Queue.__init__(self, maxsize)

     # empty object
     self.num_queues = self.size = self.order_number = self.cycle = 0
     self.queues = []
     self.sizes = []
     self.active_queues = []
     self.wait = []

     # initialize queues arrays and weights
     if weights:
         num_queues = len(weights)
     elif num_queues:
         weights = [1]*num_queues

     if num_queues:
         self.wait = weights
         self.max_weight = max(self.wait)
         self._resize_queues(num_queues)
         self.wait = [self.max_weight-w+1 for w in self.wait] # converts weights in waiting times
         self.allow_resize = allow_resize
     else:
         self.max_weight = 1
         # without number of queues or a weights array, must allow queues resize
         self.allow_resize = True
     self.auto_recycle = auto_recycle
Example #22
0
    def __init__(self, db_base_dir=None, on_scan_complete=None,
                 extra_module_dirs=None, env=None,
                 db_event_reporter=None, db_catalog_dirs=None,
                 db_import_everything_langs=None):
        """Create a CodeIntel manager.

            "db_base_dir" (optional) specifies the base directory for
                the codeintel database. If not given it will default to
                '~/.codeintel'.
            "on_scan_complete" (optional) is a callback for Citadel scan
                completion. It will be passed the ScanRequest instance
                as an argument.
            "extra_module_dirs" (optional) is a list of extra dirs
                in which to look for and use "codeintel_*.py"
                support modules (and "lang_*.py" modules, DEPRECATED).
            "env" (optional) is an Environment instance (or subclass).
                See environment.py for details.
            "db_event_reporter" (optional) is a callback that will be called
                    db_event_reporter(<event-desc-string>)
                before "significant" long processing events in the DB. This
                may be useful to forward to a status bar in a GUI.
            "db_catalog_dirs" (optional) is a list of catalog dirs in
                addition to the std one to use for the CatalogsZone. All
                *.cix files in a catalog dir are made available.
            "db_import_everything_langs" (optional) is a set of langs for which
                the extra effort to support Database
                `lib.hits_from_lpath()' should be made. See class
                Database for more details.
        """
        # The manager is both a daemon worker thread and the queue it drains.
        threading.Thread.__init__(self, name="CodeIntel Manager")
        # NOTE(review): setDaemon() is deprecated; self.daemon = True is the modern form.
        self.setDaemon(True)
        Queue.__init__(self)

        self.citadel = Citadel(self)

        # Module registry bits.
        self._registered_module_canon_paths = set()
        self.silvercity_lexer_from_lang = {}
        self.buf_class_from_lang = {}
        self.langintel_class_from_lang = {}
        self._langintel_from_lang_cache = {}
        self.import_handler_class_from_lang = {}
        self._is_citadel_from_lang = {
        }  # registered langs that are Citadel-based
        self._is_cpln_from_lang = {
        }  # registered langs for which completion is supported
        self._hook_handlers_from_lang = defaultdict(list)

        self.env = env or DefaultEnvironment()
        # The database must be enabled before registering modules.
        self.db = Database(self, base_dir=db_base_dir,
                           catalog_dirs=db_catalog_dirs,
                           event_reporter=db_event_reporter,
                           import_everything_langs=db_import_everything_langs)

        self.lidb = langinfo.get_default_database()
        self._register_modules(extra_module_dirs)

        self.idxr = indexer.Indexer(self, on_scan_complete)
Example #23
0
    def __init__(self, *qargs, **qkeys):
        """Construct an execution queue; all arguments go to the base Queue."""
        Queue.__init__(self, *qargs, **qkeys)
        # Lock guarding execution state; nothing is executing yet.
        self.__exec_mutex = threading.Lock()
        self.__current_exec = None
	def __init__(self, filename):
		"""Queue bound to *filename*; connects immediately as the first client."""
		Queue.__init__(self)  # maxsize defaults to 0 (unbounded), as the operations assume
		self.filename = filename
		self.is_first = True
		self.shared_data = {}
		self.connect_mutex = Lock()
		self.ack_index = 0
		self.connect(self.is_first)
Example #25
0
 def __init__(self, username=None, api_key=None, **kwargs):
     """Connection-pool queue; credentials and options come from kwargs."""
     self.timeout = kwargs.get('timeout', 5)
     # Arguments forwarded when a new connection is established.
     self.connargs = {'username': username,
                      'api_key': api_key,
                      'auth': kwargs.get('auth', None)}
     # Pool capacity defaults to 10 connections.
     Queue.__init__(self, kwargs.get('poolsize', 10))
Example #26
0
    def __init__(self, path, *args, **kwargs):
        """Create a queue persisted in the sqlite database at *path*.

        Remaining positional/keyword arguments are forwarded to Queue.
        """
        self.path = path
        Queue.__init__(self, *args, **kwargs)
Example #27
0
    def __init__(self, continueRunningCheck=None, checkFrequency=None):
        """Queue that polls *continueRunningCheck* while waiting.

        A None check defaults to a predicate that always returns True.
        """
        Queue.__init__(self)

        self.continue_running_check = (
            continueRunningCheck if continueRunningCheck is not None
            else (lambda: True)
        )
        self.check_frequency = checkFrequency
Example #28
0
 def __init__(self, maxsize, isDaemon=False):
     """Spawn *maxsize* worker threads consuming from this bounded queue.

     maxsize -- worker count and queue capacity; must be positive.
     isDaemon -- whether workers run as daemon threads.
     """
     self.__stopevent = threading.Event()
     assert maxsize > 0, 'maxsize > 0 required for ThreadQueue class'
     Queue.__init__(self, maxsize)
     for i in xrange(maxsize):
         thread = Thread(target=self._worker)
         # setDaemon() is deprecated; assign the daemon attribute instead.
         thread.daemon = isDaemon
         thread.start()
Example #29
0
    def __init__(self, maxsize=10):
        """Instantiate a new :class:`.XBeeQueue` holding at most *maxsize* packets.

        Args:
            maxsize (Integer, default: 10): the maximum size of the queue.
        """
        Queue.__init__(self, maxsize)
 def __init__(self, maxsize, isDaemon=False):
     """Create the queue and start *maxsize* worker threads.

     maxsize -- worker count and queue capacity; must be positive.
     isDaemon -- daemon flag applied to every worker.
     """
     self.__stopevent = threading.Event()
     assert maxsize > 0, 'maxsize > 0 required for ThreadQueue class'
     Queue.__init__(self, maxsize)
     for i in xrange(maxsize):
         thread = Thread(target=self._worker)
         # setDaemon() is deprecated; set the attribute directly.
         thread.daemon = isDaemon
         thread.start()
Example #31
0
    def __init__(self):
        """Queue fed by incoming D-Bus messages via a main-loop callback."""
        Queue.__init__(self)

        def on_message(evt):
            # Push the payload of each received message onto the queue.
            self.put(evt.data)

        event.mainloop.register_event_callback('dbus-message-received',
                                               on_message)
Example #32
0
    def __init__(self, continueRunningCheck=None, checkFrequency=None):
        """Interruptible queue: a predicate is polled at *checkFrequency* intervals."""
        Queue.__init__(self)

        def _always_run():
            return True

        # A missing predicate defaults to "keep running forever".
        self.continue_running_check = (
            _always_run if continueRunningCheck is None else continueRunningCheck
        )
        self.check_frequency = checkFrequency
Example #33
0
    def __init__(self, *qargs, **qkeys):
        """Build an execution queue, forwarding all arguments to Queue."""
        Queue.__init__(self, *qargs, **qkeys)
        # Execution-state bookkeeping: current item plus its guarding lock.
        lock = threading.Lock()
        self.__exec_mutex = lock
        self.__current_exec = None
Example #34
0
 def __init__(self, maxsize=0):
     """Disk-backed buffer queue.

     :type maxsize: int
     :param maxsize: maximum number of items held in the buffer (0 = unbounded)
     """
     Queue.__init__(self, maxsize)
     # Item count; backing storage lives in a named temp file.
     self._contains = 0
     self._temp_file = tempfile.NamedTemporaryFile(suffix=".queue")
     self._init_fds()
Example #35
0
 def __init__(self, maxsize=0):
     """Create the buffer, spilling queued items into a named temp file.

     :param maxsize: cap on buffered items; 0 means no limit
     :type maxsize: int
     """
     Queue.__init__(self, maxsize)
     self._temp_file = tempfile.NamedTemporaryFile(suffix=".queue")
     self._contains = 0
     # Set up the file descriptors over the temp file.
     self._init_fds()
Example #36
0
    def __init__(self, db_base_dir=None, on_scan_complete=None,
                 extra_module_dirs=None, env=None,
                 db_event_reporter=None, db_catalog_dirs=None,
                 db_import_everything_langs=None):
        """Create a CodeIntel manager.

            "db_base_dir" (optional) specifies the base directory for
                the codeintel database. If not given it will default to
                '~/.codeintel'.
            "on_scan_complete" (optional) is a callback for Citadel scan
                completion. It will be passed the ScanRequest instance
                as an argument.
            "extra_module_dirs" (optional) is a list of extra dirs
                in which to look for and use "codeintel_*.py"
                support modules (and "lang_*.py" modules, DEPRECATED).
            "env" (optional) is an Environment instance (or subclass).
                See environment.py for details.
            "db_event_reporter" (optional) is a callback that will be called
                    db_event_reporter(<event-desc-string>)
                before "significant" long processing events in the DB. This
                may be useful to forward to a status bar in a GUI.
            "db_catalog_dirs" (optional) is a list of catalog dirs in
                addition to the std one to use for the CatalogsZone. All
                *.cix files in a catalog dir are made available.
            "db_import_everything_langs" (optional) is a set of langs for which
                the extra effort to support Database
                `lib.hits_from_lpath()' should be made. See class
                Database for more details.
        """
        # The manager is both a daemon worker thread and the queue it drains.
        threading.Thread.__init__(self, name="CodeIntel Manager")
        # NOTE(review): setDaemon() is deprecated; self.daemon = True is the modern form.
        self.setDaemon(True)
        Queue.__init__(self)

        self.citadel = Citadel(self)

        # Module registry bits.
        self._registered_module_canon_paths = set()
        self.silvercity_lexer_from_lang = {}
        self.buf_class_from_lang = {}
        self.langintel_class_from_lang = {}
        self._langintel_from_lang_cache = {}
        self.import_handler_class_from_lang = {}
        self._is_citadel_from_lang = {} # registered langs that are Citadel-based
        self._is_cpln_from_lang = {} # registered langs for which completion is supported
        self._hook_handlers_from_lang = defaultdict(list)

        self.env = env or DefaultEnvironment()
        # The database must be enabled before registering modules.
        self.db = Database(self, base_dir=db_base_dir,
                           catalog_dirs=db_catalog_dirs,
                           event_reporter=db_event_reporter,
                           import_everything_langs=db_import_everything_langs)

        self.lidb = langinfo.get_default_database()
        self._register_modules(extra_module_dirs)

        self.idxr = indexer.Indexer(self, on_scan_complete)
Example #37
0
 def __init__(self, maxsize):
     """Fixed-capacity queue whose storage is a plain list.

     @param maxsize: upper bound on the queue length
     @type maxsize: int
     """
     Queue.__init__(self, maxsize)
     self.maxsize = maxsize
     # Re-binding self.queue replaces the container installed by
     # Queue.__init__ with a list; NOTE(review): the subclass presumably
     # overrides _put/_get accordingly — confirm.
     self.queue = []
Example #38
0
    def __init__(self, queries=None, files=None, directories=None):
        """Build the queue of single inputs from queries, files and directories.

        queries, files, directories -- optional lists; None means empty.
        The mutable [] defaults were replaced with None sentinels so call
        sites can no longer share (and accidentally mutate) one list.
        """
        WriteDebug('Initalzing SingleInputsQueue: %s' % self)
        Queue.__init__(self)

        queries = queries if queries is not None else []
        files = files if files is not None else []
        directories = directories if directories is not None else []

        for single_input in getSingleInputsFromAllTypes(
                queries, files, directories):
            self.putSingleInput(single_input)

        WriteDebug('Finished intialzing the SingleInputsQueue: %s' % self)
Example #39
0
 def __init__(self, id, name, capacity=float('inf'), entityData=None, threshold=2,
              initialWIPLevel=2,**kw):
     """Queue that seeds itself with WIP entities at simulation start.

     id, name, capacity -- forwarded to the base Queue.
     entityData -- dict describing the entities to create; defaults to
         {'_class': 'Dream.Part'}. A None sentinel replaces the mutable
         dict default so the default object cannot be shared across calls.
     threshold -- WIP level under which a new Entity will be created.
     initialWIPLevel -- number of Entities present at simulation start.
     """
     Queue.__init__(self, id=id,name=name, capacity=capacity)
     # the threshold under which a new Entity will be created
     self.threshold=int(threshold)
     # the number of Entities in the start of simulation
     self.initialWIPLevel=int(initialWIPLevel)
     # the data of the Entities (dictionary); copied defensively
     if entityData is None:
         entityData = {'_class':'Dream.Part'}
     self.entityData=dict(entityData)
Example #40
0
 def __init__(self, username=None, api_key=None, **kwargs):
     """Initialize the connection pool with credentials and pool size."""
     auth = kwargs.get('auth', None)
     poolsize = kwargs.get('poolsize', 10)
     self.timeout = kwargs.get('timeout', 5)
     # Arguments used when opening a new connection.
     self.connargs = dict(username=username, api_key=api_key, auth=auth)
     Queue.__init__(self, poolsize)
Example #41
0
 def __init__(self, uniqness=True, maxsize=0):
     """Queue with optional uniqueness tracking.

     enq/deq are aliases for the non-blocking put/get operations.
     """
     _Queue.__init__(self, maxsize)
     self.maxsize = maxsize
     self.uniqness = uniqness
     self._qset_lock = None
     if uniqness:
         self._qset = set()
         self._qset_lock = _ThreadLock()
     # Convenience aliases for callers.
     self.enq = self.put_nowait
     self.deq = self.get_nowait
Example #42
0
	def __init__(self, uniqness=True, maxsize=0):
		"""Create the queue; when uniqness is set, members are tracked in a set."""
		_Queue.__init__(self, maxsize)
		self.uniqness = uniqness
		self.maxsize = maxsize
		if self.uniqness:
			self._qset_lock = _ThreadLock()
			self._qset = set()
		else:
			self._qset_lock = None
		# Short aliases kept for the non-blocking operations.
		self.enq = self.put_nowait
		self.deq = self.get_nowait
Example #43
0
    def __init__(self, maxsize=0, timeout=None, name=None):
        """Named queue with a default operation timeout and its own logger."""
        Queue.__init__(self, maxsize)
        self.timeout = timeout
        self.name = name
        # Derive a child logger name when a queue name was given.
        if name is None:
            logger_name = __name__
        else:
            logger_name = '{}.{}'.format(__name__, name)
        self._logger = logging.getLogger(logger_name)
Example #44
0
 def __init__(self,size=1,maxjobs=0,workerFactory=DefaultWorkerFactory):
     """Thread pool: the queue itself holds the jobs consumed by workers.

     size -- number of workers to start.
     maxjobs -- job queue capacity (0 = unbounded).
     workerFactory -- callable used to build each new worker.
     """
     if not callable(workerFactory):
         raise TypeError("workerFactory must be callable")
     # Factory for new workers; _size counts the active ones.
     self.workerFactory=workerFactory
     self._size=0

     Queue.__init__(self, maxjobs)
     # The pool doubles as its own job queue.
     self._jobs=self

     # Start the initial workers.
     for i in xrange(size):
         self.grow()
Example #45
0
 def __init__(self, id, name, capacity=float("inf"), intervalCapacity=None, schedulingRule="FIFO", gatherWipStat=False,
              sharedResources=None, **kw):
     """Capacity-station queue with per-interval manhour capacity.

     intervalCapacity -- manhours available in each interval; the mutable
         [] default was replaced by a None sentinel (behavior for omitted
         arguments is unchanged, but the default list can no longer be
         shared and mutated across instances).
     sharedResources -- dict describing workpower shared with other
         stations; None behaves like the previous {} default.
     """
     Queue.__init__(self, id, name, capacity=capacity)
     # a list that holds the capacity (manhours) that is available in each interval
     self.intervalCapacity=intervalCapacity if intervalCapacity is not None else []
     # a list that holds the capacity (manhours) that is available in each interval for the remaining time
     self.remainingIntervalCapacity=list(self.intervalCapacity)
     # blocks the entry of the capacity station, so that it can be manipulated to accept only in certain moments of simulation time
     self.isLocked=True
     # dict that holds information if station shares workpower with some other station
     self.sharedResources=sharedResources if sharedResources is not None else {}
Example #46
0
 def __init__(self, persistence_path, queue_name, debug=False):
     """Open (or create) a disk-backed queue at ``persistence_path/queue_name``.

     Any existing transaction log is replayed before the queue is usable.
     """
     self.queue_name = queue_name
     self.persistence_path = persistence_path
     self.total_items = 0
     # Serializes access to the on-disk transaction log.
     self.transaction_lock = thread.allocate_lock()
     Queue.__init__(self, 0)
     self.initial_bytes = self._replay_transaction_log(debug)
Example #47
0
 def __init__(self, persistence_path, queue_name, debug=False):
     """Construct a PersistentQueue rooted at ``persistence_path/queue_name``.

     When a queue log already exists there, its contents are replayed
     from disk before the queue becomes available.
     """
     self.persistence_path = persistence_path
     self.queue_name = queue_name
     self.total_items = 0
     self.transaction_lock = thread.allocate_lock()
     Queue.__init__(self, 0)
     # Replay persisted entries; remember how many bytes were restored.
     self.initial_bytes = self._replay_transaction_log(debug)
Example #48
0
    def __init__(self, wbxmlBytes):
        """Queue pre-filled with the ordinal value of each byte in *wbxmlBytes*."""
        Queue.__init__(self)

        # Counters for bytes taken out of / placed into the queue.
        self.bytesDequeued = 0
        self.bytesEnqueued = 0
        for byte in wbxmlBytes:
            self.bytesEnqueued += 1
            self.put(ord(byte))

        logging.debug("Array byte count: %d, enqueued: %d" %
                      (self.qsize(), self.bytesEnqueued))
    def __init__(self, wbxmlBytes):
        """Enqueue each byte of the WBXML payload as an integer."""
        self.bytesDequeued = 0
        self.bytesEnqueued = 0

        Queue.__init__(self)

        for ch in wbxmlBytes:
            self.put(ord(ch))
            self.bytesEnqueued += 1

        logging.debug("Array byte count: %d, enqueued: %d" % (self.qsize(), self.bytesEnqueued))
Example #50
0
 def __init__(self,*argv,**kw):
     """Disk-caching queue; cache location and size come from keyword options."""
     Queue.__init__(self)
     self.lock = threading.Lock()
     # Cache configuration: item count per file, directory, file prefix.
     self.__cache_size = kw.get("cache_size" , 1000)
     self.__cache_dir = kw.get("cache_dir", "./.cache_dir/")
     self.__cache_file_prefix = kw.get("cache_prefix" ,"cache")
     file2.mkdir_p(self.__cache_dir)
     # Read/write cursors and bookkeeping counters.
     self.__read_index = self.__write_index = 0
     self.__write_file_count = self.__write_count = self.__read_count = 0
     self.__write_file_handle = None
Example #51
0
 def __init__(self, file_path, formatter):
     """Journal queue writing formatted records to *file_path* via a worker thread."""
     Queue.__init__(self)
     self.file_path = file_path
     self.formatter = formatter
     # File handle/inode bookkeeping plus size and position counters.
     self.file = None
     self.inode = None
     self.count = 0
     self.byte_size = 0
     self.pos = 0
     self.open()
     # Background writer drains the queue; dies with the process.
     self.thread = Thread(target=self.worker)
     self.thread.daemon = True
     self.thread.start()
Example #52
0
    def __init__(self):
        """Set up the scheduler loop.

        Configuration is read from optional attributes on self:
        ``wque`` (enable the Queue base), ``wait`` (startup delay),
        ``each`` (period) and ``errb`` (error callback).
        """
        # Only initialize the Queue base when a work queue was requested.
        if getattr(self, "wque", None):
            Queue.__init__(self)
        wait = getattr(self, "wait", 0)
        self.dbg("Setting Up Scheduler with Delay = %d" % wait)
        # Schedule the main loop after the delay (reactor.callLater —
        # presumably the Twisted reactor).
        reactor.callLater(wait, self._setuploop)

        each = getattr(self, "each", 0)
        self.each = each
        # Fall back to the default error handler when none is provided.
        errb = getattr(self, "errb", self._errb)
        self.errb = errb

        self.launched = 0
        self.blocked = False
Example #53
0
 def __init__(self, poolType, poolLimit, dbClass, *args, **kwargs):
   """ poolLimit: if elastic, this is the min connection number, otherwise this is the 
       max number of connection """
   # Class-level overrides (the _db* attributes) take precedence over the
   # constructor arguments when they are truthy.
   self.poolType= self._dbPoolType or poolType
   self.dbClass= self._dbClass or dbClass
   self.args= self._dbArgs or args
   self.kwargs= self._dbKwargs or kwargs
   # Number of interfaces handed out so far.
   self.dbifaces= 0
   self.poolLimit= self._dbPoolLimit or poolLimit
   # We Need a Lock because, altough Queue is thread safe,
   # we want a full control on queue size
   self.mpoolingLock= Lock()
   # Connections currently checked out of the pool.
   self.dbemployed= []
   Queue.__init__(self)
    def __init__(self, size=1, maxjobs=0, worker_factory=default_worker_factory):
        """Start a pool of *size* workers that consume jobs from this queue.

        maxjobs bounds the job queue (0 = unbounded); worker_factory
        builds each worker and must be callable.
        """
        if not callable(worker_factory):
            raise TypeError("worker_factory must be callable")

        self._size = 0                        # number of active workers
        self.worker_factory = worker_factory  # builds new workers on grow()

        # The pool is the job queue the workers read from.
        Queue.__init__(self, maxjobs)
        # Kept for backwards compatibility with version 0.9.1 and earlier.
        self._jobs = self

        # Hire the initial workers.
        for _ in xrange(size):
            self.grow()
 def __init__(self, nworkers, maxlarge, logger, email, branch):
     """Job scheduler driving *nworkers* workers, at most *maxlarge* large jobs."""
     Queue.__init__(self)
     info('Initializing scheduler with %d jobs.', nworkers)
     self.nworkers = nworkers
     self.maxlarge = maxlarge
     self.logger = logger
     self.email = email
     self.branch = branch
     # not thread safe, don't really care right now
     self.nlarge = 0
     # Pass/fail tallies and worker/stop/timer/error bookkeeping.
     self.passed = 0
     self.failed = 0
     self.workers = []
     self.stopping = Event()
     self.timer = None
     self.error = None
Example #56
0
    def __init__(self, setTrue=True, threadClass=Thread):
        """Waiting queue driven by a dedicated daemon thread.

        setTrue -- when True, the gating event starts in the set state.
        threadClass -- threading.Thread subclass used for the waiting thread.

        Raises ValueError if threadClass is not a threading.Thread subclass.
        """
        if not issubclass(threadClass, Thread):
            raise ValueError("Only subclasses of threading.Thread are allowed.")

        Queue.__init__(self)
        self.event = Event()
        self.counter = 0
        self.threadClass = threadClass

        if setTrue:
            self.event.set() # true at the beginning

        waitingThread = self.threadClass(name = "Waiting-Queue-Thread", target = self.runThread)
        # setDaemon() is deprecated since Python 3.10; set the attribute.
        waitingThread.daemon = True
        waitingThread.start()
Example #57
0
 def __init__(self, nworkers, maxlarge, logger, email, branch):
     """Scheduler queue; tracks worker results and stop/timer/error state."""
     Queue.__init__(self)
     info('Initializing scheduler with %d jobs.', nworkers)
     self.nworkers, self.maxlarge = nworkers, maxlarge
     self.logger, self.email, self.branch = logger, email, branch
     self.nlarge = 0  # not thread safe, don't really care right now
     self.passed = self.failed = 0
     self.workers = []
     self.stopping = Event()
     self.timer = None
     self.error = None