def __init__(self):
    """Create the event queue and select the OVN mode implementation.

    Aborts the process when ``variables.OVN_MODE`` is not a supported mode.
    """
    self.event_queue = queue.PriorityQueue()
    # Guard clause: only "overlay" mode is supported.
    if variables.OVN_MODE != "overlay":
        vlog.emer("OVN mode not defined.")
        sys.exit(1)
    self.mode = ovn_k8s.modes.overlay.OvnNB()
def __init__(self):
    """Create the event queue and select the OVN mode implementation.

    Aborts the process when the configured ``ovn_mode`` option is not a
    supported mode.
    """
    self.event_queue = queue.PriorityQueue()
    # Guard clause: only "overlay" mode is supported.
    if config.get_option('ovn_mode') != "overlay":
        vlog.emer("OVN mode not defined.")
        sys.exit(1)
    self.mode = ovn_k8s.modes.overlay.OvnNB()
def __init__(self, driver, size, workers_threads_count=4, initializer=None, min_pool_size=0):
    """Session pool backed by a daemon event-loop thread.

    :param driver: driver used to create sessions and await connectivity
    :param size: maximum number of pooled sessions
    :param workers_threads_count: worker threads in the internal executor
    :param initializer: optional callable applied to each new session
    :param min_pool_size: number of sessions prepared eagerly at startup

    :raises ValueError: if ``min_pool_size`` exceeds ``size``
    """
    # Fix: validate the configuration BEFORE any resources are created.
    # Previously this check ran last, so a bad min_pool_size leaked an
    # already-started thread pool and daemon event-loop thread.
    if min_pool_size > size:
        raise ValueError("Invalid min pool size value!")
    self._lock = threading.RLock()
    self._waiters = collections.OrderedDict()
    self._driver = driver
    self._active_queue = queue.PriorityQueue()
    self._active_count = 0
    self._size = size
    self._req_settings = settings.BaseRequestSettings().with_timeout(3)
    self._tp = futures.ThreadPoolExecutor(workers_threads_count)
    self._initializer = initializer
    self._should_stop = threading.Event()
    self._keep_alive_threshold = 4 * 60  # seconds a session may idle before keep-alive
    self._spin_timeout = 30
    self._event_queue = queue.Queue()
    self._driver_await_timeout = 3
    # Daemon thread: must not block interpreter shutdown.
    self._event_loop_thread = threading.Thread(target=self.events_loop)
    self._event_loop_thread.daemon = True
    self._event_loop_thread.start()
    self._logger = logger.getChild(self.__class__.__name__)
    self._min_pool_size = min_pool_size
    self._terminating = False
    for _ in range(self._min_pool_size):
        self._prepare(self._create())
def __init__(self, num_workers, q_size=0, resq_size=0, poll_timeout=5):
    """Create the pool and spawn ``num_workers`` worker threads.

    ``q_size``/``resq_size`` bound the work-request and results queues.
    With a bounded request queue, ``putRequest`` blocks when the queue is
    full unless called with a positive ``timeout``. With a bounded results
    queue, workers block until results are drained.

    .. warning: With both queues bounded a deadlock is possible when the
       results queue is not pulled regularly while jobs keep arriving.
       Always call ``ThreadPool.putRequest()`` with ``timeout > 0`` and
       catch ``Queue.Full`` exceptions.
    """
    self.workers = []
    self.dismissedWorkers = []
    self.workRequests = {}
    self._requests_queue = queue.PriorityQueue(q_size)
    self._results_queue = queue.Queue(resq_size)
    self.createWorkers(num_workers, poll_timeout)
def __init__(self, id):
    """Register this instance under *id*; the first registration wins.

    ``self._master`` always points at the instance first registered for
    this id (possibly ``self``).
    """
    self._id = id
    # setdefault only inserts when the id is unseen, matching the original
    # "first instance becomes master" behavior.
    self._master = self._masters.setdefault(id, self)
    # maxsize of -1 means the queue is unbounded.
    self._queue = Queue.PriorityQueue(-1)
def __init__(self, settings, sender=None):
    """Set up the tensorboard watcher state and reset global tb hooks."""
    self._settings = settings
    self._sender = sender
    self._logdirs = {}
    self._consumer = None
    # TODO(jhr): do we need locking in this queue?
    self._watcher_queue = queue.PriorityQueue()
    wandb.tensorboard.reset_state()
def __init__(self, settings, run_proto, interface):
    """Set up the tensorboard watcher state and reset global tb hooks."""
    self._settings = settings
    self._run_proto = run_proto
    self._interface = interface
    self._logdirs = {}
    self._consumer = None
    # TODO(jhr): do we need locking in this queue?
    self._watcher_queue = queue.PriorityQueue()
    wandb.tensorboard.reset_state()
def __init__(self, max_threads=3):
    """Start the submission worker thread; at most ``max_threads`` run at once."""
    # A PriorityQueue yields the smallest value first.
    self._submission_queue = queue.PriorityQueue()
    self._submission_set = set()
    self._threads_semaphore = threading.BoundedSemaphore(max_threads)
    worker = threading.Thread(None, self.__run, 'ExecutorQueue.__run')
    worker.daemon = True
    self._thread = worker
    self._thread.start()
def __init__(self, size=10, default_timeout=10, ping_interval=3000, labels=None):
    """Create a bounded session pool that pings idle sessions.

    ``ping_interval`` is expressed in seconds and converted to a
    ``timedelta`` used to decide when a session needs a keep-alive ping.
    """
    super(PingingPool, self).__init__(labels=labels)
    self.default_timeout = default_timeout
    self.size = size
    self._delta = datetime.timedelta(seconds=ping_interval)
    self._sessions = queue.PriorityQueue(maxsize=size)
def __init__(self, *components):
    """Seed the priority queue with each component's initial event."""
    self.event_queue = queue.PriorityQueue()
    for component in components:
        initial = Scheduler.Event(component.offset, component, True)
        self.event_queue.put(initial)
def _traces(self, *args, **kwargs):
    """Yield ``(trace, log_prob_sum)`` pairs drawn via a priority queue.

    Stops after ``self.num_samples`` traces or when the queue is exhausted.
    """
    trace_queue = queue.PriorityQueue()
    # A tiny random perturbation of the priority breaks ties between traces.
    noise = torch.rand(1).item() * 1e-2
    trace_queue.put((torch.zeros(1).item() - noise, poutine.Trace()))
    queued_model = pqueue(self.model, queue=trace_queue)
    for _ in range(self.num_samples):
        if trace_queue.empty():
            # num_samples was too large!
            break
        tr = poutine.trace(queued_model).get_trace(*args, **kwargs)  # XXX should block
        yield tr, tr.log_prob_sum()
def __init__(self, image, target=None, seed_vox=None, mask=None, sparse_mask=False, block_padding=None):
    # Region state for iterative flood-filling over `image`: a priority queue
    # of candidate move positions, a visited set, a mask volume to fill, and
    # move bounds derived from the model's FOV and move step.
    self.block_padding = block_padding
    self.MOVE_DELTA = CONFIG.model.move_step
    self.queue = queue.PriorityQueue()
    self.visited = set()
    self.image = image
    self.bounds = np.array(image.shape, dtype=np.int64)
    # Align the move grid to the seed voxel (or the origin when unseeded).
    if seed_vox is None:
        self.MOVE_GRID_OFFSET = np.array([0, 0, 0], dtype=np.int64)
    else:
        self.MOVE_GRID_OFFSET = np.mod(seed_vox, self.MOVE_DELTA).astype(np.int64)
    # (lower, upper) inclusive bounds in move-grid coordinates such that the
    # model's input FOV stays inside the image volume.
    self.move_bounds = (
        np.ceil(np.true_divide((CONFIG.model.input_fov_shape - 1) // 2 - self.MOVE_GRID_OFFSET,
                               self.MOVE_DELTA)).astype(np.int64),
        self.vox_to_pos(np.array(self.bounds) - 1 - (CONFIG.model.input_fov_shape - 1) // 2),
    )
    self.move_check_thickness = CONFIG.model.move_check_thickness
    # Choose a mask backing store: reuse the image's octree layout, a sparse
    # octree, or a dense float32 array — all initialized to NaN (unvisited).
    if mask is None:
        if isinstance(self.image, OctreeVolume):
            self.mask = OctreeVolume(self.image.leaf_shape, (np.zeros(3), self.bounds), 'float32')
            self.mask[:] = np.NAN
        elif sparse_mask:
            self.mask = OctreeVolume(CONFIG.model.training_subv_shape, (np.zeros(3), self.bounds), 'float32')
            self.mask[:] = np.NAN
        else:
            self.mask = np.full(self.bounds, np.NAN, dtype=np.float32)
    else:
        self.mask = mask
    self.target = target
    self.bias_against_merge = False
    self.move_based_on_new_mask = False
    self.prioritize_proximity = CONFIG.model.move_priority == 'proximity'
    self.proximity = {}
    # Default seed: the center of the move bounds when no voxel was given.
    if seed_vox is None:
        seed_pos = np.floor_divide(self.move_bounds[0] + self.move_bounds[1], 2)
    else:
        seed_pos = self.vox_to_pos(seed_vox)
    assert self.pos_in_bounds(seed_pos), \
        'Seed position (%s) must be in region move bounds (%s, %s).' % \
        (seed_vox, self.move_bounds[0], self.move_bounds[1])
    self.seed_pos = seed_pos
    # NOTE(review): the seed is queued with priority None while later entries
    # presumably use numeric priorities; comparing None against a number
    # raises TypeError on Python 3 — confirm how priorities are generated.
    self.queue.put((None, seed_pos))
    self.proximity[tuple(seed_pos)] = 1
    self.seed_vox = self.pos_to_vox(seed_pos)
    if self.target is not None:
        # Target volume may be smaller than the image; center-align it.
        self.target_offset = (self.bounds - self.target.shape) // 2
        assert np.isclose(self.target[tuple(self.seed_vox - self.target_offset)], CONFIG.model.v_true), \
            'Seed position should be in target body.'
    # Mark the seed as filled and visited before any moves are processed.
    self.mask[tuple(self.seed_vox)] = CONFIG.model.v_true
    self.visited.add(tuple(self.seed_pos))
def __init__(
    self,
    settings: "SettingsStatic",
    run_proto: "RunRecord",
    interface: "BackendSender",
) -> None:
    """Set up the tensorboard watcher state and reset global tb hooks."""
    self._settings = settings
    self._run_proto = run_proto
    self._interface = interface
    self._logdirs = {}
    self._consumer = None
    # TODO(jhr): do we need locking in this queue?
    self._watcher_queue = queue.PriorityQueue()
    wandb.tensorboard.reset_state()
def __init__(self, *args, **kwargs):
    """Initialize the authenticator, run recovery, and prepare reporting."""
    super(AuthenticatorOut, self).__init__(*args, **kwargs)
    # In test mode use a throwaway directory; otherwise a fixed path.
    if self.conf("test-mode"):
        self._root = tempfile.mkdtemp()
    else:
        self._root = "/tmp/certbot"
    self._httpd = None
    self._start_time = calendar.timegm(time.gmtime())
    self._handler_file_problem = False
    # Set up reverter
    self.reverter = reverter.Reverter(self.config)
    self.reverter.recovery_routine()
    # Reporter
    self.orig_reporter = None
    self.messages = queue.PriorityQueue()
def __init__(self, driver, size, workers_threads_count=4, initializer=None):
    """Session pool serviced by a dedicated ``_PoolThread``."""
    self._driver = driver
    self._size = size
    self._initializer = initializer
    self._lock = threading.RLock()
    self._waiters = collections.OrderedDict()
    self._active_queue = queue.PriorityQueue()
    self._active_count = 0
    self._req_settings = settings.BaseRequestSettings().with_timeout(3)
    self._tp = futures.ThreadPoolExecutor(workers_threads_count)
    self._should_stop = threading.Event()
    self._keep_alive_threshold = 4 * 60
    self._spin_timeout = 30
    self._pool_thread = _PoolThread(self)
    self._pool_thread.start()
    self._logger = logger.getChild(self.__class__.__name__)
def __init__(self, rp_url, session, api_version, launch_id, project_name,
             log_batch_size=20, verify_ssl=True):
    """Initialize instance attributes.

    :param rp_url:         Report portal URL
    :param session:        HTTP Session object
    :param api_version:    RP API version
    :param launch_id:      Parent launch UUID
    :param project_name:   RP project name
    :param log_batch_size: The amount of log objects that need to be
                           gathered before processing
    :param verify_ssl:     Indicates that it is necessary to verify SSL
                           certificates within HTTP request
    """
    self.rp_url = rp_url
    self.session = session
    self.api_version = api_version
    self.launch_id = launch_id
    self.project_name = project_name
    self.log_batch_size = log_batch_size
    self.verify_ssl = verify_ssl
    self._lock = Lock()
    self._logs_batch = []
    self._worker = None
    self.command_queue = queue.Queue()
    self.data_queue = queue.PriorityQueue()
    self._log_endpoint = (
        '{rp_url}/api/{version}/{project_name}/log'.format(
            rp_url=rp_url, version=self.api_version,
            project_name=self.project_name))
def __init__(self, maxsize=0, key=None):
    """Wrap a ``PriorityQueue``; ``key`` extracts the priority of an item.

    When ``key`` is falsy, the identity function is used.
    """
    if key:
        self._key = key
    else:
        self._key = lambda item: item
    self._queue = queue.PriorityQueue(maxsize=maxsize)
from six.moves import queue
from localstack.config import TMP_FOLDER
from localstack.constants import API_ENDPOINT
from localstack.utils.common import (JsonObject, to_str, timestamp, short_uid,
                                     save_file, FuncThread, load_file)
from localstack.utils.common import safe_requests as requests

# Identifier generated once per process at import time.
PROCESS_ID = short_uid()
# Lazily resolved machine identifier (populated elsewhere).
MACHINE_ID = None

# event type constants
EVENT_START_INFRA = 'infra.start'

# sender thread and queue
SENDER_THREAD = None
EVENT_QUEUE = queue.PriorityQueue()


class AnalyticsEvent(JsonObject):
    # Analytics event record. Each field accepts either a long kwarg name or
    # its short wire-format alias, falling back to a generated default.

    def __init__(self, **kwargs):
        self.t = kwargs.get('timestamp') or kwargs.get('t') or timestamp()
        self.m_id = kwargs.get('machine_id') or kwargs.get('m_id') or get_machine_id()
        self.p_id = kwargs.get('process_id') or kwargs.get('p_id') or get_process_id()
        self.e_t = kwargs.get('event_type') or kwargs.get('e_t')
        # `payload` may legitimately be falsy, so an explicit None check is
        # used instead of `or` here.
        self.p = kwargs.get('payload') if kwargs.get('payload') is not None else kwargs.get('p')

    def timestamp(self):
        # NOTE: this method shadows the imported `timestamp` helper within
        # instances; it returns the stored event timestamp.
        return self.t
def __init__(self):
    """Create the timer store; lowest-priority (earliest) entries pop first."""
    self.timers = queue.PriorityQueue()
def __init__(self, config):
    """Store the config and create the prioritized message queue."""
    self.config = config
    self.messages = queue.PriorityQueue()  # type: queue.PriorityQueue[Reporter._msg_type]
def __init__(self, config):
    """Store the config and create the prioritized message queue."""
    self.config = config
    self.messages = queue.PriorityQueue()
def __init__(self, size=10, default_timeout=10, ping_interval=3000):
    """Create a bounded session pool that pings idle sessions.

    ``ping_interval`` seconds are converted to a ``timedelta`` used to
    decide when a session needs a keep-alive ping.
    """
    self.default_timeout = default_timeout
    self.size = size
    self._delta = datetime.timedelta(seconds=ping_interval)
    self._sessions = queue.PriorityQueue(maxsize=size)
def __init__(self, endpoint=None, token=None, solver=None, proxy=None,
             permissive_ssl=False, **kwargs):
    """To setup the connection a pipeline of queues/workers is constructed.

    There are five interactions with the server the connection manages:

    1. Downloading solver information.
    2. Submitting problem data.
    3. Polling problem status.
    4. Downloading problem results.
    5. Canceling problems

    Loading solver information is done synchronously. The other four tasks
    are performed asynchronously by workers. For 2, 3, and 5 the workers
    gather tasks in batches.
    """
    if not endpoint or not token:
        raise ValueError("Endpoint URL and/or token not defined")
    _LOGGER.debug("Creating a client for endpoint: %r", endpoint)
    self.endpoint = endpoint
    self.token = token
    self.default_solver = solver
    # Create a :mod:`requests` session. `requests` will manage our url parsing, https, etc.
    self.session = requests.Session()
    self.session.headers.update({'X-Auth-Token': self.token,
                                 'User-Agent': self.USER_AGENT})
    self.session.proxies = {'http': proxy, 'https': proxy}
    if permissive_ssl:
        self.session.verify = False
    # Build the problem submission queue, start its workers
    self._submission_queue = queue.Queue()
    self._submission_workers = []
    for _ in range(self._SUBMISSION_THREAD_COUNT):
        worker = threading.Thread(target=self._do_submit_problems)
        worker.daemon = True
        worker.start()
        self._submission_workers.append(worker)
    # Build the cancel problem queue, start its workers
    self._cancel_queue = queue.Queue()
    self._cancel_workers = []
    for _ in range(self._CANCEL_THREAD_COUNT):
        worker = threading.Thread(target=self._do_cancel_problems)
        worker.daemon = True
        worker.start()
        self._cancel_workers.append(worker)
    # Build the problem status polling queue, start its workers.
    # Polling uses a PriorityQueue (unlike the others) so that problems
    # scheduled for an earlier poll are serviced first.
    self._poll_queue = queue.PriorityQueue()
    self._poll_workers = []
    for _ in range(self._POLL_THREAD_COUNT):
        worker = threading.Thread(target=self._do_poll_problems)
        worker.daemon = True
        worker.start()
        self._poll_workers.append(worker)
    # Build the result loading queue, start its workers
    self._load_queue = queue.Queue()
    self._load_workers = []
    for _ in range(self._LOAD_THREAD_COUNT):
        worker = threading.Thread(target=self._do_load_results)
        worker.daemon = True
        worker.start()
        self._load_workers.append(worker)
    # Prepare an empty set of solvers
    self._solvers = {}
    self._solvers_lock = threading.RLock()
    self._all_solvers_ready = False
    # Set the parameters for requests; disable SSL verification if needed
    self._request_parameters = {}
    if permissive_ssl:
        self._request_parameters['verify'] = False
def __init__(self, db_driver):
    """Wrap the northbound DB driver and prepare the event queue."""
    super(NbApi, self).__init__()
    self.driver = db_driver
    self.controller = None
    self.db_apply_failed = False
    self._queue = Queue.PriorityQueue()
def multi_upload(self):
    """
    Performs multipart uploads. It initiates the multipart upload.
    It creates a queue ``part_queue`` which is directly responsible
    with controlling the progress of the multipart upload. It then
    creates ``UploadPartTasks`` for threads to run via the
    ``executer``. This function waits for all of the parts in the
    multipart upload to finish, and then it completes the multipart
    upload. This method waits on its parts to finish. So, threads
    are required to process the parts for this function to complete.
    """
    part_queue = NoBlockQueue(self.interrupt)
    # Completed parts arrive out of order; a PriorityQueue lets us drain
    # them sorted by part number for CompleteMultipartUpload.
    complete_upload_queue = Queue.PriorityQueue()
    part_counter = MultiCounter()
    counter_lock = threading.Lock()
    bucket, key = find_bucket_key(self.dest)
    params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}
    if self.parameters['acl']:
        params['acl'] = self.parameters['acl'][0]
    if self.parameters['guess_mime_type']:
        self._inject_content_type(params, self.src)
    response_data, http = operate(self.service, 'CreateMultipartUpload',
                                  params)
    upload_id = response_data['UploadId']
    size_uploads = self.chunksize
    # Round up so a trailing partial chunk still gets its own part.
    num_uploads = int(math.ceil(self.size / float(size_uploads)))
    # Part numbers are 1-based in the S3 API.
    for i in range(1, (num_uploads + 1)):
        part_info = (self, upload_id, i, size_uploads)
        part_queue.put(part_info)
        task = UploadPartTask(session=self.session, executer=self.executer,
                              part_queue=part_queue,
                              dest_queue=complete_upload_queue,
                              region=self.region,
                              printQueue=self.printQueue,
                              interrupt=self.interrupt,
                              part_counter=part_counter,
                              counter_lock=counter_lock)
        self.executer.submit(task)
    part_queue.join()
    # The following ensures that if the multipart upload is in progress,
    # all part uploads finish before aborting or completing. This
    # really only applies when an interrupt signal is sent because the
    # ``part_queue.join()`` ensures this if the process is not
    # interrupted.
    while part_counter.count:
        time.sleep(0.1)
    parts_list = []
    while not complete_upload_queue.empty():
        part = complete_upload_queue.get()
        parts_list.append(part[1])
    # Only complete when every part succeeded; otherwise abort the upload
    # so no orphaned parts keep accruing storage, and signal failure.
    if len(parts_list) == num_uploads:
        parts = {'Parts': parts_list}
        params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key,
                  'upload_id': upload_id, 'multipart_upload': parts}
        operate(self.service, 'CompleteMultipartUpload', params)
    else:
        abort_params = {'endpoint': self.endpoint, 'bucket': bucket,
                        'key': key, 'upload_id': upload_id}
        operate(self.service, 'AbortMultipartUpload', abort_params)
        raise Exception()
def __init__(self):
    """Create the internal priority queue (lowest value dequeued first)."""
    self._queue = Queue.PriorityQueue()
def __init__(self):
    """Create the message queue; lowest-priority messages are served first."""
    self.messages = queue.PriorityQueue()