Example #1
        def test_queue_size(self):
            queue = RedisQueue("test-queue-size-1", maxsize=1)
            queue.put(1)
            self.assertRaises(six.moves.queue.Full, queue.put, 1)

            queue = RedisQueue("test-queue-size-2", maxsize=2)
            queue.put(1)
            queue.put(1)
            self.assertRaises(six.moves.queue.Full, queue.put, 1)
            queue.get()
            queue.get()
            self.assertRaises(six.moves.queue.Empty, queue.get_nowait)
Example #2
        def test_queue_size(self):
            print(REDIS_HOST)
            print(os.getenv('REDIS_PORT_6379_TCP_ADDR'))
            queue = RedisQueue("test-queue-size-1", maxsize=1, host=REDIS_HOST)
            queue.put(1)
            self.assertRaises(six.moves.queue.Full, queue.put, 1)

            queue = RedisQueue("test-queue-size-2", maxsize=2, host=REDIS_HOST)
            queue.put(1)
            queue.put(1)
            self.assertRaises(six.moves.queue.Full, queue.put, 1)
            queue.get()
            queue.get()
            self.assertRaises(six.moves.queue.Empty, queue.get_nowait)
Example #3
    def test_queue_size(self):
        print(REDIS_HOST)
        print(os.getenv('REDIS_PORT_6379_TCP_ADDR'))
        queue = RedisQueue("test-queue-size-1", maxsize=1, host=REDIS_HOST)
        queue.put(1)
        with pytest.raises(six.moves.queue.Full):
            queue.put(1)

        queue = RedisQueue("test-queue-size-2", maxsize=2, host=REDIS_HOST)
        queue.put(1)
        queue.put(1)
        with pytest.raises(six.moves.queue.Full):
            queue.put(1)
        queue.get()
        queue.get()
        with pytest.raises(six.moves.queue.Empty):
            queue.get_nowait()
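All three tests above exercise the same RedisQueue class, which is not shown: a queue.Queue-style wrapper around Redis with put, get, get_nowait and a maxsize bound. As a rough sketch of what such a class might look like (assumed for illustration only, using redis-py; the key naming and constructor signature are not the tested project's actual code):

import redis
import six.moves.queue


class RedisQueue(object):
    """Minimal sketch of a bounded queue backed by a Redis list.

    Assumed for illustration; not the tested project's implementation.
    """

    def __init__(self, name, maxsize=0, host="localhost", port=6379):
        self.key = "queue:%s" % name    # Redis list that stores the items
        self.maxsize = maxsize          # 0 means unbounded, as in queue.Queue
        self.client = redis.StrictRedis(host=host, port=port)

    def put(self, item):
        # Raise Full when the bound is reached instead of blocking.
        if self.maxsize and self.client.llen(self.key) >= self.maxsize:
            raise six.moves.queue.Full
        self.client.rpush(self.key, item)

    def get(self, timeout=0):
        # BLPOP blocks until an item arrives (timeout=0 waits forever).
        result = self.client.blpop(self.key, timeout=timeout)
        if result is None:
            raise six.moves.queue.Empty
        return result[1]

    def get_nowait(self):
        value = self.client.lpop(self.key)
        if value is None:
            raise six.moves.queue.Empty
        return value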
Example #4
File: game.py Project: bitcraft/sanic
def blit_thread(queue, screen, lock):
    screen_size = screen.get_size()
    scale = pygame.transform.scale
    flip = pygame.display.flip
    while 1:
        surface = queue.get()
        if surface is None:
            break
        else:
            with lock:
                scale(surface, screen_size, screen)
            flip()
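blit_thread drains a queue of surfaces, scales each one onto the display under the lock, and exits when it receives the None sentinel. A hedged sketch of how the main thread might drive it (the frame contents and queue bound are invented for illustration, and blit_thread is assumed to be defined as above):

import threading

import pygame
from six.moves import queue

pygame.init()
screen = pygame.display.set_mode((640, 480))
surface_queue = queue.Queue(maxsize=2)   # small bound so rendering applies backpressure
lock = threading.Lock()

worker = threading.Thread(target=blit_thread, args=(surface_queue, screen, lock))
worker.start()

# Produce a few frames; in a real game this would be the render loop.
for _ in range(3):
    frame = pygame.Surface((320, 240)).convert()
    frame.fill((255, 0, 0))
    surface_queue.put(frame)

surface_queue.put(None)   # sentinel: tells blit_thread to exit
worker.join()
pygame.quit()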
Example #5
 def _init_pool(arrs, queue, map_func):
     id = queue.get()
     global SHARED_ARR, WORKER_ID, MAP_FUNC
     SHARED_ARR = arrs[id]
     WORKER_ID = id
     MAP_FUNC = map_func
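_init_pool is a multiprocessing pool initializer: each worker pulls a unique id off the queue and binds its own shared array (and the map function) as module-level globals. A sketch of how it might be wired up with multiprocessing.Pool, assuming _init_pool is defined as above (the array contents, worker function and names are invented for illustration):

import multiprocessing as mp

NUM_WORKERS = 4


def _work(index):
    # Runs in a worker process; reads only the shared array bound in _init_pool.
    return WORKER_ID, SHARED_ARR[index]


if __name__ == "__main__":
    manager = mp.Manager()
    id_queue = manager.Queue()
    for worker_id in range(NUM_WORKERS):
        id_queue.put(worker_id)

    # One plain list per worker stands in for whatever shared buffers the real code uses.
    arrays = [[worker_id] * 10 for worker_id in range(NUM_WORKERS)]

    pool = mp.Pool(NUM_WORKERS, initializer=_init_pool,
                   initargs=(arrays, id_queue, None))   # map_func unused in this sketch
    print(pool.map(_work, range(5)))
    pool.close()
    pool.join()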
Example #7
 def _subscription_handler(self, queue, device_id, path, callback_fn):
     while True:
         value = queue.get()
         callback_fn(device_id, path, value)
Example #8
def basic_queue_generator(queue, received):
    while True:
        value = queue.get()
        received.put(value)
        yield value
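basic_queue_generator turns a queue into an endless iterator, echoing every value it yields into a second 'received' queue. A small driver, with arbitrary sample values, could pull a fixed number of items like this:

from itertools import islice

from six.moves import queue

source = queue.Queue()
received = queue.Queue()
for value in ("a", "b", "c"):
    source.put(value)

# Take exactly three values; the generator itself never terminates on its own.
print(list(islice(basic_queue_generator(source, received), 3)))   # ['a', 'b', 'c']
print(received.qsize())                                           # 3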
Example #9
    def request_data(self,
                     resource_name,
                     params={},
                     check_cache=True,
                     timeout=DEFAULT_REQUEST_TIMEOUT,
                     paging_size=DEFAULT_PAGE_SIZE,
                     concurrent_requests=DEFAULT_CONCURRENT_REQUEST,
                     hold=DEFAULT_HOLD_TIME):
        """
        Request a 'resource_name' resource from the REST API. The request can be tuned with filtering and sorting options.
        Check the REST API documentation for available filters by resource.

        To work around the API's rate limiting you can tune the paging_size, concurrent_requests and hold parameters:
        each chunk of 'concurrent_requests' requests is sent in parallel, and we wait for the hold time before sending
        the next chunk, until all entries of the resource have been retrieved.

        Parameters
        ----------
        resource_name : str
            Data resource name to be requested.
        params : dict (optional)
            Additional parameters to be sent along with the query for filtering, sorting, etc. (default : empty dict).
        check_cache : bool (optional)
            Whether or not to check the cache instead of performing a call against the REST API.
        timeout : int (optional)
            Time to wait for a response from the REST API (default : piapi.DEFAULT_REQUEST_TIMEOUT)
        paging_size : int (optional)
            Number of entries to include per page (default : piapi.DEFAULT_PAGE_SIZE).
        concurrent_requests : int (optional)
            Number of parallel requests to make (default : piapi.DEFAULT_CONCURRENT_REQUEST).
        hold : int (optional)
            Hold time in seconds to wait between chunks of concurrent requests to avoid rate limiting (default : piapi.DEFAULT_HOLD_TIME).

        Returns
        -------
        results : JSON structure
            Data results from the requested resources.
        """
        if resource_name not in self.data_resources:
            raise PIAPIResourceNotFound(
                "Data Resource '%s' not found in the API, check 'data_resources' property "
                "for a list of available resource_name" % resource_name)

        #  Check the cache to see if the couple (resource + parameters) already exists (using SHA256 hash of resource_name and params)
        #  hash_cache = hashlib.sha256(b"%s%s" % (resource_name, params)).hexdigest()
        #   if check_cache and hash_cache in self.cache:
        #      return self.cache[hash_cache]

        #  Get total number of entries for the request
        response = self.session.get(self._data_resources[resource_name],
                                    params=params,
                                    timeout=timeout)
        self._parse(response)
        count_entry = int(response.json()["queryResponse"]["@count"])
        if count_entry <= 0:
            raise PIAPICountError(
                "No result found for the query %s with params %s" %
                (response.url, params))

        #  Create the necessary requests with paging to avoid rate limiting
        paging_requests = []
        queue = six.moves.queue.Queue()
        for first_result in range(0, count_entry, paging_size):
            params_copy = copy.deepcopy(params)
            params_copy.update({
                ".full": "true",
                ".firstResult": first_result,
                ".maxResults": paging_size
            })
            #paging_requests.append(grequests.get(self._data_resources[resource_name], session=self.session, params=params_copy, verify=self.verify, timeout=timeout))
            paging_requests.append(
                threading.Thread(None,
                                 self._request_wrapper,
                                 args=(queue,
                                       self._data_resources[resource_name],
                                       params_copy, timeout)))

        #  Create chunks from the previous list of requests to avoid rate limiting (we hold between each chunk)
        chunk_requests = [
            paging_requests[x:x + concurrent_requests]
            for x in range(0, len(paging_requests), concurrent_requests)
        ]

        #  Bulk query the chunk pages by waiting between each chunk to avoid rate limiting
        responses = []
        for chunk_request in chunk_requests:
            #responses += grequests.map(chunk_request)
            for request in chunk_request:
                request.start()
            for request in chunk_request:
                request.join()
                responses.append(queue.get())
            time.sleep(hold)

        #  Parse the results of the previous queries
        results = []
        for response in responses:
            response_json = self._parse(response)
            results += response_json["queryResponse"]["entity"]
        #  self.cache[hash_cache] = results
        return results
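The paging threads above all call self._request_wrapper, which is not shown; given that the caller drains exactly one item from the queue per join(), it presumably performs the GET and pushes the raw response onto the shared queue. A hedged guess at its shape, assuming the same requests-style session used elsewhere in the method:

    def _request_wrapper(self, queue, url, params, timeout):
        """Fetch one page and hand the raw response back through the queue.

        Assumed implementation for illustration; the real method may differ,
        for example by catching exceptions and enqueueing them instead.
        """
        response = self.session.get(url, params=params, timeout=timeout)
        queue.put(response)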
Example #10
    def test__get_ftdc_file_path(self):
        """
        Test that a given directory is correctly searched for diagnostic.data directories and that
        the output is in the correct format.
        """
        dir_path = "test_reports"
        if os.path.exists(dir_path):
            shutil.rmtree(dir_path)
        directory_structure = {
            "test_reports": {
                "graphs": {
                    "test_false.txt": None
                },
                "fio": {
                    "mongod.0": {
                        "diagnostic.data": {
                            "metrics.2019-09-09T17-24-55Z-00000": None,
                            "metrics.2019-09-09T17-24-25Z-00000": None,
                        },
                        "mongod.log": None,
                    },
                    "mongod.1": {
                        "diagnostic.data": {},
                        "mongod.log": None
                    },
                },
                "test_false.txt": None,
                "iperf": {
                    "db-correctness": {
                        "db-hash-check": {
                            "test_false.txt": None
                        }
                    },
                    "mongod.0": {
                        "diagnostic.data": {
                            "metrics.2019-09-09T17-24-55Z-00000": None
                        },
                        "mongod.log": None,
                    },
                    "mongod.1": {
                        "diagnostic.data": {
                            "metrics.2019-09-09T17-24-55Z-00000": None
                        },
                        "mongod.log": None,
                    },
                    "test_false.txt": None,
                },
                "_post_task": {
                    "mongod.0": {
                        "mdiag.sh": None
                    },
                    "mongod.1": {
                        "mdiag.sh": None
                    }
                },
            }
        }
        curr_dir = directory_structure[dir_path]
        queue = six.moves.queue.Queue()
        queue.put((dir_path, curr_dir))
        while not queue.empty():
            path, curr_dir = queue.get()
            os.mkdir(path)
            for sub_dir in curr_dir:
                if curr_dir[sub_dir] is None:
                    with open(os.path.join(path, sub_dir), "w") as handle:
                        handle.write("test")
                else:
                    queue.put((os.path.join(path, sub_dir), curr_dir[sub_dir]))

        ftdc_metric_paths = ftdc_analysis._get_ftdc_file_paths(dir_path)
        expected_result = {
            "mongod.0": {
                "iperf":
                os.path.abspath(
                    "test_reports/iperf/mongod.0/diagnostic.data/metrics.2019-09-09T17-24-55Z-00000"
                ),
                "fio":
                os.path.abspath(
                    "test_reports/fio/mongod.0/diagnostic.data/metrics.2019-09-09T17-24-55Z-00000"
                ),
            },
            "mongod.1": {
                "iperf":
                os.path.abspath(
                    "test_reports/iperf/mongod.1/diagnostic.data/metrics.2019-09-09T17-24-55Z-00000"
                )
            },
        }
        self.assertEqual(ftdc_metric_paths, expected_result)
        shutil.rmtree(dir_path)
Example #11
  def _Worker():
    dump_syms = GetDumpSymsBinary(options.build_dir)
    while True:
      try:
        should_dump_syms = True
        reason = "no reason"
        binary = queue.get()

        run_once = True
        while run_once:
          run_once = False

          if not dump_syms:
            should_dump_syms = False
            reason = "Could not locate dump_syms executable."
            break

          dump_syms_output = subprocess.check_output(
              [dump_syms, '-i', binary]).decode('utf-8')
          header_info = dump_syms_output.splitlines()[0]
          binary_info = GetBinaryInfoFromHeaderInfo(header_info)
          if not binary_info:
            should_dump_syms = False
            reason = "Could not obtain binary information."
            break

          # See if the output file already exists.
          output_dir = os.path.join(options.symbols_dir, binary_info.name,
                                    binary_info.hash)
          output_path = os.path.join(output_dir, binary_info.name + '.sym')
          if os.path.isfile(output_path):
            should_dump_syms = False
            reason = "Symbol file already found."
            break

          # See if there is a symbol file already found next to the binary
          potential_symbol_files = glob.glob('%s.breakpad*' % binary)
          for potential_symbol_file in potential_symbol_files:
            with open(potential_symbol_file, 'rt') as f:
              symbol_info = GetBinaryInfoFromHeaderInfo(f.readline())
            if symbol_info == binary_info:
              CreateSymbolDir(options, output_dir, binary_info.hash)
              shutil.copyfile(potential_symbol_file, output_path)
              should_dump_syms = False
              reason = "Found local symbol file."
              break

        if not should_dump_syms:
          if options.verbose:
            with print_lock:
              print("Skipping %s (%s)" % (binary, reason))
          continue

        if options.verbose:
          with print_lock:
            print("Generating symbols for %s" % binary)

        CreateSymbolDir(options, output_dir, binary_info.hash)
        with open(output_path, 'wb') as f:
          subprocess.check_call([dump_syms, '-r', binary], stdout=f)
      except Exception as e:
        with exceptions_lock:
          exceptions.append(traceback.format_exc())
      finally:
        queue.task_done()
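_Worker relies on several names from its enclosing scope (queue, options, print_lock, exceptions_lock, exceptions) and pairs every queue.get() with queue.task_done(), which only pays off if the caller uses queue.join(). The surrounding driver is not shown; a hedged sketch of the usual consumer-thread pattern (the binaries list, thread count and variable names are assumptions):

import threading

from six.moves import queue as queue_module

queue = queue_module.Queue()
print_lock = threading.Lock()
exceptions_lock = threading.Lock()
exceptions = []

for binary in binaries_to_dump:            # assumed: list of binaries built by the caller
    queue.put(binary)

for _ in range(options.jobs):              # assumed: one daemon worker per requested job
    worker = threading.Thread(target=_Worker)
    worker.daemon = True
    worker.start()

queue.join()                               # returns once every queued binary hit task_done()

if exceptions:
    for message in exceptions:
        print(message)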
Example #12
def test_queue_context(queue_in_process):
    "Test that the queue in the context manager behaves correctly"
    with queue_in_process[0] as queue:
        msg = queue.get()
        assert msg == queue_in_process[1]
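The test relies on a queue_in_process fixture that yields a pair: a context-manager wrapper around a queue that a child process writes to, and the message that child is expected to send. One way such a fixture might be written (a sketch under those assumptions, not the project's actual fixture):

import multiprocessing

import pytest


class ManagedQueue(object):
    """Assumed helper: a multiprocessing queue usable as a context manager."""

    def __init__(self):
        self.queue = multiprocessing.Queue()

    def __enter__(self):
        return self.queue

    def __exit__(self, exc_type, exc_value, traceback):
        self.queue.close()


def _fill_queue(child_queue, message):
    # Runs in the child process and delivers the single expected message.
    child_queue.put(message)


@pytest.fixture
def queue_in_process():
    message = "hello from the child process"
    managed = ManagedQueue()
    process = multiprocessing.Process(target=_fill_queue,
                                      args=(managed.queue, message))
    process.start()
    yield managed, message
    process.join()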