Example No. 1
def logview():
    term = blessings.Terminal()
    if not term.is_a_tty:
        print("logview output cannot be piped, it is for display only")
    elif term.number_of_colors < 16:
        print(
            "logview requires a terminal that supports a minimum of 16 colours"
        )
    else:
        global config
        config = parse_args()
        print(f"\x1b]2;{TERM_TITLE}\x07", end="")
        colors.extend(COLORS[term.number_of_colors])
        for filename in config.file:
            filepath = os.path.abspath(os.path.join(config.dir, filename))
            if os.access(filepath, os.R_OK, effective_ids=True):
                windows.append(Window(filepath, config, Logger))
            elif os.access(filepath, os.F_OK, effective_ids=True):
                print(f"File '{filepath}' is not readable")
            else:
                print(f"File '{filepath}' does not exist")
            if windows and not windows[-1].reader.isOpen:
                windows.pop()

        if windows:
            screen = None
            try:
                screen = curses.initscr()
                curses.noecho()
                curses.cbreak()
                screen.keypad(True)
                screen.nodelay(True)
                screen.getch()

                print(term.enter_fullscreen + term.hide_cursor +
                      term.color(colors[APP_FG]) +
                      term.on_color(colors[APP_BG]) + term.clear)
                draw_app(term)
                calculate_windows(term, False)
                for window in windows:
                    window.start(term)
                    # window.load()
                    # window.refresh(True)
                with futures.ThreadPoolExecutor() as pool:
                    threads = {
                        pool.submit(window.load): window
                        for window in windows
                    }
                    for future in futures.as_completed(threads):
                        threads[future].refresh(True)
                main_loop(term, screen)
            finally:
                tprint(term, term.exit_fullscreen + term.normal_cursor)
                curses.nocbreak()
                if screen:
                    screen.keypad(False)
                curses.echo()
                curses.endwin()
                print(*log, sep="\n")
            exit(0)
    exit(1)
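The loading step above maps each submitted future back to its Window, then refreshes each window as soon as its load completes. A minimal self-contained sketch of that dict-of-futures pattern (Item, load() and refresh() are stand-ins, not the real Window API):

from concurrent import futures

class Item:
    def __init__(self, name):
        self.name = name

    def load(self):
        return self.name.upper()  # stand-in for slow file I/O

    def refresh(self, result):
        print(self.name, "->", result)

items = [Item("a"), Item("b"), Item("c")]
with futures.ThreadPoolExecutor() as pool:
    future_to_item = {pool.submit(item.load): item for item in items}
    for future in futures.as_completed(future_to_item):
        # as_completed yields futures in completion order, so fast loads
        # refresh their items without waiting for slow ones
        future_to_item[future].refresh(future.result())

Example No. 2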
    def gather_stage(self, harvest_job):
        self.job = harvest_job
        self._set_source_config(self.job.source.config)
        log.debug('Landsat-8 Harvester gather_stage for job: %r', harvest_job)

        self.provider = 'usgs'
        if not hasattr(self, 'provider_logger'):
            self.provider_logger = self.make_provider_logger()

        last_path_row = self._get_last_harvesting_tile(harvest_job.source_id)
        if last_path_row is not None:
            path = int(last_path_row[0])
            row = int(last_path_row[1])

            if path + 1 > PATH_MAX:
                path = 1
                row = 1 if row + 1 > ROW_MAX else row + 1
            else:
                path = path + 1
        else:
            path = self.source_config.get('path', 1)
            row = self.source_config.get('row', 1)

        update_all = self.source_config.get('update_all', False)
        bucket = self.source_config['bucket']
        s3 = self._set_s3_session()
        _ls_worker = partial(aws.list_directory, bucket, s3=s3)

        ids = []

        log.info('Harvesting Path: {} and Row: {}'.format(path, row))
        path = self._zeropad(path, 3)
        row = self._zeropad(row, 3)

        prefixes = ['c1/L8/{}/{}/'.format(path, row)]

        with futures.ThreadPoolExecutor(max_workers=2) as executor:
            results = executor.map(_ls_worker, prefixes)
            results = itertools.chain.from_iterable(results)

        scene_ids = [os.path.basename(key.strip('/')) for key in results]

        while not scene_ids:
            path = int(path)
            row = int(row)

            if path + 1 > PATH_MAX:
                path = 1
                row = 1 if row + 1 > ROW_MAX else row + 1
            else:
                path = path + 1

            log.info('Harvesting Path: {} and Row: {}'.format(path, row))
            path = self._zeropad(path, 3)
            row = self._zeropad(row, 3)

            prefixes = ['c1/L8/{}/{}/'.format(path, row)]

            with futures.ThreadPoolExecutor(max_workers=2) as executor:
                results = executor.map(_ls_worker, prefixes)
                results = itertools.chain.from_iterable(results)

            scene_ids = [os.path.basename(key.strip('/')) for key in results]

        for scene in scene_ids:
            _id = self._gather_entry(scene, int(path), int(row), update_all)
            if _id:
                ids.append(_id)

        return ids
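The listing step above appears twice: executor.map fans _ls_worker out over the prefixes, and itertools.chain.from_iterable flattens the per-prefix lists into one key stream. A minimal sketch with a stand-in worker:

import itertools
import os
from concurrent import futures

def list_keys(prefix):
    # stand-in for the partially-bound aws.list_directory(bucket, s3=s3)
    return [prefix + name + '/' for name in ('LC08_scene_a', 'LC08_scene_b')]

prefixes = ['c1/L8/001/001/']
with futures.ThreadPoolExecutor(max_workers=2) as executor:
    results = executor.map(list_keys, prefixes)
    results = itertools.chain.from_iterable(results)

# the chained iterator is consumed after the pool exits; the futures are
# already resolved because shutdown waits for them
scene_ids = [os.path.basename(key.strip('/')) for key in results]
print(scene_ids)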
Example No. 3
def main():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()
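For completeness, a matching client for the Greeter server above might look like this, assuming the stock helloworld protos from the grpcio tutorial (GreeterStub, SayHello and HelloRequest are the tutorial's generated names):

import grpc

import helloworld_pb2
import helloworld_pb2_grpc

def run():
    # connect to the insecure port opened by the server above
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        response = stub.SayHello(helloworld_pb2.HelloRequest(name='world'))
    print('Greeter client received: ' + response.message)

if __name__ == '__main__':
    run()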
Example No. 4
    if args.proxies:  # reconstructed guard implied by the else branch below
        proxies_list = ['http://' + proxy for proxy in args.proxies]
    else:
        proxies_list = []
    num_threads = args.threads
    cnpj_list_to_import, filesFound, filesNotFound = read_cnpj_source_files(
        args.args)
    temp_dataset = load_temp_dataset()
    cnpj_list = remaining_cnpjs(cnpj_list_to_import, temp_dataset)

    print('%i CNPJ\'s to be fetched' % len(cnpj_list))
    print('Starting fetch. {0} worker threads and {1} http proxies'.format(
        num_threads, len(proxies_list)))

    # Try again in case of error during fetch_cnpj_info
    while len(cnpj_list) > 0:
        with futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
            future_to_cnpj_info = {executor.submit(fetch_cnpj_info, cnpj): cnpj
                                   for cnpj in cnpj_list}
            last_saving_point = 0
            for future in futures.as_completed(future_to_cnpj_info):
                cnpj = future_to_cnpj_info[future]
                result = future.result() if future.exception() is None else None
                if result is not None and result['status'] == 'OK':
                    result_translated = transform_and_translate_data(result)
                    temp_dataset = pd.concat([temp_dataset, result_translated])
                    if last_saving_point < divmod(len(temp_dataset.index), 100)[0]:
                        last_saving_point = divmod(len(temp_dataset.index), 100)[0]
                        print('###################################')
                        print('Saving information already fetched. {0} records'.format(
                            len(temp_dataset.index)))
                        temp_dataset.to_csv(TEMP_DATASET_PATH,
                                            index=False)  # closing args assumed; source snippet cut off here
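The surrounding while loop retries until every CNPJ is fetched. A stripped-down sketch of that retry-until-done shape (fetch and the pending set are placeholders for fetch_cnpj_info and the CNPJ list):

from concurrent import futures

def fetch(item):
    return item.upper()  # placeholder for fetch_cnpj_info

pending = {'11111111000191', '22222222000191'}
results = {}
while pending:
    with futures.ThreadPoolExecutor(max_workers=4) as executor:
        future_to_item = {executor.submit(fetch, i): i for i in pending}
        for future in futures.as_completed(future_to_item):
            item = future_to_item[future]
            if future.exception() is None:
                results[item] = future.result()
    # anything that raised stays in pending and is retried on the next pass
    pending -= set(results)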
Example No. 5
# create a class to define the server functions, derived from
# calculator_pb2_grpc.CalculatorServicer
class CalculatorServicer(calculator_pb2_grpc.CalculatorServicer):

    # calculator.square_root is exposed here
    # the request and response are of the data type
    # calculator_pb2.Number
    def SquareRoot(self, request, context):
        response = calculator_pb2.Number()
        response.value = calculator.square_root(request.value)
        return response


# create a gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

# use the generated function `add_CalculatorServicer_to_server`
# to add the defined class to the server
calculator_pb2_grpc.add_CalculatorServicer_to_server(CalculatorServicer(),
                                                     server)

# listen on port 50051
print('Starting server. Listening on port 50051.')
server.add_insecure_port('[::]:50051')
server.start()

# since server.start() will not block,
# a sleep-loop is added to keep alive
try:
    while True:
        time.sleep(60 * 60 * 24)  # loop body reconstructed: sleep until interrupted
except KeyboardInterrupt:
    server.stop(0)
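Example No. 6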
    def test_abort(self):
        """Check that we can catch an abort properly"""

        # Intercept gRPC calls...
        interceptor = server_interceptor()

        # our detailed failure message
        failure_message = "This is a test failure"

        # aborting RPC handler
        def handler(request, context):
            context.abort(grpc.StatusCode.FAILED_PRECONDITION, failure_message)

        with futures.ThreadPoolExecutor(max_workers=1) as executor:
            server = grpc.server(
                executor,
                options=(("grpc.so_reuseport", 0), ),
                interceptors=[interceptor],
            )

            server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler), ))

            port = server.add_insecure_port("[::]:0")
            channel = grpc.insecure_channel(f"localhost:{port:d}")

            rpc_call = "TestServicer/handler"

            server.start()
            # unfortunately, these are just bare exceptions in grpc...
            with self.assertRaises(Exception):
                channel.unary_unary(rpc_call)(b"")
            server.stop(None)

        spans_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans_list), 1)
        span = spans_list[0]

        self.assertEqual(span.name, rpc_call)
        self.assertIs(span.kind, trace.SpanKind.SERVER)

        # Check version and name in span's instrumentation info
        self.assertEqualSpanInstrumentationInfo(
            span, opentelemetry.instrumentation.grpc)

        # make sure this span errored, with the right status and detail
        self.assertEqual(span.status.status_code, StatusCode.ERROR)
        self.assertEqual(
            span.status.description,
            f"{grpc.StatusCode.FAILED_PRECONDITION}:{failure_message}",
        )

        # Check attributes
        self.assertSpanHasAttributes(
            span,
            {
                SpanAttributes.NET_PEER_IP: "[::1]",
                SpanAttributes.NET_PEER_NAME: "localhost",
                SpanAttributes.RPC_METHOD: "handler",
                SpanAttributes.RPC_SERVICE: "TestServicer",
                SpanAttributes.RPC_SYSTEM: "grpc",
                SpanAttributes.RPC_GRPC_STATUS_CODE:
                    grpc.StatusCode.FAILED_PRECONDITION.value[0],
            },
        )
Example No. 7
class Crawler:
    '''Blueprint for creating new spiders'''

    home_url = ''
    novel_url = ''
    last_visited_url = None
    scraper = cfscrape.create_scraper()
    executor = futures.ThreadPoolExecutor(max_workers=5)
    '''Must resolve these fields inside `read_novel_info`'''
    novel_title = 'N/A'
    novel_author = 'N/A'
    novel_cover = None
    novel_summary = None
    '''
    Each item must contain these keys:
    `title` - the title of the volume
    '''
    volumes = []
    '''
    Each item must contain these keys:
    `id` - the index of the chapter
    `title` - the title name
    `volume` - the volume id of this chapter
    `url` - the link where to download the chapter
    `name` - the chapter name, e.g: 'Chapter 3' or 'Chapter 12 (Special)'
    '''
    chapters = []

    def __init__(self):
        self.scraper.verify = False

    # ------------------------------------------------------------------------- #
    # Implement these methods
    # ------------------------------------------------------------------------- #

    def initialize(self):
        pass

    def dispose(self):
        pass

    @property
    def supports_login(self):
        '''Whether the crawler supports the login() and logout() methods'''
        return False

    def login(self, email, password):
        pass

    def logout(self):
        pass

    def read_novel_info(self, url):
        '''Get novel title, author, cover, etc.'''
        pass

    def download_chapter_list(self):
        '''Download list of chapters and volumes.'''
        pass

    def download_chapter_body(self, chapter):
        '''Download the body of a single chapter and return it as clean HTML.'''
        pass

    def get_chapter_index_of(self, url):
        '''Return the index of the chapter at the given url, or 0 if not found'''
        url = (url or '').strip().strip('/')
        for chapter in self.chapters:
            if chapter['url'] == url:
                return chapter['id']

        return 0

    # ------------------------------------------------------------------------- #
    # Helper methods to be used
    # ------------------------------------------------------------------------- #
    @property
    def headers(self):
        return self.scraper.headers.copy()

    @property
    def cookies(self):
        return {x.name: x.value for x in self.scraper.cookies}

    def absolute_url(self, url):
        if not url:
            return None
        elif url.startswith('//'):
            return 'http:' + url
        elif url.find('//') >= 0:
            return url
        elif url.startswith('/'):
            return self.home_url + url
        else:
            return (self.last_visited_url or self.home_url) + '/' + url

    def get_response(self, url, incognito=False):
        self.last_visited_url = url.strip('/')
        response = self.scraper.get(url)
        response.encoding = 'utf-8'
        self.scraper.cookies.update(response.cookies)  # update the real jar; self.cookies returns a copy
        return response

    def submit_form(self, url, multipart=False, headers=None, **data):
        '''Submit a form using a POST request'''
        headers = dict(headers or {})  # avoid the mutable default; honor caller-supplied headers
        headers.setdefault(
            'content-type', 'multipart/form-data'
            if multipart else 'application/x-www-form-urlencoded')
        response = self.scraper.post(url, data=data, headers=headers)
        self.scraper.cookies.update(response.cookies)
        return response

    def download_cover(self, output_file):
        response = self.get_response(self.novel_cover)
        with open(output_file, 'wb') as f:
            f.write(response.content)
        # end with

    blacklist_patterns = [
        r'^(volume|chapter) .?\d+$',
    ]

    def not_blacklisted(self, text):
        for pattern in self.blacklist_patterns:
            if re.search(pattern, text, re.IGNORECASE):
                return False

        return True

    def extract_contents(self, contents, level=0):
        body = []
        for elem in contents:
            if elem.name in ['script', 'iframe', 'form', 'a', 'br', 'img']:
                continue
            elif elem.name in ['h3', 'div', 'p']:
                body += self.extract_contents(elem.contents, level + 1)
                continue

            if not elem.name:
                text = str(elem).strip()
            else:
                text = '<%s>%s</%s>' % (elem.name, elem.text.strip(),
                                        elem.name)

            patterns = [
                re.compile(r'<!--(.|\n)*-->', re.MULTILINE),
                re.compile(r'\[if (.|\n)*!\[endif\]', re.MULTILINE),
            ]
            for x in patterns:
                text = x.sub('', text).strip()

            if text:
                body.append(text)

        if level == 0:
            return [x for x in body if len(x) and self.not_blacklisted(x)]
        else:
            return body
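The class-level executor is shared by every spider instance. A hedged sketch of how it could drive concurrent chapter downloads (download_all is illustrative, not part of the class above):

from concurrent import futures

def download_all(crawler):
    future_to_chapter = {
        crawler.executor.submit(crawler.download_chapter_body, chapter): chapter
        for chapter in crawler.chapters
    }
    for future in futures.as_completed(future_to_chapter):
        chapter = future_to_chapter[future]
        chapter['body'] = future.result()  # store the cleaned html per chapter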
Example No. 8
class Hexitec():
    """Hexitec: Class that extracts and stores information about system-level parameters."""

    # Thread executor used for background tasks
    thread_executor = futures.ThreadPoolExecutor(max_workers=3)

    def __init__(self, options):
        """Initialise the Hexitec object.

        This constructor initialises the Hexitec object, building a
        parameter tree and launching a background task if enabled
        """
        defaults = HexitecDetectorDefaults()
        self.file_dir = options.get("save_dir", defaults.save_dir)
        self.file_name = options.get("save_file", defaults.save_file)
        self.number_frames = options.get("acquisition_num_frames",
                                         defaults.number_frames)
        self.number_frames_to_request = self.number_frames
        self.total_delay = 0.0
        # Back up number_frames, as the first initialisation temporarily sets number_frames = 2
        self.backed_up_number_frames = self.number_frames

        self.duration = 1
        self.duration_enable = False

        self.daq = HexitecDAQ(self, self.file_dir, self.file_name)

        self.adapters = {}

        self.fem = None
        for key, value in options.items():
            if "fem" in key:
                fem_info = value.split(',')
                fem_info = [(i.split('=')[0], i.split('=')[1])
                            for i in fem_info]
                fem_dict = {
                    fem_key.strip(): fem_value.strip()
                    for (fem_key, fem_value) in fem_info
                }
                logging.debug(fem_dict)
                self.fem = HexitecFem(
                    self,
                    fem_dict.get("server_ctrl_ip_addr",
                                 defaults.fem["server_ctrl_ip"]),
                    fem_dict.get("camera_ctrl_ip_addr",
                                 defaults.fem["camera_ctrl_ip"]),
                    fem_dict.get("server_data_ip_addr",
                                 defaults.fem["server_data_ip"]),
                    fem_dict.get("camera_data_ip_addr",
                                 defaults.fem["camera_data_ip"]))

        if not self.fem:
            self.fem = HexitecFem(
                parent=self,
                server_ctrl_ip_addr=defaults.fem["server_ctrl_ip"],
                camera_ctrl_ip_addr=defaults.fem["camera_ctrl_ip"],
                server_data_ip_addr=defaults.fem["server_data_ip"],
                camera_data_ip_addr=defaults.fem["camera_data_ip"])

        self.fem_health = True

        # Bias (clock) tracking variables #
        self.bias_clock_running = False
        self.bias_init_time = 0  # Placeholder
        self.bias_blocking_acquisition = False
        self.extended_acquisition = False  # Track acquisition spanning bias window(s)
        self.frames_already_acquired = 0  # Track frames acquired across collection windows

        self.collect_and_bias_time = self.fem.bias_refresh_interval + \
            self.fem.bias_voltage_settle_time + self.fem.time_refresh_voltage_held

        # Tracks whether first acquisition of multiple, bias-window(s), collection
        self.initial_acquisition = True
        # Tracks whether 2 frame fudge collection: (during cold initialisation)
        self.first_initialisation = True

        self.acquisition_in_progress = False

        # Watchdog variables
        self.error_margin = 400  # TODO: Revisit timeouts
        self.fem_tx_timeout = 5000
        self.daq_rx_timeout = self.collect_and_bias_time + self.error_margin
        self.fem_start_timestamp = 0
        self.time_waiting_for_data_arrival = 0

        # Store initialisation time
        self.init_time = time.time()

        self.system_health = True
        self.status_message = ""
        self.status_error = ""
        self.elog = ""
        self.number_nodes = 1
        # Software states:
        #   Cold, Disconnected, Idle, Acquiring
        self.software_state = "Cold"
        self.cold_initialisation = True

        detector = ParameterTree({
            "fem": self.fem.param_tree,
            "daq": self.daq.param_tree,
            "connect_hardware": (None, self.connect_hardware),
            "initialise_hardware": (None, self.initialise_hardware),
            "disconnect_hardware": (None, self.disconnect_hardware),
            "collect_offsets": (None, self._collect_offsets),
            "commit_configuration": (None, self.commit_configuration),
            "software_state": (lambda: self.software_state, None),
            "cold_initialisation": (lambda: self.cold_initialisation, None),
            "hv_on": (None, self.hv_on),
            "hv_off": (None, self.hv_off),
            "acquisition": {
                "number_frames":
                (lambda: self.number_frames, self.set_number_frames),
                "duration": (lambda: self.duration, self.set_duration),
                "duration_enable":
                (lambda: self.duration_enable, self.set_duration_enable),
                "start_acq": (None, self.acquisition),
                "stop_acq": (None, self.cancel_acquisition)
            },
            "status": {
                "system_health": (lambda: self.system_health, None),
                "status_message": (lambda: self.status_message, None),
                "status_error": (lambda: self.status_error, None),
                "elog": (lambda: self.elog, self.set_elog),
                "fem_health": (lambda: self.fem_health, None),
                "number_nodes":
                (lambda: self.number_nodes, self.set_number_nodes)
            }
        })

        self.system_info = SystemInfo()

        # Store all information in a parameter tree
        self.param_tree = ParameterTree({
            "system_info": self.system_info.param_tree,
            "detector": detector
        })

        self._start_polling()

    def _start_polling(self):
        IOLoop.instance().add_callback(self.polling)

    def polling(self):  # pragma: no cover
        """Poll FEM for status.

        Check if acquisition completed (if initiated), for error(s) and
        whether DAQ/FEM watchdogs timed out.
        """
        # Poll FEM acquisition & health status
        self.poll_fem()

        # Watchdog: Watch FEM in case no data from hardware triggered by fem.acquire_data()
        self.check_fem_watchdog()

        # TODO: WATCHDOG, monitor HexitecDAQ rate of frames_processed updated.. (Break if stalled)
        self.check_daq_watchdog()

        IOLoop.instance().call_later(1.0, self.polling)

    def get_frames_processed(self):
        """Get number of frames processed across node(s)."""
        status = self._get_od_status("fp")
        frames_processed = 0
        for index in status:
            # rank = index.get('hdf', None).get('rank')
            # frames = index.get('histogram').get('frames_processed')
            # print("    g_f_p(), rank: {} frames_processed: {}".format(rank, frames))
            frames_processed = frames_processed + index.get('histogram').get(
                'frames_processed')
        return frames_processed

    def poll_fem(self):
        """Poll FEM for acquisition and health status."""
        if self.fem.acquisition_completed:
            frames_processed = self.get_frames_processed()
            # Either cold initialisation (first_initialisation is True, therefore only 2 frames
            # expected) or, ordinary collection (self.number_frames frames expected)
            if ((self.first_initialisation and (frames_processed == 2))
                    or (frames_processed == self.number_frames)):  # noqa: W503

                if self.first_initialisation:
                    self.first_initialisation = False
                    self.number_frames = self.backed_up_number_frames  # TODO: redundant

                # Reset FEM's acquisition status ahead of future acquisitions
                self.fem.acquisition_completed = False
        # TODO: Also check sensor values?
        # ..
        fem_health = self.fem.get_health()
        self.fem_health = fem_health
        if self.system_health:
            self.status_error = self.fem._get_status_error()
            self.status_message = self.fem._get_status_message()
            self.system_health = self.system_health and self.fem_health

    def check_fem_watchdog(self):
        """Check data sent when FEM acquiring data."""
        if self.acquisition_in_progress:
            # TODO: Monitor FEM in case no data following fem.acquire_data() call
            if (self.fem.hardware_busy):
                fem_begun = self.fem.acquire_timestamp
                delta_time = time.time() - fem_begun
                logging.debug("    FEM w-dog: {0:.2f} < {1:.2f}".format(
                    delta_time, self.fem_tx_timeout))
                if (delta_time > self.fem_tx_timeout):
                    self.fem.stop_acquisition = True
                    self.shutdown_processing()
                    logging.error("FEM data transmission timed out")
                    error = "Timed out waiting ({0:.2f} seconds) for FEM data".format(
                        delta_time)
                    self.fem._set_status_message(error)

    def check_daq_watchdog(self):
        """Monitor DAQ's frames_processed while data processed.

        Ensure frames_processed increments and completes within a reasonable time
        of the acquisition; failure to do so indicates missing/dropped packet(s),
        so stop processing if stalled.
        """
        if self.daq.in_progress:
            processed_timestamp = self.daq.processed_timestamp
            delta_time = time.time() - processed_timestamp
            if (delta_time > self.daq_rx_timeout):
                logging.error("    DAQ -- PROCESSING TIMED OUT")
                # DAQ: Timed out waiting for next frame to process
                self.shutdown_processing()
                logging.error(
                    "DAQ processing timed out; Saw %s expected %s frames" %
                    (self.daq.frames_processed,
                     self.daq.frame_end_acquisition))
                self.fem._set_status_error(
                    "Processing timed out: {0:.2f} seconds \
                    (exceeded {1:.2f}); Expected {2} got {3} frames\
                        ".format(delta_time, self.daq_rx_timeout,
                                 self.daq.frame_end_acquisition,
                                 self.daq.frames_processed))
                self.fem._set_status_message("Processing abandoned")

    def shutdown_processing(self):
        """Stop processing in DAQ."""
        self.daq.shutdown_processing = True
        self.acquisition_in_progress = False

    def _get_od_status(self, adapter):
        """Get status from adapter."""
        try:
            request = ApiAdapterRequest(None, content_type="application/json")
            response = self.adapters[adapter].get("status", request)
            response = response.data["value"]
        except KeyError:
            logging.warning("%s Adapter Not Found" % adapter)
            response = [{"Error": "Adapter {} not found".format(adapter)}]
        finally:
            return response

    def connect_hardware(self, msg):
        """Set up watchdog timeout, start bias clock and connect with hardware."""
        # TODO: Must recalculate collect and bias time both here and in initialise()
        #   Logically, commit_configuration() is the best place but it updates variables before
        #   reading .ini file
        self.collect_and_bias_time = self.fem.bias_refresh_interval + \
            self.fem.bias_voltage_settle_time + self.fem.time_refresh_voltage_held

        self.daq_rx_timeout = self.collect_and_bias_time + self.error_margin
        # Start bias clock if not running
        if not self.bias_clock_running:
            IOLoop.instance().add_callback(self.start_bias_clock)
        self.fem.connect_hardware(msg)
        self.software_state = "Idle"

    def start_bias_clock(self):
        """Set up bias 'clock'."""
        if not self.bias_clock_running:
            self.bias_init_time = time.time()
            self.bias_clock_running = True
        self.poll_bias_clock()

    def poll_bias_clock(self):
        """Call periodically (0.1 seconds often enough??) to bias window status.

        Are we in bias refresh intv /  refresh volt held / Settle time ?
        Example: 60000 / 3000 / 2000: Collect for 60s, pause for 3+2 secs
        """
        current_time = time.time()
        time_elapsed = current_time - self.bias_init_time
        if (time_elapsed < self.fem.bias_refresh_interval):
            # Still within collection window - acquiring data is allowed
            pass
        else:
            if (time_elapsed < self.collect_and_bias_time):
                # Blackout period - Wait for electrons to replenish/voltage to stabilise
                self.bias_blocking_acquisition = True
            else:
                # Beyond blackout period - Back within bias
                # Reset bias clock
                self.bias_init_time = current_time
                self.bias_blocking_acquisition = False

        IOLoop.instance().call_later(0.1, self.poll_bias_clock)

    def initialise_hardware(self, msg):
        """Initialise hardware.

        Recalculate collect and bias timing, update watchdog timeout.
        """
        # TODO: Must recalculate collect and bias time both here and in initialise();
        #   Logically, commit_configuration() is the best place but it updates variables before
        #   values read from .ini file
        self.collect_and_bias_time = self.fem.bias_refresh_interval + \
            self.fem.bias_voltage_settle_time + self.fem.time_refresh_voltage_held

        self.daq_rx_timeout = self.collect_and_bias_time + self.error_margin
        # If first initialisation, ie fudge, temporarily change number_frames to 2
        # Adapter also controls this change in FEM
        if self.first_initialisation:
            self.backed_up_number_frames = self.number_frames
            self.number_frames = 2
            # TODO: Fix this fudge?
            self.fem.acquire_timestamp = time.time()
            self.acquisition_in_progress = True
        self.fem.initialise_hardware(msg)
        # Wait for fem initialisation/fudge frames
        IOLoop.instance().call_later(0.5, self.monitor_fem_progress)

    def disconnect_hardware(self, msg):
        """Disconnect FEM's hardware connection."""
        if self.daq.in_progress:
            # Stop hardware if still in acquisition
            if self.fem.hardware_busy:
                self.cancel_acquisition()
            # Reset daq
            self.shutdown_processing()
            # Allow processing to shutdown before disconnecting hardware
            IOLoop.instance().call_later(0.2, self.fem.disconnect_hardware)
        else:
            # Nothing in progress, disconnect hardware
            self.fem.disconnect_hardware(msg)
        self.software_state = "Disconnected"
        # Reset system status
        self.status_error = ""
        self.status_message = ""
        self.system_health = True
        # Stop bias clock
        if self.bias_clock_running:
            self.bias_clock_running = False

    def set_duration_enable(self, duration_enable):
        """Set duration enable, calculating number of frames accordingly."""
        self.duration_enable = duration_enable
        self.fem.set_duration_enable(duration_enable)
        # Ensure DAQ, FEM have correct duration/number of frames configured
        if duration_enable:
            self.set_duration(self.duration)
        else:
            # print("\n\tadp.set_duration_enable({}) number_frames: {}\n".format(duration_enable, self.number_frames))
            self.set_number_frames(self.number_frames)

    def set_number_frames(self, frames):
        """Set number of frames in DAQ, FEM."""
        # print("\n\tadp.set_number_frames({}) -> number_frames: {}\n".format(frames, self.number_frames))
        self.number_frames = frames
        # Update number of frames in Hardware, and (via DAQ) in histogram and hdf plugins
        self.fem.set_number_frames(self.number_frames)
        self.daq.set_number_frames(self.number_frames)

    def set_duration(self, duration):
        """Set duration, calculate frames from frame rate and update DAQ, FEM."""
        self.duration = duration
        self.fem.set_duration(self.duration)
        # print("\n\tadp.set_duration({}) number_frames {} -> {}\n".format(duration, self.fem.get_number_frames(), self.number_frames))
        self.number_frames = self.fem.get_number_frames()
        self.daq.set_number_frames(self.number_frames)

    def set_elog(self, entry):
        """Set the elog entry provided by the user through the UI."""
        self.elog = entry

    def set_number_nodes(self, number_nodes):
        """Set number of nodes."""
        self.number_nodes = number_nodes
        self.daq.set_number_nodes(self.number_nodes)

    def initialize(self, adapters):
        """Get references to adapters, and pass these to the classes that need to use them."""
        self.adapters = dict(
            (k, v) for k, v in adapters.items() if v is not self)
        self.daq.initialize(self.adapters)

    def acquisition(self, put_data=None):
        """Instruct DAQ and FEM to acquire data."""
        # Synchronise first_initialisation status (i.e. collect 2 fudge frames) with FEM
        if self.first_initialisation:
            self.first_initialisation = self.fem.first_initialisation
        else:
            # Clear (any previous) daq error
            self.daq.in_error = False

        if self.extended_acquisition is False:
            if self.daq.in_progress:
                logging.warning("Cannot Start Acquistion: Already in progress")
                self.fem._set_status_error(
                    "Cannot Start Acquistion: Already in progress")
                return

        self.total_delay = 0
        self.number_frames_to_request = self.number_frames

        if self.fem.bias_voltage_refresh:
            # Did the acquisition coincide with bias dead time?
            if self.bias_blocking_acquisition:
                IOLoop.instance().call_later(0.1, self.acquisition)
                return

            # Work out how many frames can be acquired before next bias refresh
            time_into_window = time.time() - self.bias_init_time
            time_available = self.fem.bias_refresh_interval - time_into_window

            if time_available < 0:
                IOLoop.instance().call_later(0.09, self.acquisition)
                return

            frames_before_bias = self.fem.frame_rate * time_available
            number_frames_before_bias = int(round(frames_before_bias))

            self.number_frames_to_request = self.number_frames - self.frames_already_acquired

            # Can we obtain all required frames within current bias window?
            if (number_frames_before_bias < self.number_frames_to_request):
                # Need >1 bias window to fulfil acquisition
                self.extended_acquisition = True
                self.number_frames_to_request = number_frames_before_bias

            self.total_delay = time_available + self.fem.bias_voltage_settle_time + \
                self.fem.time_refresh_voltage_held

        # # TODO: Remove once Firmware made to reset on each new acquisition
        # # TODO: WILL BE NON 0 VALUE IN THE FUTURE - TO SUPPORT BIAS REFRESH INTV
        # #       BUT, if nonzero then won't FP's Acquisition time out before processing done?????
        # #
        # Reset Reorder plugin's frame_number (to current frame number, for multi-window acquire)
        command = "config/reorder/frame_number"
        request = ApiAdapterRequest(self.file_dir,
                                    content_type="application/json")
        request.body = "{}".format(self.frames_already_acquired)
        self.adapters["fp"].put(command, request)
        # TODO: To be removed once firmware updated? FP may be slow to process frame_number reset
        time.sleep(0.5)

        # Reset histograms, call DAQ's prepare_daq() once per acquisition
        if self.initial_acquisition:
            # Issue reset to histogram
            command = "config/histogram/reset_histograms"
            request = ApiAdapterRequest(self.file_dir,
                                        content_type="application/json")
            request.body = "{}".format(1)
            self.adapters["fp"].put(command, request)

            self.daq_target = time.time()
            self.daq.prepare_daq(self.number_frames)
            self.initial_acquisition = False
            # Acquisition (whether single/multi-run) starts here
            self.acquisition_in_progress = True

        # Wait for DAQ (i.e. file writer) to be enabled before FEM told to collect data
        # IOLoop.instance().call_later(0.1, self.await_daq_ready)
        IOLoop.instance().add_callback(self.await_daq_ready)

    def await_daq_ready(self):
        """Wait until DAQ has configured, enabled file writer."""
        if (self.daq.in_error):
            # Reset state variables
            self.reset_state_variables()
        elif (self.daq.file_writing is False):
            IOLoop.instance().call_later(0.05, self.await_daq_ready)
        else:
            self.software_state = "Acquiring"
            # Add an additional 80 ms delay to ensure the file writer's file is open before the first frame arrives
            IOLoop.instance().call_later(0.08, self.trigger_fem_acquisition)

    def trigger_fem_acquisition(self):
        """Trigger data acquisition in fem."""
        # TODO: Temp hack: Prevent frames being 1 (continuous readout) by setting to 2 if it is
        self.number_frames_to_request = 2 if (self.number_frames_to_request == 1) else \
            self.number_frames_to_request
        self.fem.set_number_frames(self.number_frames_to_request)
        self.fem.collect_data()

        self.frames_already_acquired += self.number_frames_to_request
        # Note when FEM told to begin collecting data
        self.fem_start_timestamp = time.time()
        IOLoop.instance().call_later(self.total_delay,
                                     self.monitor_fem_progress)

    def monitor_fem_progress(self):
        """Check fem hardware progress.

        Busy either:
        -Initialising from cold (2 fudge frames)
        -Normal initialisation
        -Waiting for data collection to complete, either single/multi run
        """
        if (self.fem.hardware_busy):
            # Still sending data
            IOLoop.instance().call_later(0.5, self.monitor_fem_progress)
            return
        else:
            # Current collection completed; Do we have all the requested frames?
            if self.extended_acquisition:
                if (self.frames_already_acquired < self.number_frames):
                    # Need further bias window(s)
                    IOLoop.instance().add_callback(self.acquisition)
                    return

        # Issue reset to summed_image
        command = "config/summed_image/reset_image"
        request = ApiAdapterRequest(self.file_dir,
                                    content_type="application/json")
        request.body = "{}".format(1)
        self.adapters["fp"].put(command, request)

        rc = self.daq.prepare_odin()
        if not rc:
            message = "Prepare Odin failed!"
            self.fem._set_status_error(message)
            self.status_error = message

        self.reset_state_variables()

    def reset_state_variables(self):
        """Reset state variables.

        Utilised by await_daq_ready(), monitor_fem_progress()
        """
        self.initial_acquisition = True
        self.extended_acquisition = False
        self.acquisition_in_progress = False
        self.frames_already_acquired = 0
        self.software_state = "Idle"

    def cancel_acquisition(self, put_data=None):
        """Cancel ongoing acquisition in Software.

        Not yet possible to stop FEM, mid-acquisition
        """
        self.fem.stop_acquisition = True
        # Inject End of Acquisition Frame
        command = "config/inject_eoa"
        request = ApiAdapterRequest("", content_type="application/json")
        self.adapters["fp"].put(command, request)
        self.shutdown_processing()
        self.software_state = "Idle"

    def _collect_offsets(self, msg):
        """Instruct FEM to collect offsets."""
        self.fem.collect_offsets()

    def commit_configuration(self, msg):
        """Push HexitecDAQ's 'config/' ParameterTree settings into FP's plugins."""
        self.daq.commit_configuration()
        # Clear cold initialisation if first config commit
        if self.cold_initialisation:
            self.cold_initialisation = False

    def hv_on(self, msg):
        """Switch HV on."""
        # TODO: Complete placeholder
        self.fem.hv_bias_enabled = True

    def hv_off(self, msg):
        """Switch HV off."""
        # TODO: Complete placeholder
        self.fem.hv_bias_enabled = False

    def get(self, path):
        """
        Get the parameter tree.

        This method returns the parameter tree for use by clients via the Hexitec adapter.

        :param path: path to retrieve from tree
        """
        return self.param_tree.get(path)

    def set(self, path, data):
        """
        Set parameters in the parameter tree.

        This method simply wraps underlying ParameterTree method so that an exception can be
        re-raised with an appropriate HexitecError.

        :param path: path of parameter tree to set values for
        :param data: dictionary of new data values to set in the parameter tree
        """
        try:
            self.param_tree.set(path, data)
        except ParameterTreeError as e:
            raise HexitecError(e)
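The class keeps a three-worker thread_executor for background tasks. A minimal sketch of the usual Tornado pattern for offloading a blocking call so the IOLoop stays responsive (slow_io and on_done are illustrative):

import time
from concurrent import futures
from tornado.ioloop import IOLoop

thread_executor = futures.ThreadPoolExecutor(max_workers=3)

def slow_io():
    time.sleep(1)  # stand-in for e.g. a slow hardware transaction
    return "done"

def on_done(future):
    print("background task finished:", future.result())
    IOLoop.current().stop()

# hand the blocking work to the pool; Tornado invokes on_done on the IOLoop
IOLoop.current().add_future(thread_executor.submit(slow_io), on_done)
IOLoop.current().start()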
Example No. 9
def copy_dataset(input_path,
                 input_key,
                 output_path,
                 output_key,
                 is_h5,
                 convert_dtype=False,
                 chunks=None,
                 n_threads=1,
                 overwrite=False):

    with open_file(input_path, 'r') as f_in, open_file(output_path,
                                                       'a') as f_out:

        ds_in = f_in[input_key]
        shape = ds_in.shape

        have_data = output_key in f_out
        if have_data and not overwrite:
            return True

        # validate chunks
        if chunks is None:
            chunks_ = True if is_h5 else None
        else:
            chunks_ = tuple(min(ch, sh) for sh, ch in zip(shape, chunks))

        if convert_dtype:
            out_dtype = get_new_dtype(ds_in.dtype)
        else:
            out_dtype = ds_in.dtype

        if have_data and overwrite:
            del f_out[output_key]

        # create the output dataset and get the effective chunks
        ds_out = f_out.create_dataset(output_key,
                                      shape=shape,
                                      chunks=chunks_,
                                      compression='gzip',
                                      dtype=out_dtype)
        ds_chunks = ds_out.chunks

        def copy_chunk(bb):
            data = ds_in[bb]
            # skip empty chunks
            if data.sum() == 0:
                return
            if convert_dtype:
                data = convert_to_bdv_dtype(data)
            ds_out[bb] = data

        print("Copy initial dataset from: %s:%s to %s:%s" %
              (input_path, input_key, output_path, output_key))

        blocks = list(blocking(shape, ds_chunks))
        if n_threads > 1:
            with futures.ThreadPoolExecutor(n_threads) as tp:
                list(tqdm(tp.map(copy_chunk, blocks), total=len(blocks)))
        else:
            for bb in tqdm(blocks, total=len(blocks)):
                copy_chunk(bb)

    return False
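A hypothetical invocation with placeholder paths, keys and chunk shape; the call returns True only when the output dataset already exists and overwrite is False, i.e. when the copy was skipped:

# placeholder file names and keys, threaded copy over 64^3 chunks
skipped = copy_dataset('volume_in.h5', 'raw', 'volume_out.h5', 'raw',
                       is_h5=True, convert_dtype=True,
                       chunks=(64, 64, 64), n_threads=8)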
Example No. 10
def home2():
    """Doesn't work and should return the value from env."""
    with rasterio.Env(GDAL_DISABLE_READDIR_ON_OPEN="FALSE"):
        with futures.ThreadPoolExecutor() as executor:
            res = list(executor.map(f, range(1)))[0]
    return {"env": res}
Example No. 11
async def home3():
    """Works and should return FALSE."""
    with rasterio.Env(GDAL_DISABLE_READDIR_ON_OPEN="FALSE"):
        with futures.ThreadPoolExecutor() as executor:
            res = list(executor.map(f, range(1)))[0]
    return {"env": res}
Example No. 12
async def home3():
    """Works and should return FALSE."""
    with futures.ThreadPoolExecutor() as executor:
        res = list(executor.map(f, range(1)))[0]
    return {"env": res}
Example No. 13
def home2():
    """Doesn't work and should return the value from env."""
    with futures.ThreadPoolExecutor() as executor:
        res = list(executor.map(f, range(1)))[0]
    return {"env": res}
Example No. 14
class IBMQJob(BaseJob):
    """IBM Q Job class

    Attributes:
        _executor (futures.Executor): executor to handle asynchronous jobs
        _final_states (list(JobStatus)): terminal states of async jobs
    """
    _executor = futures.ThreadPoolExecutor()
    _final_states = [
        JobStatus.DONE,
        JobStatus.CANCELLED,
        JobStatus.ERROR
    ]

    def __init__(self, qobj, api, is_device):
        """IBMQJob init function.

        Args:
            qobj (dict): job description
            api (IBMQuantumExperience): IBM Q API
            is_device (bool): whether backend is a real device  # TODO: remove this after Qobj
        """
        super().__init__()
        self._qobj = qobj
        self._api = api
        self._id = None  # this must be before creating the future
        self._backend_name = self._qobj.get('config').get('backend_name')
        self._status = JobStatus.INITIALIZING
        self._future_submit = self._executor.submit(self._submit)
        self._status_msg = 'Job is initializing. Please wait a moment.'
        self._queue_position = None
        self._cancelled = False
        self._exception = None
        self._is_device = is_device
        self.creation_date = datetime.datetime.utcnow().replace(
            tzinfo=datetime.timezone.utc).isoformat()

    @classmethod
    def from_api(cls, job_info, api, is_device):
        """Instantiates job using information returned from
        IBMQuantumExperience about a particular job.

        Args:
            job_info (dict): This is the information about a job returned from
                the API. It has the simplified structure::

                    {'backend': {'id', 'backend id string',
                                 'name', 'ibmqx4'},
                     'id': 'job id string',
                     'qasms': [{'executionId': 'id string',
                                'qasm': 'qasm string'},
                              ]
                     'status': 'status string',
                     'seed': '1',
                     'shots': 1024,
                     'status': 'status string',
                     'usedCredits': 3,
                     'creationDate': '2018-06-13T04:31:13.175Z'
                     'userId': 'user id'}

            api (IBMQuantumExperience): IBM Q API
            is_device (bool): whether backend is a real device  # TODO: remove this after Qobj

        Returns:
            IBMQJob: an instance of this class
        """
        job_instance = cls.__new__(cls)
        job_instance._status = JobStatus.QUEUED
        job_instance._backend_name = job_info.get('backend').get('name')
        job_instance._api = api
        job_instance._id = job_info.get('id')
        job_instance._exception = None  # needs to be before status call below
        job_instance._status_msg = None
        job_instance._queue_position = None
        job_instance._cancelled = False
        job_instance._is_device = is_device
        job_instance.creation_date = job_info.get('creationDate')
        return job_instance

    def result(self, timeout=None, wait=5):
        """Return the result from the job.

        Args:
           timeout (int): number of seconds to wait for job
           wait (int): time between queries to IBM Q server

        Returns:
            Result: Result object

        Raises:
            IBMQJobError: exception raised during job initialization
        """
        # pylint: disable=arguments-differ
        while self._status == JobStatus.INITIALIZING:
            if self._future_submit.exception():
                raise IBMQJobError('error submitting job: {}'.format(
                    repr(self._future_submit.exception())))
            time.sleep(0.1)
        try:
            this_result = self._wait_for_job(timeout=timeout, wait=wait)
        except TimeoutError as err:
            # A timeout error retrieving the results does not imply the job
            # is failing. The job can be still running.
            return Result({'id': self._id, 'status': 'ERROR',
                           'result': str(err)})

        if self._is_device and self.done:
            _reorder_bits(this_result)

        if self._status not in self._final_states:
            if this_result.get_status() == 'ERROR':
                self._status = JobStatus.ERROR
            else:
                self._status = JobStatus.DONE
        return this_result

    def cancel(self):
        """Attempt to cancel job. Currently this is only possible on
        commercial systems.
        Returns:
            bool: True if job can be cancelled, else False.

        Raises:
            IBMQJobError: if server returned error
        """
        if self._is_commercial:
            hub = self._api.config['hub']
            group = self._api.config['group']
            project = self._api.config['project']
            response = self._api.cancel_job(self._id, hub, group, project)
            if 'error' in response:
                err_msg = response.get('error', '')
                error = IBMQJobError('Error cancelling job: %s' % err_msg)
                self._exception = error
                raise error
            else:
                self._cancelled = True
                return True
        else:
            self._cancelled = False
            return False

    @property
    def status(self):
        self._update_status()
        stats = {
            'job_id': self._id,
            'status': self._status,
            'status_msg': self._status_msg
        }
        if self._queue_position:
            stats['queue_position'] = self._queue_position
            # Reset once consumed to allow _update_status to regenerate the
            # value if needed.
            self._queue_position = None
        return stats

    def _update_status(self):
        """Query the API to update the status."""
        if (self._status in self._final_states or
                self._status == JobStatus.INITIALIZING):
            return None

        try:
            api_job = self._api.get_job(self.id)
            if 'status' not in api_job:
                raise QISKitError('get_job didn\'t return status: %s' %
                                  pprint.pformat(api_job))
        # pylint: disable=broad-except
        except Exception as err:
            self._status = JobStatus.ERROR
            self._exception = err
            self._status_msg = '{}'.format(err)
            return None

        if api_job['status'] == 'RUNNING':
            self._status = JobStatus.RUNNING
            self._status_msg = self._status.value
            queued, queue_position = self._is_job_queued(api_job)
            if queued:
                self._status = JobStatus.QUEUED
                self._status_msg = self._status.value
            if queue_position:
                self._queue_position = queue_position

        elif api_job['status'] == 'COMPLETED':
            self._status = JobStatus.DONE
            self._status_msg = self._status.value

        elif api_job['status'] == 'CANCELLED':
            self._status = JobStatus.CANCELLED
            self._status_msg = self._status.value
            self._cancelled = True

        elif 'ERROR' in api_job['status']:
            # ERROR_CREATING_JOB or ERROR_RUNNING_JOB
            self._status = JobStatus.ERROR
            self._status_msg = api_job['status']

        elif self.exception or self._future_submit.exception():
            self._status = JobStatus.ERROR
            if self._future_submit.exception():
                self._exception = self._future_submit.exception()
            self._status_msg = str(self.exception)

        else:
            self._status = JobStatus.ERROR
            self._exception = IBMQJobError(
                'Unrecognized result: \n{}'.format(pprint.pformat(api_job)))
            self._status_msg = '{}'.format(self._exception)

        return api_job

    def _is_job_queued(self, api_job):
        is_queued, position = False, None
        if 'infoQueue' in api_job:
            if 'status' in api_job['infoQueue']:
                queue_status = api_job['infoQueue']['status']
                is_queued = queue_status == 'PENDING_IN_QUEUE'
            if 'position' in api_job['infoQueue']:
                position = api_job['infoQueue']['position']
        return is_queued, position

    @property
    def queued(self):
        """
        Returns whether job is queued.

        Returns:
            bool: True if job is queued, else False.

        Raises:
            QISKitError: couldn't get job status from server
        """
        return self.status['status'] == JobStatus.QUEUED

    @property
    def running(self):
        """
        Returns whether job is actively running

        Returns:
            bool: True if job is running, else False.

        Raises:
            QISKitError: couldn't get job status from server
        """
        return self.status['status'] == JobStatus.RUNNING

    @property
    def done(self):
        """
        Returns True if job successfully finished running.

        Note behavior is slightly different than Future objects which would
        also return true if successfully cancelled.
        """
        return self.status['status'] == JobStatus.DONE

    @property
    def cancelled(self):
        return self._cancelled

    @property
    def exception(self):
        """
        Return Exception object previously raised by job else None

        Returns:
            Exception: exception raised by job
        """
        if isinstance(self._exception, Exception):
            self._status_msg = str(self._exception)
        return self._exception

    @property
    def _is_commercial(self):
        config = self._api.config
        # this check may give false positives so should probably be improved
        return config.get('hub') and config.get('group') and config.get('project')

    @property
    def id(self):
        """
        Return backend determined id (also available in status method).
        """
        # pylint: disable=invalid-name
        while self._id is None and self._status not in self._final_states:
            if self._future_submit.exception():
                self._status = JobStatus.ERROR
                self._exception = self._future_submit.exception()
            # job is initializing and hasn't gotten an id yet.
            time.sleep(0.1)
        return self._id

    @property
    def backend_name(self):
        """
        Return backend name used for this job
        """
        return self._backend_name

    def _submit(self):
        """Submit job to IBM Q.

        Returns:
            dict: submission info including job id from server

        Raises:
            QISKitError: The backend name in the job doesn't match this backend.
            ResultError: If the API reported an error with the submitted job.
            RegisterSizeError: If the requested register size exceeded device
                capability.
        """
        qobj = self._qobj
        api_jobs = []
        for circuit in qobj['circuits']:
            job = {}
            if (('compiled_circuit_qasm' not in circuit) or
                    (circuit['compiled_circuit_qasm'] is None)):
                compiled_circuit = transpile(circuit['circuit'])
                circuit['compiled_circuit_qasm'] = compiled_circuit.qasm(qeflag=True)
            if isinstance(circuit['compiled_circuit_qasm'], bytes):
                job['qasm'] = circuit['compiled_circuit_qasm'].decode()
            else:
                job['qasm'] = circuit['compiled_circuit_qasm']
            if 'name' in circuit:
                job['name'] = circuit['name']
            # convert numpy types for json serialization
            compiled_circuit = json.loads(
                json.dumps(circuit['compiled_circuit'],
                           default=_numpy_type_converter))
            job['metadata'] = {'compiled_circuit': compiled_circuit}
            api_jobs.append(job)
        seed0 = qobj['circuits'][0]['config']['seed']
        hpc = None
        if 'hpc' in qobj['config']:
            try:
                # Use CamelCase when passing the hpc parameters to the API.
                hpc = {
                    'multiShotOptimization':
                        qobj['config']['hpc']['multi_shot_optimization'],
                    'ompNumThreads':
                        qobj['config']['hpc']['omp_num_threads']
                }
            except (KeyError, TypeError):
                hpc = None
        backend_name = qobj['config']['backend_name']
        if backend_name != self._backend_name:
            raise QISKitError("inconsistent qobj backend "
                              "name ({0} != {1})".format(backend_name,
                                                         self._backend_name))
        submit_info = {}
        try:
            submit_info = self._api.run_job(api_jobs, backend=backend_name,
                                            shots=qobj['config']['shots'],
                                            max_credits=qobj['config']['max_credits'],
                                            seed=seed0,
                                            hpc=hpc)
        # pylint: disable=broad-except
        except Exception as err:
            self._status = JobStatus.ERROR
            self._status_msg = str(err)
            self._exception = err
            return None
        if 'error' in submit_info:
            self._status = JobStatus.ERROR
            self._status_msg = str(submit_info['error'])
            self._exception = IBMQJobError(self._status_msg)
            return submit_info
        self._id = submit_info.get('id')
        self.creation_date = submit_info.get('creationDate')
        self._status = JobStatus.QUEUED
        return submit_info

    def _wait_for_job(self, timeout=60, wait=5):
        """Wait until all online ran circuits of a qobj are 'COMPLETED'.

        Args:
            timeout (float or None): seconds to wait for job. If None, wait
                indefinitely.
            wait (float): seconds between queries

        Returns:
            Result: A result object.

        Raises:
            QISKitError: if the job didn't return a status or reported an
                error in its status
            TimeoutError: if the job does not return results before the
                specified timeout.
        """
        start_time = time.time()
        api_result = self._update_status()
        while self._status not in self._final_states:
            elapsed_time = time.time() - start_time
            if timeout is not None and elapsed_time >= timeout:
                raise TimeoutError('QISKit timed out')

            # check for a status before logging it; the API may omit the field
            if 'status' not in api_result:
                self._exception = QISKitError("get_job didn't return status: %s" %
                                              (pprint.pformat(api_result)))
                raise QISKitError("get_job didn't return status: %s" %
                                  (pprint.pformat(api_result)))
            logger.info('status = %s (%d seconds)', api_result['status'],
                        elapsed_time)

            if (api_result['status'] == 'ERROR_CREATING_JOB' or
                    api_result['status'] == 'ERROR_RUNNING_JOB'):
                job_result = {'id': self._id, 'status': 'ERROR',
                              'result': api_result['status']}
                return Result(job_result)

            time.sleep(wait)
            api_result = self._update_status()

        if self.cancelled:
            job_result = {'id': self._id, 'status': 'CANCELLED',
                          'result': 'job cancelled'}
            return Result(job_result)

        elif self.exception:
            job_result = {'id': self._id, 'status': 'ERROR',
                          'result': str(self.exception)}
            return Result(job_result)

        if api_result is None:
            api_result = self._api.get_job(self._id)

        job_result_list = []
        for circuit_result in api_result['qasms']:
            this_result = {'data': circuit_result['data'],
                           'name': circuit_result.get('name'),
                           'compiled_circuit_qasm': circuit_result.get('qasm'),
                           'status': circuit_result['status']}
            if 'metadata' in circuit_result:
                this_result['metadata'] = circuit_result['metadata']
            job_result_list.append(this_result)
        job_result = {'id': self._id,
                      'status': api_result['status'],
                      'used_credits': api_result.get('usedCredits'),
                      'result': job_result_list,
                      'backend_name': self.backend_name}
        return Result(job_result)
Exemplo n.º 15
0
    def test_create_two_spans_streaming(self):
        """Verify that the interceptor captures sub spans in a
        streaming call, within the given trace"""
        class TwoSpanServicer(GRPCTestServerServicer):
            # pylint:disable=C0103
            def ServerStreamingMethod(self, request, context):

                # create another span
                tracer = trace.get_tracer(__name__)
                with tracer.start_as_current_span("child") as child:
                    child.add_event("child event")

                for data in ("one", "two", "three"):
                    yield Response(
                        server_id=request.client_id,
                        response_data=data,
                    )

        # Intercept gRPC calls...
        interceptor = server_interceptor()

        with futures.ThreadPoolExecutor(max_workers=1) as executor:
            server = grpc.server(
                executor,
                options=(("grpc.so_reuseport", 0), ),
                interceptors=[interceptor],
            )
            add_GRPCTestServerServicer_to_server(TwoSpanServicer(), server)
            port = server.add_insecure_port("[::]:0")
            channel = grpc.insecure_channel(f"localhost:{port:d}")

            # setup the RPC
            rpc_call = "/GRPCTestServer/ServerStreamingMethod"
            request = Request(client_id=1, request_data="test")
            msg = request.SerializeToString()
            try:
                server.start()
                list(channel.unary_stream(rpc_call)(msg))
            finally:
                server.stop(None)

        spans_list = self.memory_exporter.get_finished_spans()
        self.assertEqual(len(spans_list), 2)
        child_span = spans_list[0]
        parent_span = spans_list[1]

        self.assertEqual(parent_span.name, rpc_call)
        self.assertIs(parent_span.kind, trace.SpanKind.SERVER)

        # Check version and name in span's instrumentation info
        self.assertEqualSpanInstrumentationInfo(
            parent_span, opentelemetry.instrumentation.grpc)

        # Check attributes
        self.assertSpanHasAttributes(
            parent_span,
            {
                SpanAttributes.NET_PEER_IP: "[::1]",
                SpanAttributes.NET_PEER_NAME: "localhost",
                SpanAttributes.RPC_METHOD: "ServerStreamingMethod",
                SpanAttributes.RPC_SERVICE: "GRPCTestServer",
                SpanAttributes.RPC_SYSTEM: "grpc",
                SpanAttributes.RPC_GRPC_STATUS_CODE:
                grpc.StatusCode.OK.value[0],
            },
        )

        # Check the child span
        self.assertEqual(child_span.name, "child")
        self.assertEqual(parent_span.context.trace_id,
                         child_span.context.trace_id)
Exemplo n.º 16
0
def serve(max_workers=10, port=7777):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    grpc_bt_grpc.add_DeepFakesFaceSwapServicer_to_server(
        DeepFakesFaceSwapServicer(), server)
    server.add_insecure_port("[::]:{}".format(port))
    return server
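
# Usage sketch (assumed caller, not part of the original example): serve()
# returns an unstarted server, so the caller is expected to start it and
# block until termination.
if __name__ == "__main__":
    grpc_server = serve()
    grpc_server.start()
    grpc_server.wait_for_termination()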
Exemplo n.º 17
0
    def test_concurrent_server_spans(self):
        """Check that concurrent RPC calls don't interfere with each other.

        This is the same check as test_sequential_server_spans except that the
        RPCs are concurrent. Two handlers are invoked at the same time on two
        separate threads. Each one should see a different active span and
        context.
        """

        interceptor = server_interceptor()

        # Capture the currently active span in each thread
        active_spans_in_handler = []
        latch = get_latch(2)

        def handler(request, context):
            latch()
            active_spans_in_handler.append(trace.get_current_span())
            return b""

        with futures.ThreadPoolExecutor(max_workers=2) as executor:
            server = grpc.server(
                executor,
                options=(("grpc.so_reuseport", 0), ),
                interceptors=[interceptor],
            )
            server.add_generic_rpc_handlers((UnaryUnaryRpcHandler(handler), ))

            port = server.add_insecure_port("[::]:0")
            channel = grpc.insecure_channel(f"localhost:{port:d}")

            try:
                server.start()
                # Interleave calls so spans are active on each thread at the same
                # time
                with futures.ThreadPoolExecutor(max_workers=2) as tpe:
                    f1 = tpe.submit(
                        channel.unary_unary("TestServicer/handler"), b"")
                    f2 = tpe.submit(
                        channel.unary_unary("TestServicer/handler"), b"")
                futures.wait((f1, f2))
            finally:
                server.stop(None)

        self.assertEqual(len(active_spans_in_handler), 2)
        # pylint:disable=unbalanced-tuple-unpacking
        span1, span2 = active_spans_in_handler
        # Spans should belong to separate traces
        self.assertNotEqual(span1.context.span_id, span2.context.span_id)
        self.assertNotEqual(span1.context.trace_id, span2.context.trace_id)

        for span in (span1, span2):
            # each should be a root span
            self.assertIsNone(span.parent)

            # check attributes
            self.assertSpanHasAttributes(
                span,
                {
                    SpanAttributes.NET_PEER_IP: "[::1]",
                    SpanAttributes.NET_PEER_NAME: "localhost",
                    SpanAttributes.RPC_METHOD: "handler",
                    SpanAttributes.RPC_SERVICE: "TestServicer",
                    SpanAttributes.RPC_SYSTEM: "grpc",
                    SpanAttributes.RPC_GRPC_STATUS_CODE:
                    grpc.StatusCode.OK.value[0],
                },
            )
Exemplo n.º 18
0
import sys
import os
import re
import random
import json
from itertools import groupby

import tornado.ioloop
import tornado.web
from concurrent import futures

import path, tools
from configer import conf

MAX_WORKERS = 16

executor = futures.ThreadPoolExecutor(max_workers=MAX_WORKERS)


@conf.register_my_setup(level=2)
def set_up():
    ''' erase all nodes
        this function may be called for hot deployment
    '''
    Router.mapper = []
    Router.mapper_sentry = {}
    Router.last_sentry = {}
    global logger
    logger = tools.Log().getLog()
    # automatically scan dirs and initialize all nodes
    files_list = os.listdir(path._BIZ_PATH)
    files_list = set(
Exemplo n.º 19
0
    def get_thread_pool(self):
        """Override this method to customize how the thread pool is created"""
        return futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
Exemplo n.º 20
0
def generate_new_device_map(local_ip, ip_range=(6, 128)):
    devices_map = {}
    subnet_ip = local_ip.split(".")[0:3]
    subnet_ip = ".".join(subnet_ip)

    logging.info("Scanning attached devices")
    scanned = ["%s.%i" % (subnet_ip, i) for i in range(*ip_range)]
    urls = ["http://%s" % str(s) for s in scanned]

    # We can use a with statement to ensure threads are cleaned up promptly
    with futures.ThreadPoolExecutor(max_workers=128) as executor:
        # Start the load operations and mark each future with its URL

        fs = [executor.submit(scan_one_device, url) for url in urls]
        for f in futures.as_completed(fs):

            try:
                id, ip = f.result()
                if id is None:
                    continue
                devices_map[id] = {
                    "ip": ip,
                    "status": "Software broken",
                    "id": id
                }

            except Exception:
                logging.error("Error whilst pinging url")
                logging.error(traceback.format_exc())
    if len(devices_map) < 1:
        logging.warning("No device detected")
        return devices_map

    logging.info("Detected %i devices:\n%s" %
                 (len(devices_map), str(devices_map.keys())))

    # We can use a with statement to ensure threads are cleaned up promptly
    with futures.ThreadPoolExecutor(max_workers=128) as executor:
        # Start the load operations and mark each future with its device id
        fs = {}
        for id in devices_map.keys():
            fs[executor.submit(update_dev_map_wrapped, devices_map, id)] = id

        for f in futures.as_completed(fs):
            id = fs[f]
            try:
                data = f.result()
                devices_map[id].update(data)
            except Exception:
                logging.error("Could not get data from device %s :" % id)
                logging.error(traceback.format_exc())

    # Adds the active_branch to devices_map
    with futures.ThreadPoolExecutor(max_workers=128) as executor:
        # Start the load operations and mark each future with its device id
        fs = {}
        for id in devices_map.keys():
            fs[executor.submit(update_dev_map_wrapped,
                               devices_map,
                               id,
                               what='device/active_branch',
                               port='8888')] = id
        for f in futures.as_completed(fs):
            id = fs[f]
            try:
                data = f.result()
                devices_map[id].update(data)
            except Exception:
                logging.error("Could not get data from device %s :" % id)
                logging.error(traceback.format_exc())

    # Adds the check_update to devices_map
    with futures.ThreadPoolExecutor(max_workers=128) as executor:
        # Start the load operations and mark each future with its device id
        fs = {}
        for id in devices_map.keys():
            fs[executor.submit(update_dev_map_wrapped,
                               devices_map,
                               id,
                               what='device/check_update',
                               port='8888')] = id
        for f in futures.as_completed(fs):
            id = fs[f]
            try:
                data = f.result()
                devices_map[id].update(data)
            except Exception:
                logging.error("Could not get data from device %s :" % id)
                logging.error(traceback.format_exc())

    return devices_map
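
# A hedged refactor sketch (not part of the original example): the three
# scatter/gather blocks above share the same shape, so they could be folded
# into one helper. update_dev_map_wrapped and the logging setup are assumed
# from the example above.
def _update_all(devices_map, **kwargs):
    with futures.ThreadPoolExecutor(max_workers=128) as executor:
        # fan out one request per device, keyed by device id
        fs = {
            executor.submit(update_dev_map_wrapped, devices_map, dev_id,
                            **kwargs): dev_id
            for dev_id in devices_map
        }
        # gather results as they complete, logging per-device failures
        for f in futures.as_completed(fs):
            dev_id = fs[f]
            try:
                devices_map[dev_id].update(f.result())
            except Exception:
                logging.error("Could not get data from device %s :" % dev_id)
                logging.error(traceback.format_exc())

# e.g. _update_all(devices_map, what='device/active_branch', port='8888')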
Exemplo n.º 21
0
    def init_rpc_server(self):
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
        add_NodeServicer_to_server(self, server)
        server.add_insecure_port(f'localhost:{self.port}')
        server.start()
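        # Note: wait_for_termination() blocks the calling thread, so
        # init_rpc_server only returns once the server shuts down.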
        server.wait_for_termination()
Exemplo n.º 22
0
def start_yatai_service_grpc_server(db_url, repo_base_url, grpc_port, ui_port,
                                    with_ui, s3_endpoint_url):
    from bentoml.yatai.yatai_service_impl import YataiService

    yatai_service = YataiService(
        db_url=db_url,
        repo_base_url=repo_base_url,
        s3_endpoint_url=s3_endpoint_url,
    )
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_YataiServicer_to_server(yatai_service, server)
    debug_mode = get_debug_mode()
    if debug_mode:
        try:
            logger.debug('Enabling gRPC server reflection for debugging')
            from grpc_reflection.v1alpha import reflection
            from bentoml.yatai.proto import yatai_service_pb2

            SERVICE_NAMES = (
                yatai_service_pb2.DESCRIPTOR.services_by_name['Yatai'].
                full_name,
                reflection.SERVICE_NAME,
            )
            reflection.enable_server_reflection(SERVICE_NAMES, server)
        except ImportError:
            logger.debug(
                'Failed enabling gRPC server reflection, missing required package: '
                '"pip install grpcio-reflection"')
    server.add_insecure_port(f'[::]:{grpc_port}')
    server.start()
    if with_ui:
        web_ui_log_path = os.path.join(
            config("logging").get("BASE_LOG_DIR"),
            config('logging').get("yatai_web_server_log_filename"),
        )

        ensure_node_available_or_raise()
        yatai_grpc_server_address = f'localhost:{grpc_port}'
        async_start_yatai_service_web_ui(yatai_grpc_server_address, ui_port,
                                         web_ui_log_path, debug_mode)

    # We don't import _echo function from click_utils because of circular dep
    click.echo(
        f'* Starting BentoML YataiService gRPC Server\n'
        f'* Debug mode: { "on" if debug_mode else "off"}\n'
        f'* Web UI: {f"running on http://127.0.0.1:{ui_port}" if with_ui else "off"}\n'
        f'* Running on 127.0.0.1:{grpc_port} (Press CTRL+C to quit)\n'
        f'* Usage:\n'
        f'*  Set config: `bentoml config set yatai_service.url=127.0.0.1:{grpc_port}`\n'
        f'*  Set env var: `export BENTOML__YATAI_SERVICE__URL=127.0.0.1:{grpc_port}`\n'
        f'* Help and instructions: '
        f'https://docs.bentoml.org/en/latest/guides/yatai_service.html\n'
        f'{f"* Web server log can be found here: {web_ui_log_path}" if with_ui else ""}'
    )

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        logger.info("Terminating YataiService gRPC server..")
        server.stop(grace=None)
Exemplo n.º 23
0
def composite(sources, bounds, shape, target_crs, band_count):
    """Composite data from sources into a single raster covering bounds, but in
    the target CRS."""
    # avoid circular dependencies
    from . import _nodata, get_resolution_in_meters, get_source, read_window

    # TODO this belongs in render
    if bounds.crs == target_crs:
        canvas_bounds = bounds
    else:
        canvas_bounds = Bounds(
            warp.transform_bounds(bounds.crs, target_crs, *bounds.bounds),
            target_crs)

    resolution = get_resolution_in_meters(bounds, shape)
    sources = recipes.preprocess(sources, resolution=resolution)

    def _read_window(source):
        with get_source(source.url) as src:
            LOG.info("Compositing %s (%s) as band %s", source.url, source.name,
                     source.band)

            # read a window from the source data
            # TODO ask for a buffer here, get back an updated bounding box
            # reflecting it
            try:
                window_data = read_window(src, canvas_bounds, shape,
                                          source.recipes)
            except Exception as e:
                LOG.exception("Error reading %s: %s", source.url, e)
                return

            return source, PixelCollection(window_data.data,
                                           window_data.bounds, source.band)

    # iterate over available sources, sorted by decreasing "quality"
    with futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() *
                                    5) as executor:
        ws = executor.map(_read_window, sources)
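    # executor.map submits every source up front, so shutdown(wait=True) on
    # leaving the with-block lets all reads finish before ws is consumed below.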

    sources_used = []

    ws = recipes.postprocess(ws)

    canvas_data = np.ma.zeros((band_count, ) + shape,
                              dtype=np.float32,
                              fill_value=_nodata(np.float32))
    canvas_data.mask = True

    canvas = PixelCollection(canvas_data, canvas_bounds)

    for source, window_data in filter(None, ws):
        window_data = recipes.apply(source.recipes, window_data, source=source)

        if window_data.data is None:
            continue

        # paste the resulting data onto a canvas
        canvas = paste(
            PixelCollection(
                window_data.data.astype(np.float32),
                window_data.bounds,
                window_data.band,
            ),
            canvas,
        )
        sources_used.append(source)

        if not canvas.data.mask.any():
            # stop if all pixels are valid
            break

    return map(lambda s: (s.name, s.url), sources_used), canvas
Exemplo n.º 24
0
    def _sync(self, depsgraph):
        stage = self.cached_stage.create()

        UsdGeom.SetStageMetersPerUnit(stage, 1)
        UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)

        root_prim = stage.GetPseudoRoot()

        objects_len = sum(1 for _ in object.ObjectData.depsgraph_objects(depsgraph, use_scene_cameras=False))

        objects_stage = Usd.Stage.CreateNew(str(get_temp_file(".usda")))
        object_root_prim = objects_stage.GetPseudoRoot()

        for i, obj_data in enumerate(object.ObjectData.depsgraph_objects_obj(depsgraph, use_scene_cameras=False)):
            if self.render_engine.test_break():
                return

            self.notify_status(0.0, f"Syncing object {i}/{objects_len}: {obj_data.object.name}")

            object.sync(object_root_prim, obj_data)

        for prim in objects_stage.GetPseudoRoot().GetAllChildren():
            override_prim = stage.OverridePrim(root_prim.GetPath().AppendChild(prim.GetName()))
            override_prim.GetReferences().AddReference(objects_stage.GetRootLayer().realPath, prim.GetPath())

        instance_len = sum(1 for _ in object.ObjectData.depsgraph_objects_inst(depsgraph, use_scene_cameras=False))
        chunks = math.ceil(instance_len / CHUNK_COUNT)
        chunks_data = {}

        for i in range(chunks):
            chunk_stage = Usd.Stage.CreateNew(str(get_temp_file(".usda")))
            chunk_prim = stage.OverridePrim(f'/chunk_{i}')
            chunks_data[i] = {'stage': chunk_stage, 'prim': chunk_prim}

        objects_processed = 0
        threadLock = threading.Lock()

        def sync_chunk(idx, stage, prim):
            nonlocal objects_processed

            xform = UsdGeom.Xform.Define(stage, stage.GetPseudoRoot().GetPath().AppendChild(f'chunk_{idx}'))
            obj_prim = xform.GetPrim()

            for i, obj_data in enumerate(object.ObjectData.depsgraph_objects_inst(depsgraph, use_scene_cameras=False)):
                if idx * CHUNK_COUNT <= i < (idx + 1) * CHUNK_COUNT:
                    with threadLock:
                        objects_processed += 1

                    self.notify_status(0.0, f"Syncing instances: {objects_processed} / {instance_len}")
                    object.sync(obj_prim, obj_data, objects_stage)

            stage.SetDefaultPrim(obj_prim)

        with futures.ThreadPoolExecutor() as executor:
            chunk_sync = []

            for idx in chunks_data:
                chunk_sync.append(executor.submit(sync_chunk, idx,
                                                  chunks_data[idx]['stage'],
                                                  chunks_data[idx]["prim"]))

            # Block until every chunk has finished syncing, then wire each
            # chunk stage into the main stage as a reference.
            futures.wait(chunk_sync)
            for i in chunks_data:
                chunk_prim = stage.GetPrimAtPath(chunks_data[i]["prim"].GetPath())
                chunk_prim.GetReferences().AddReference(chunks_data[i]['stage'].GetRootLayer().realPath)

        if depsgraph.scene.world is not None:
            world.sync(root_prim, depsgraph.scene.world)

        object.sync(stage.GetPseudoRoot(), object.ObjectData.from_object(depsgraph.scene.camera),
                    scene=depsgraph.scene)
Exemplo n.º 25
0
    async def searcher_async(self):
        with futures.ThreadPoolExecutor() as executor:
            fut = executor.submit(self.searcher)
            return await asyncio.wrap_future(fut)
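
    # A hedged alternative (assumes Python 3.9+, not part of the original):
    # asyncio.to_thread offloads the blocking call without managing an
    # executor by hand:
    #
    #     async def searcher_async(self):
    #         return await asyncio.to_thread(self.searcher)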
Exemplo n.º 26
0
def _handle_training(ctx: Context) -> None:
    with futures.ThreadPoolExecutor() as pool:
        trainings = [
            i.result() for i in [
                pool.submit(Training.from_training_scene, j)
                for j in _iter_training_images()
            ]
        ]

    races_with_score = sorted(
        ((i, i.score(ctx)) for i in race.find(ctx)),
        key=lambda x: x[1],
        reverse=True,
    )

    trainings_with_score = [(i, i.score(ctx)) for i in trainings]
    trainings_with_score = sorted(trainings_with_score,
                                  key=lambda x: x[1],
                                  reverse=True)

    expected_score = ctx.expected_score()
    LOGGER.info("expected score:\t%2.2f", expected_score)
    for r, s in races_with_score:
        LOGGER.info("score:\trace:\t%2.2f:\t%s", s, r)
    for t, s in trainings_with_score:
        LOGGER.info("score:\ttraining:\t%2.2f:\t%s", s, t)
    training, training_score = trainings_with_score[0]

    if races_with_score:
        r, s = races_with_score[0]
        if (s > expected_score and
                s > training_score) or (ctx.fan_count < ctx.target_fan_count
                                        and r.estimate_order(ctx) <= 3):
            # go to race
            action.wait_tap_image(templates.RETURN_BUTTON)
            action.wait_tap_image(templates.SINGLE_MODE_COMMAND_RACE)
            tmpl, _ = action.wait_image(
                templates.SINGLE_MODE_RACE_START_BUTTON,
                templates.SINGLE_MODE_CONTINUOUS_RACE_TITLE,
            )
            if tmpl.name == templates.SINGLE_MODE_CONTINUOUS_RACE_TITLE:
                if ctx.continuous_race_count() >= 3:
                    action.wait_tap_image(templates.GREEN_OK_BUTTON)
                else:
                    # continuous race count incorrect, evaluate again:
                    ctx.race_turns.update(
                        range(ctx.turn_count() - 3, ctx.turn_count()))
                    action.wait_tap_image(templates.CANCEL_BUTTON)
                    action.wait_tap_image(
                        templates.SINGLE_MODE_COMMAND_TRAINING)
                    _handle_training(ctx)
                    return
            _choose_race(ctx, r)
            _handle_race(ctx, r)
            return

    if training_score < expected_score:
        # not worth, go rest
        action.tap_image(templates.RETURN_BUTTON)
        action.wait_image(templates.SINGLE_MODE_COMMAND_TRAINING)
        if action.tap_image(templates.SINGLE_MODE_COMMAND_HEALTH_CARE):
            return

        if ctx.mood < ctx.MOOD_VERY_GOOD:
            tmpl, pos = action.wait_image(
                templates.SINGLE_MODE_COMMAND_GO_OUT,
                templates.SINGLE_MODE_COMMAND_SUMMER_REST,
            )
            action.tap(pos)
            action.wait_image_disappear(tmpl)
        else:
            tmpl, pos = (action.wait_image(
                templates.SINGLE_MODE_REST,
                templates.SINGLE_MODE_COMMAND_SUMMER_REST,
            ) if ctx.vitality < 0.8 else action.wait_image(
                templates.SINGLE_MODE_COMMAND_GO_OUT,
                templates.SINGLE_MODE_COMMAND_SUMMER_REST,
            ))
            action.tap(pos)
            action.wait_image_disappear(tmpl)
        time.sleep(0.5)
        if action.count_image(templates.SINGLE_MODE_GO_OUT_MENU_TITLE):
            options_with_score = sorted(
                [(i, i.score(ctx))
                 for i in go_out.Option.from_menu(template.screenshot())],
                key=lambda x: x[1],
            )
            for option, score in options_with_score:
                LOGGER.info("go out option:\t%s:\tscore:%.2f", option, score)
            action.tap(options_with_score[0][0].position)
        return
    x, y = training.confirm_position
    if trainings[-1] != training:
        action.tap((x, y))
        time.sleep(0.1)
    action.tap((x, y))
Exemplo n.º 27
0
        param = param_value[0]
        value = param_value[1]
        if value[-1] != '9':
            newvalue = value[:-1] + str(int(value[-1]) + 1)
        else:
            newvalue = value[:-1] + '0'
        newurl = url.replace(param + '=' + value, param + '=' + newvalue)
        if "^" not in url:
            # get request
            rsp = requests.get(newurl)
        else:
            # post request
            post_url = newurl.split("^")[0]
            data = newurl.split("^")[1]
            rsp = requests.post(post_url,
                                data=data.encode("utf-8"),
                                verify=False,
                                timeout=10)

        if rsp.status_code == 200 and not rsp.history:
            string_to_write = ("Congratulations! unauthorize vul may exist:" +
                               url + "the vul param is:" + param + "\n")
            CLIOutput().good_print(string_to_write)
            with open("%s/result.txt" % current_dir, "a+") as f:
                f.write(string_to_write)


from concurrent import futures
with futures.ThreadPoolExecutor(max_workers=15) as executor:
    executor.map(check, check_url_list)
Exemplo n.º 28
0
def dataframe_to_arrays(df, schema, preserve_index, nthreads=1, columns=None):
    if columns is None:
        columns = df.columns
    column_names = []
    index_columns = []
    index_column_names = []
    type = None

    if preserve_index:
        n = len(getattr(df.index, 'levels', [df.index]))
        index_columns.extend(df.index.get_level_values(i) for i in range(n))

    columns_to_convert = []
    convert_types = []

    if not df.columns.is_unique:
        raise ValueError('Duplicate column names found: {}'.format(
            list(df.columns)))

    for name in columns:
        col = df[name]
        name = _column_name_to_strings(name)

        if schema is not None:
            field = schema.field_by_name(name)
            type = getattr(field, "type", None)

        columns_to_convert.append(col)
        convert_types.append(type)
        column_names.append(name)

    for i, column in enumerate(index_columns):
        columns_to_convert.append(column)
        convert_types.append(None)
        name = _index_level_name(column, i, column_names)
        index_column_names.append(name)

    # NOTE(wesm): If nthreads=None, then we use a heuristic to decide whether
    # using a thread pool is worth it. Currently the heuristic is whether the
    # nrows > 100 * ncols.
    if nthreads is None:
        nrows, ncols = len(df), len(df.columns)
        if nrows > ncols * 100:
            nthreads = pa.cpu_count()
        else:
            nthreads = 1

    def convert_column(col, ty):
        return pa.array(col, from_pandas=True, type=ty)

    if nthreads == 1:
        arrays = [
            convert_column(c, t)
            for c, t in zip(columns_to_convert, convert_types)
        ]
    else:
        from concurrent import futures
        with futures.ThreadPoolExecutor(nthreads) as executor:
            arrays = list(
                executor.map(convert_column, columns_to_convert,
                             convert_types))

    types = [x.type for x in arrays]

    metadata = construct_metadata(df, column_names, index_columns,
                                  index_column_names, preserve_index, types)
    names = column_names + index_column_names
    return names, arrays, metadata
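
# Usage sketch (hypothetical; pa/pyarrow and module-level helpers such as
# construct_metadata are assumed from the enclosing module):
#
#     import pandas as pd
#     df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
#     names, arrays, metadata = dataframe_to_arrays(
#         df, schema=None, preserve_index=False, nthreads=2)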
Exemplo n.º 29
0
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    launchtor_pb2_grpc.add_TorRequesterServicer_to_server(TorRequester(), server)
    server.add_insecure_port('[::]:'+tor_grpc_port)
    server.start()
    server.wait_for_termination()
Exemplo n.º 30
0
    def register_tests(self):
        if self.tests:
            return self.tests

        if not self.rebuild():
            raise RuntimeError("Could not rebuild GStreamer unit tests")

        self.load_tests_info()
        mesontests = self.get_meson_tests()
        to_inspect = []
        all_sublaunchers_tests = set()
        for test in mesontests:
            sublauncher_tests = self.setup_tests_from_sublauncher(test)
            if sublauncher_tests:
                all_sublaunchers_tests |= sublauncher_tests
                continue
            binary = test['cmd'][0]
            test_info = self.check_binary_ts(binary)
            if test_info is True:
                continue
            elif test_info is None:
                test_info = self.check_binary_ts(binary)
                if test_info is None:
                    raise RuntimeError("Test binary %s does not exist"
                                       " even after a full rebuild" % binary)

            with open(binary, 'rb') as f:
                if b"gstcheck" not in f.read():
                    self.tests_info[binary] = [0, []]
                    continue
            to_inspect.append(test)

        if to_inspect:
            # use a context manager so worker threads are shut down promptly
            with conc.ThreadPoolExecutor(
                    max_workers=self.options.num_jobs) as executor:
                tmp = []
                for test in to_inspect:
                    tmp.append(
                        executor.submit(self._list_gst_check_tests, test))

                for e in tmp:
                    e.result()

        for test in mesontests:
            name = self.get_test_name(test)
            if name in all_sublaunchers_tests:
                continue
            gst_tests = self.tests_info[test['cmd'][0]][1]
            if os.path.basename(test['cmd'][0]) in \
                    ['gst-tester-1.0', 'gst-tester-1.0.exe']:
                fpath = test['cmd'][1]
                self.add_test(
                    GstValidateSimpleTest(fpath, name, self.options,
                                          self.reporter))
                continue
            if not gst_tests:
                child_env = self.get_child_env(name)
                self.add_test(
                    GstCheckTest(name, self.options, self.reporter, test,
                                 child_env))
            else:
                for ltest in gst_tests:
                    name = self.get_test_name(test) + '.' + ltest
                    child_env = self.get_child_env(name, ltest)
                    self.add_test(
                        GstCheckTest(name, self.options, self.reporter, test,
                                     child_env))
        self.save_tests_info()
        self._registered = True
        return self.tests