def ping(config_file, profile, solver_def, json_output, request_timeout, polling_timeout):
    """Ping the QPU by submitting a single-qubit problem."""
    now = utcnow()
    # accumulated report; in JSON mode it is flushed as one object on exit
    info = {'datetime': now.isoformat(),
            'timestamp': datetime_to_timestamp(now),
            'code': 0}

    def emit(fmt, **fields):
        # fold fields into the JSON report; echo human-readable text otherwise
        info.update(fields)
        if not json_output:
            click.echo(fmt.format(**fields))

    def finalize():
        # in JSON mode the report is printed exactly once, as a single line
        if json_output:
            click.echo(json.dumps(info))

    try:
        _ping(config_file, profile, solver_def, request_timeout, polling_timeout, emit)
    except CLIError as error:
        emit("Error: {error} (code: {code})", error=str(error), code=error.code)
        sys.exit(error.code)
    except Exception as error:
        emit("Unhandled error: {error}", error=str(error))
        sys.exit(127)
    finally:
        finalize()
def __init__(self, solver, id_, return_matrix, submission_data):
    self.solver = solver

    # keep the original query payload around so the problem can be resubmitted
    self._submission_data = submission_data

    # cancellation bookkeeping; the lock guarantees cancel is sent at most once
    self._cancel_requested = False
    self._cancel_sent = False
    self._single_cancel_lock = threading.Lock()

    # numpy is required to decode results into matrices
    if return_matrix and not _numpy:
        raise ValueError("Matrix result requested without numpy.")
    self.return_matrix = return_matrix

    #: The id the server will use to identify this problem, None until the id is actually known
    self.id = id_

    #: `datetime` the Future was created (immediately before enqueued in Client's submit queue)
    self.time_created = utcnow()
    #: `datetime` corresponding to the time when the problem was accepted by the server (None before then)
    self.time_received = None
    #: `datetime` corresponding to the time when the problem was completed by the server (None before then)
    self.time_solved = None
    #: `datetime` the Future was resolved (marked as done; succeeded or failed), or None before then
    self.time_resolved = None

    # estimated completion-time window (`earliest`/`latest`) returned on submit
    self.eta_min = self.eta_max = None

    # parse duration, and approx. server-client clock difference in seconds
    self.parse_time = None
    self.clock_diff = None

    # raw server response, and its parsed form (either data or an error)
    self._message = None
    self._result = None
    self.error = None

    #: Status flag most recently returned by the server
    self.remote_status = None

    # event(s) signalled when the results become ready
    self._results_ready_event = threading.Event()
    self._other_events = []

    # current poll back-off interval, in seconds
    self._poll_backoff = None
def wrapped(*args, **kwargs):
    # honor the callee's text/json preference (read from its own kwargs)
    json_output = kwargs.get('json_output', False)

    now = utcnow()
    # accumulated report; in JSON mode it is flushed as one object on exit
    info = {'datetime': now.isoformat(),
            'timestamp': datetime_to_timestamp(now),
            'code': 0}

    def output(fmt, maxlen=None, **params):
        # fold params into the JSON report; in text mode echo (optionally truncated)
        info.update(params)
        if json_output:
            return
        msg = fmt.format(**params)
        if maxlen is not None:
            msg = strtrunc(msg, maxlen)
        click.echo(msg)

    def flush():
        # in JSON mode the report is printed exactly once, as a single line
        if json_output:
            click.echo(json.dumps(info))

    try:
        fn(*args, output=output, **kwargs)
    except CLIError as error:
        output("Error: {error} (code: {code})", error=str(error), code=error.code)
        sys.exit(error.code)
    except Exception as error:
        output("Unhandled error: {error}", error=str(error))
        sys.exit(127)
    finally:
        flush()
def __init__(self, solver, id_, return_matrix=False):
    self.solver = solver

    # cancellation bookkeeping; the lock guarantees cancel is sent at most once
    self._cancel_requested = False
    self._cancel_sent = False
    self._single_cancel_lock = threading.Lock()

    # signalled as soon as the remote problem id becomes known
    self._id_ready_event = threading.Event()

    # numpy is required to decode results into matrices
    if return_matrix and not _numpy:
        raise ValueError("Matrix result requested without numpy.")
    self.return_matrix = return_matrix

    #: The id the server will use to identify this problem, None until the id is actually known
    self.id = id_

    #: Problem label, as (optionally) set on submission. None until parsed from a response.
    self.label = None

    #: `datetime` the Future was created (immediately before enqueued in Client's submit queue)
    self.time_created = utcnow()
    #: `datetime` corresponding to the time when the problem was accepted by the server (None before then)
    self.time_received = None
    #: `datetime` corresponding to the time when the problem was completed by the server (None before then)
    self.time_solved = None
    #: `datetime` the Future was resolved (marked as done; succeeded or failed), or None before then
    self.time_resolved = None

    # parse duration, and approx. server-client clock difference in seconds
    self.parse_time = None
    self.clock_diff = None

    # raw server response, and its parsed form (result or exception)
    self._message = None
    self._result = None
    self._exception = None

    #: Status flag most recently returned by the server
    self.remote_status = None

    # event(s) signalled when the results become ready
    self._results_ready_event = threading.Event()
    self._other_events = []

    # current poll back-off interval, in seconds
    self._poll_backoff = None

    # XXX: energy offset carried via Future, until implemented in SAPI
    self._offset = 0
def next(self, state, **runopts):
    """Record the keyed view of ``state`` on every configured sink, then
    pass the state through unchanged."""
    record = {
        'time': utcnow(),
        'timestamp': time.monotonic(),
        'data': self.key(state),
    }
    if self.extra is not None:
        record.update(self.extra)

    serialized = json.dumps(record, cls=OceanEncoder)

    # sink 1: stream output (flushed per record unless buffering is enabled)
    if self.outfile:
        print(serialized, file=self.outfile, flush=not self.buffering)

    # sink 2: in-memory record list
    if self.records is not None:
        self._append_record(record)

    # sink 3: logger, at the configured level
    if self.loglevel:
        logger.log(self.loglevel, f"{self.name} Record({serialized})")

    return state
def _poll(self, future):
    """Enqueue a problem to poll the server for status.

    Schedules `future` onto the poll queue with a priority timestamp `at`
    (epoch seconds). The first poll is scheduled at the server-provided ETA
    when available and the local clock looks reliable; subsequent polls use
    exponential back-off clipped to [_POLL_BACKOFF_MIN, _POLL_BACKOFF_MAX].

    Raises:
        PollingTimeout:
            If the future's age at the next scheduled poll would exceed
            ``self.polling_timeout``.
    """
    if future._poll_backoff is None:
        # on first poll, start with minimal back-off
        future._poll_backoff = self._POLL_BACKOFF_MIN

        # if we have ETA of results, schedule the first poll for then
        # (only trust the ETA when server/client clocks agree closely enough)
        if future.eta_min and self._is_clock_diff_acceptable(future):
            at = datetime_to_timestamp(future.eta_min)
            _LOGGER.debug("Response ETA indicated and local clock reliable. "
                          "Scheduling first polling at +%.2f sec", at - epochnow())
        else:
            at = time.time() + future._poll_backoff
            _LOGGER.debug("Response ETA not indicated, or local clock unreliable. "
                          "Scheduling first polling at +%.2f sec", at - epochnow())
    else:
        # update exponential poll back-off, clipped to a range
        future._poll_backoff = \
            max(self._POLL_BACKOFF_MIN,
                min(future._poll_backoff * 2, self._POLL_BACKOFF_MAX))

        # for poll priority we use timestamp of next scheduled poll
        at = time.time() + future._poll_backoff

    now = utcnow()
    future_age = (now - future.time_created).total_seconds()
    _LOGGER.debug("Polling scheduled at %.2f with %.2f sec new back-off for: %s (future's age: %.2f sec)",
                  at, future._poll_backoff, future.id, future_age)

    # don't enqueue for next poll if polling_timeout is exceeded by then
    # (age is projected forward to the scheduled poll time, not measured now)
    future_age_on_next_poll = future_age + (at - datetime_to_timestamp(now))
    if self.polling_timeout is not None and future_age_on_next_poll > self.polling_timeout:
        _LOGGER.debug("Polling timeout exceeded before next poll: %.2f sec > %.2f sec, aborting polling!",
                      future_age_on_next_poll, self.polling_timeout)
        raise PollingTimeout

    # queue is priority-ordered by `at`, so earlier polls run first
    self._poll_queue.put((at, future))
def _signal_ready(self):
    """Signal all the events waiting on this future.

    Stamps `time_resolved` with the current (UTC) time, then wakes the
    future's own ready-event and every externally registered event.
    """
    self.time_resolved = utcnow()
    self._results_ready_event.set()
    # plain loop instead of a side-effect-only list comprehension
    # (the original built and discarded a list of None)
    for event in self._other_events:
        event.set()
def test_datetime(self):
    """OceanEncoder serializes a datetime as its ISO-8601 string."""
    moment = utcnow()
    encoded = json.dumps(moment, cls=OceanEncoder)
    expected = json.dumps(moment.isoformat())
    self.assertEqual(encoded, expected)
def test_utcnow(self):
    """utcnow() returns a timezone-aware datetime in UTC, close to wall-clock now."""
    # local import: avoids depending on a module-level `timezone` name
    from datetime import timezone

    t = utcnow()

    # the returned datetime must be aware, with a zero UTC offset
    self.assertEqual(t.utcoffset().total_seconds(), 0.0)

    # compare against an aware "now": datetime.utcnow() is deprecated
    # (since Python 3.12) and mixing naive/aware datetimes is error-prone
    now = datetime.now(timezone.utc)
    self.assertLess(abs((now - t).total_seconds()), 1.0)
def ping(config_file, profile, json_output, request_timeout, polling_timeout):
    """Ping the QPU by submitting a single-qubit problem.

    Returns a non-zero exit code on failure (1: bad config, 2: auth error,
    3: bad API response, 4/7/8: request timeout, 5: unexpected error during
    solver fetch, 6: no solvers, 9: polling timeout, 10: sampling error);
    falls off the end (implicitly returning None) on success.
    """

    def output_error(msg, *values):
        # errors are reported immediately: wrapped in JSON in --json mode,
        # plain text otherwise
        if json_output:
            click.echo(json.dumps({"error": msg.format(*values)}))
        else:
            click.echo(msg.format(*values))

    now = utcnow()
    # accumulated success report; flushed as a single JSON object at the end
    info = dict(datetime=now.isoformat(), timestamp=datetime_to_timestamp(now))

    def stage_info(msg, **kwargs):
        # record stage data for the JSON report; echo text in non-JSON mode
        info.update(kwargs)
        if not json_output:
            click.echo(msg.format(**kwargs))

    def flush_info():
        if json_output:
            click.echo(json.dumps(info))

    # only pass timeouts through when explicitly given, so client defaults apply
    config = dict(config_file=config_file, profile=profile)
    if request_timeout is not None:
        config.update(request_timeout=request_timeout)
    if polling_timeout is not None:
        config.update(polling_timeout=polling_timeout)
    try:
        client = Client.from_config(**config)
    except Exception as e:
        output_error("Invalid configuration: {!r}", e)
        return 1
    if config_file:
        stage_info("Using configuration file: {config_file}", config_file=config_file)
    if profile:
        stage_info("Using profile: {profile}", profile=profile)
    stage_info("Using endpoint: {endpoint}", endpoint=client.endpoint)

    t0 = timer()
    try:
        solvers = client.get_solvers()
    except SolverAuthenticationError:
        output_error("Authentication error. Check credentials in your configuration file.")
        return 2
    except (InvalidAPIResponseError, UnsupportedSolverError):
        output_error("Invalid or unexpected API response.")
        return 3
    except RequestTimeout:
        output_error("API connection timed out.")
        return 4
    except Exception as e:
        output_error("Unexpected error: {!r}", e)
        return 5

    try:
        solver = client.get_solver()
    except (ValueError, KeyError):
        # if not otherwise defined (ValueError), or unavailable (KeyError),
        # just use the first solver
        if solvers:
            _, solver = next(iter(solvers.items()))
        else:
            output_error("No solvers available.")
            return 6
    except RequestTimeout:
        output_error("API connection timed out.")
        return 7

    t1 = timer()
    stage_info("Using solver: {solver_id}", solver_id=solver.id)

    try:
        # submit the minimal single-qubit Ising problem; reading `timing`
        # blocks until the problem is resolved
        future = solver.sample_ising({0: 1}, {})
        timing = future.timing
    except RequestTimeout:
        output_error("API connection timed out.")
        return 8
    except PollingTimeout:
        output_error("Polling timeout exceeded.")
        return 9
    except Exception as e:
        output_error("Sampling error: {!r}", e)
        return 10
    t2 = timer()

    stage_info("\nWall clock time:")
    stage_info(" * Solver definition fetch: {wallclock_solver_definition:.3f} ms",
               wallclock_solver_definition=(t1 - t0) * 1000.0)
    stage_info(" * Problem submit and results fetch: {wallclock_sampling:.3f} ms",
               wallclock_sampling=(t2 - t1) * 1000.0)
    stage_info(" * Total: {wallclock_total:.3f} ms",
               wallclock_total=(t2 - t0) * 1000.0)
    stage_info("\nQPU timing:")
    for component, duration in timing.items():
        # %-interpolation builds a per-component str.format template, so each
        # timing value lands in `info` under its own key in JSON mode
        stage_info(" * %(name)s = {%(name)s} us" % {"name": component}, **{component: duration})
    flush_info()