def test_participant_selection(raiden_network, token_addresses): # pylint: disable=too-many-locals registry_address = raiden_network[0].raiden.default_registry.address one_to_n_address = raiden_network[0].raiden.default_one_to_n_address token_address = token_addresses[0] # connect the first node (will register the token if necessary) RaidenAPI(raiden_network[0].raiden).token_network_connect( registry_address=registry_address, token_address=token_address, funds=TokenAmount(100)) # Test invalid argument values with pytest.raises(InvalidAmount): RaidenAPI(raiden_network[0].raiden).token_network_connect( registry_address=registry_address, token_address=token_address, funds=TokenAmount(-1)) with pytest.raises(InvalidAmount): RaidenAPI(raiden_network[0].raiden).token_network_connect( registry_address=registry_address, token_address=token_address, funds=TokenAmount(100), joinable_funds_target=2, ) with pytest.raises(InvalidAmount): RaidenAPI(raiden_network[0].raiden).token_network_connect( registry_address=registry_address, token_address=token_address, funds=TokenAmount(100), joinable_funds_target=-1, ) # connect the other nodes connect_greenlets = [ gevent.spawn( RaidenAPI(app.raiden).token_network_connect, registry_address, token_address, 100) for app in raiden_network[1:] ] gevent.wait(connect_greenlets) token_network_registry_address = views.get_token_network_address_by_token_address( views.state_from_raiden(raiden_network[0].raiden), token_network_registry_address=registry_address, token_address=token_address, ) connection_managers = [ app.raiden.connection_manager_for_token_network( token_network_registry_address) for app in raiden_network ] unsaturated_connection_managers = connection_managers[:] exception = AssertionError("Unsaturated connection managers", unsaturated_connection_managers) with gevent.Timeout(120, exception): while unsaturated_connection_managers: for manager in unsaturated_connection_managers: if is_manager_saturated(manager, registry_address, token_address): unsaturated_connection_managers.remove(manager) gevent.sleep(1) assert saturated_count(connection_managers, registry_address, token_address) == len(connection_managers) # ensure unpartitioned network for app in raiden_network: node_state = views.state_from_raiden(app.raiden) network_state = views.get_token_network_by_token_address( node_state, registry_address, token_address) assert network_state is not None for target in raiden_network: if target.raiden.address == app.raiden.address: continue routes, _ = routing.get_best_routes( chain_state=node_state, token_network_address=network_state.address, one_to_n_address=one_to_n_address, from_address=app.raiden.address, to_address=target.raiden.address, amount=PaymentAmount(1), previous_address=None, config={}, privkey=b"", # not used if pfs is not configured ) assert routes is not None # create a transfer to the leaving node, so we have a channel to settle for app in raiden_network: sender = app.raiden sender_channel = next( (channel_state for channel_state in RaidenAPI( sender).get_channel_list(registry_address=registry_address, token_address=token_address) if channel_state.our_state.contract_balance > 0 and channel_state.partner_state.contract_balance > 0), None, ) # choose a fully funded channel from sender if sender_channel: break assert sender_channel registry_address = sender.default_registry.address receiver = next( app.raiden for app in raiden_network if app.raiden.address == sender_channel.partner_state.address) # assert there is a direct channel receiver -> 
sender (vv) receiver_channel = RaidenAPI(receiver).get_channel_list( registry_address=registry_address, token_address=token_address, partner_address=sender.address, ) assert len(receiver_channel) == 1 with gevent.Timeout(30, exception=ValueError("partner not reachable")): waiting.wait_for_healthy(sender, receiver.address, PaymentAmount(1)) with watch_for_unlock_failures(*raiden_network): amount = PaymentAmount(1) RaidenAPI(sender).transfer_and_wait(registry_address, token_address, amount, receiver.address, transfer_timeout=10) with gevent.Timeout( 30, exception=ValueError( "timeout while waiting for incoming transaction")): wait_for_transaction(receiver, registry_address, token_address, sender.address) # test `leave()` method connection_manager = connection_managers[0] timeout = (sender_channel.settle_timeout * connection_manager.raiden.proxy_manager.estimate_blocktime() * 10) assert timeout > 0 channels = views.list_channelstate_for_tokennetwork( chain_state=views.state_from_raiden(connection_manager.raiden), token_network_registry_address=registry_address, token_address=token_address, ) channel_identifiers = [channel.identifier for channel in channels] with gevent.Timeout( timeout, exception=ValueError("timeout while waiting for leave")): # sender leaves the network RaidenAPI(sender).token_network_leave(registry_address, token_address) with gevent.Timeout(timeout, exception=ValueError( f"Channels didnt get settled after {timeout}")): waiting.wait_for_settle( raiden=connection_manager.raiden, token_network_registry_address=registry_address, token_address=token_address, channel_ids=channel_identifiers, retry_timeout=0.1, )
def add_watch(self, file_path, on_change, watch_type='config',
              keep_retrying=False, backoff_in_secs=10, max_wait_in_secs=300):
    """Adds a watch for an existing path.

    Args:
        file_path: Must be an existent file, otherwise raises.
        on_change: A callback function on file change.
        watch_type: The watch type parameter can be 'config' or 'serverset'.
        keep_retrying: Whether to keep retrying to add this file watch in
            case of failure. By default it is ``False``.
        backoff_in_secs: How much to back off between retries. The retry
            backoff grows exponentially.
        max_wait_in_secs: Max wait in seconds between retries.

    """
    num_tries = 0
    assert file_path
    assert on_change
    self._validate_watch_type(watch_type)
    if not keep_retrying:
        # If not keep retrying, this is a synchronous call;
        # if it fails then it fails.
        last_update_time = os.path.getmtime(file_path)
    else:
        # Running in the background greenlet, keep retrying until the
        # serverset file is there.
        while True:
            if os.path.isfile(file_path):
                last_update_time = os.path.getmtime(file_path)
                break
            else:
                log.exception("non existing file of type %s, on %s" % (
                    watch_type, file_path))
                # exponential backoff
                gevent.sleep(self._calculate_backoff_time(
                    num_tries, backoff_in_secs, max_wait_in_secs))
                num_tries += 1

    num_tries = 0
    file_path_stat_name = _escape_path_for_stats_name(file_path)
    log.info("try to add file watch of type %s, on %s" % (
        watch_type, file_path))
    while keep_retrying or num_tries == 0:
        try:
            # Read the file and make the initial onUpdate call.
            with gevent.Timeout(seconds=30):
                with open(file_path, 'r') as f:
                    file_content = f.read()
                try:
                    self._invoke_callback(
                        on_change, watch_type, file_content,
                        self.Stat(version=last_update_time))
                except Exception:
                    log.exception("Exception in watcher callback for %s, "
                                  "ignoring" % file_path)
            log.debug('The last modified timestamp of file %s is %f' % (
                file_path, last_update_time))
            md5_hash = self._compute_md5_hash(file_content)
            log.debug('The md5 hash of file %s is %s' % (
                file_path, md5_hash))
            key = (file_path, watch_type)
            if key in self._watched_file_map:
                # Append on_change to the file_path's existing callbacks
                self._watched_file_map[key][2].append(on_change)
            else:
                self._watched_file_map[key] = (
                    last_update_time, md5_hash, [on_change])
            log.info("successfully added file watch of type %s, on %s" % (
                watch_type, file_path))
            break
        except (Exception, gevent.Timeout):
            log.exception("failed to add file watch of type %s, on %s" % (
                watch_type, file_path))
            self._sc.increment(
                "errors.file.{}watch.failure.{}.{}".format(
                    watch_type, file_path_stat_name, hostname),
                sample_rate=1)
            if keep_retrying:
                # exponential backoff
                gevent.sleep(self._calculate_backoff_time(
                    num_tries, backoff_in_secs, max_wait_in_secs))
                num_tries += 1
            else:
                raise
def timeout_ctx(self):
    return gevent.Timeout(self.cfg.keepalive, False)
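# The snippet above returns gevent.Timeout(keepalive, False); the second argument
# False makes the timeout "silent": used as a context manager it abandons the block
# when it expires instead of propagating an exception. A minimal sketch of how a
# caller might consume such a context -- read_with_keepalive and the socket handling
# are illustrative assumptions, not part of the snippet above.
def read_with_keepalive(sock, timeout_ctx):
    # If the keepalive timeout fires while recv() blocks, the Timeout created with
    # exception=False is suppressed by the context manager and we fall through.
    with timeout_ctx():
        return sock.recv(4096)
    return b""  # reached only when the timeout expired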
def start(self, user_count: int, spawn_rate: float, **kwargs) -> None: num_workers = len(self.clients.ready) + len(self.clients.running) + len(self.clients.spawning) if not num_workers: logger.warning( "You are running in distributed mode but have no worker servers connected. " "Please connect workers prior to swarming." ) return for user_class in self.user_classes: if self.environment.host is not None: user_class.host = self.environment.host self.target_user_classes_count = weight_users(self.user_classes, user_count) self.spawn_rate = spawn_rate logger.info( "Sending spawn jobs of %d users at %.2f spawn rate to %d ready clients" % (user_count, spawn_rate, num_workers) ) worker_spawn_rate = float(spawn_rate) / (num_workers or 1) if worker_spawn_rate > 100: logger.warning( "Your selected spawn rate is very high (>100/worker), and this is known to sometimes cause issues. Do you really need to ramp up that fast?" ) # Since https://github.com/locustio/locust/pull/1621, the master is responsible for dispatching and controlling # the total spawn rate which is more CPU intensive for the master. The number 200 is a little arbitrary as the computational # load on the master greatly depends on the number of workers and the number of user classes. For instance, # 5 user classes and 5 workers can easily do 200/s. However, 200/s with 50 workers and 20 user classes will likely make the # dispatch very slow because of the required computations. I (@mboutet) doubt that many Locust's users are # spawning that rapidly. If so, then they'll likely open issues on GitHub in which case I'll (@mboutet) take a look. if spawn_rate > 200: logger.warning( "Your selected total spawn rate is quite high (>200), and this is known to sometimes cause performance issues on the master. " "Do you really need to ramp up that fast? If so and if encountering performance issues on the master, free to open an issue." ) if self.state != STATE_RUNNING and self.state != STATE_SPAWNING: self.stats.clear_all() self.exceptions = {} self.environment.events.test_start.fire(environment=self.environment) if self.environment.shape_class: self.environment.shape_class.reset_time() self.update_state(STATE_SPAWNING) try: for dispatched_users in UsersDispatcher( worker_nodes=self.clients.ready + self.clients.running + self.clients.spawning, user_classes_count=self.target_user_classes_count, spawn_rate=spawn_rate, ): dispatch_greenlets = Group() for worker_node_id, worker_user_classes_count in dispatched_users.items(): data = { "timestamp": time.time(), "user_classes_count": worker_user_classes_count, "host": self.environment.host, "stop_timeout": self.environment.stop_timeout, } dispatch_greenlets.add( gevent.spawn_later( 0, self.server.send_to_client, Message("spawn", data, worker_node_id), ) ) dispatched_user_count = sum(map(sum, map(methodcaller("values"), dispatched_users.values()))) logger.debug( "Sending spawn messages for %g total users to %i client(s)", dispatched_user_count, len(dispatch_greenlets), ) dispatch_greenlets.join() logger.debug( "Currently spawned users: %s" % _format_user_classes_count_for_log(self.reported_user_classes_count) ) except KeyboardInterrupt: # We need to catch keyboard interrupt. Otherwise, if KeyboardInterrupt is received while in # a gevent.sleep inside the dispatch_users function, locust won't gracefully shutdown. 
self.quit() # Wait a little for workers to report their users to the master # so that we can give an accurate log message below and fire the `spawning_complete` event # when the user count is really at the desired value. timeout = gevent.Timeout(self._wait_for_workers_report_after_ramp_up()) timeout.start() try: while self.user_count != self.target_user_count: gevent.sleep() except gevent.Timeout: pass finally: timeout.cancel() self.environment.events.spawning_complete.fire(user_count=sum(self.target_user_classes_count.values())) logger.info("All users spawned: %s" % _format_user_classes_count_for_log(self.reported_user_classes_count))
def wait_motor_ready(self, mot_hwobj, timeout=30):
    with gevent.Timeout(timeout, RuntimeError("Motor not ready")):
        while mot_hwobj.is_moving():
            gevent.sleep(0.5)
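# Several of the hardware-object snippets in this file repeat the same shape:
# poll a condition inside a gevent.Timeout that raises a descriptive error.
# A minimal generic helper capturing that pattern (wait_until is a hypothetical
# name, not taken from any of the snippets here):
import gevent


def wait_until(predicate, timeout, error_message, poll_interval=0.5):
    # Raises RuntimeError(error_message) if predicate() does not become true
    # within `timeout` seconds; otherwise returns as soon as it does.
    with gevent.Timeout(timeout, RuntimeError(error_message)):
        while not predicate():
            gevent.sleep(poll_interval)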
def publish_to_historian(self, to_publish_list): if self.cache_only: _log.warning("cache_only enabled") return handled_records = [] _log.debug("publish_to_historian number of items: {}".format( len(to_publish_list))) parsed = urlparse(self.core.address) next_dest = urlparse(self.destination_vip) current_time = self.timestamp() last_time = self._last_timeout _log.debug('Lasttime: {} currenttime: {}'.format( last_time, current_time)) timeout_occurred = False if self._last_timeout: # if we failed we need to wait 60 seconds before we go on. if self.timestamp() < self._last_timeout + 60: _log.debug('Not allowing send < 60 seconds from failure') return if not self._target_platform: self.historian_setup() if not self._target_platform: _log.debug('Could not connect to target') return for vip_id in self.required_target_agents: try: self._target_platform.vip.ping(vip_id).get() except Unreachable: skip = "Skipping publish: Target platform not running " \ "required agent {}".format(vip_id) _log.warn(skip) self.vip.health.set_status(STATUS_BAD, skip) return except Exception as e: err = "Unhandled error publishing to target platform." _log.error(err) _log.error(traceback.format_exc()) self.vip.health.set_status(STATUS_BAD, err) return for x in to_publish_list: topic = x['topic'] value = x['value'] # payload = jsonapi.loads(value) payload = value headers = payload['headers'] headers['X-Forwarded'] = True if 'X-Forwarded-From' in headers: if not isinstance(headers['X-Forwarded-From'], list): headers['X-Forwarded-From'] = [headers['X-Forwarded-From']] headers['X-Forwarded-From'].append(self.instance_name) else: headers['X-Forwarded-From'] = self.instance_name try: del headers['Origin'] except KeyError: pass try: del headers['Destination'] except KeyError: pass if self.gather_timing_data: add_timing_data_to_header( headers, self.core.agent_uuid or self.core.identity, "forwarded") if timeout_occurred: _log.error( 'A timeout has occurred so breaking out of publishing') break with gevent.Timeout(30): try: # _log.debug('debugger: {} {} {}'.format(topic, # headers, # payload)) self._target_platform.vip.pubsub.publish( peer='pubsub', topic=topic, headers=headers, message=payload['message']).get() except gevent.Timeout: _log.debug("Timeout occurred email should send!") timeout_occurred = True self._last_timeout = self.timestamp() self._num_failures += 1 # Stop the current platform from attempting to # connect self.historian_teardown() self.vip.health.set_status(STATUS_BAD, "Timeout occured") except Unreachable: _log.error("Target not reachable. Wait till it's ready!") except ZMQError as exc: if exc.errno == ENOTSOCK: # Stop the current platform from attempting to # connect _log.error( "Target disconnected. Stopping target platform agent" ) self.historian_teardown() self.vip.health.set_status( STATUS_BAD, "Target platform disconnected") except Exception as e: err = "Unhandled error publishing to target platfom." _log.error(err) _log.error(traceback.format_exc()) self.vip.health.set_status(STATUS_BAD, err) # Before returning lets mark any that weren't errors # as sent. 
self.report_handled(handled_records) return else: handled_records.append(x) _log.debug("handled: {} number of items".format(len(to_publish_list))) self.report_handled(handled_records) if timeout_occurred: _log.debug('Sending alert from the ForwardHistorian') status = Status.from_json(self.vip.health.get_status()) self.vip.health.send_alert(FORWARD_TIMEOUT_KEY, status) else: self.vip.health.set_status( STATUS_GOOD, "published {} items".format(len(to_publish_list)))
def setUp(self):
    super().setUp()
    self.timeout = gevent.Timeout(10)
    self.timeout.start()
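# A Timeout that has been start()ed keeps running until it fires or is cancelled,
# so a test class using the setUp above would normally cancel it again. The
# tearDown below is an assumed counterpart, not part of the original snippet.
def tearDown(self):
    # Cancel the per-test watchdog so it cannot fire after the test finished.
    self.timeout.cancel()
    super().tearDown()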
def raiden_network( token_addresses, token_network_registry_address, channels_per_node, deposit, settle_timeout, chain_id, blockchain_services, endpoint_discovery_services, raiden_udp_ports, reveal_timeout, database_paths, retry_interval, retries_before_backoff, throttle_capacity, throttle_fill_rate, nat_invitation_timeout, nat_keepalive_retries, nat_keepalive_timeout, local_matrix_server, ): raiden_apps = create_apps( chain_id, blockchain_services.blockchain_services, endpoint_discovery_services, token_network_registry_address, blockchain_services.secret_registry.address, raiden_udp_ports, reveal_timeout, settle_timeout, database_paths, retry_interval, retries_before_backoff, throttle_capacity, throttle_fill_rate, nat_invitation_timeout, nat_keepalive_retries, nat_keepalive_timeout, local_matrix_server, ) start_tasks = [gevent.spawn(app.raiden.start) for app in raiden_apps] gevent.joinall(start_tasks, raise_error=True) app_channels = create_network_channels( raiden_apps, channels_per_node, ) greenlets = [] for token_address in token_addresses: for app_pair in app_channels: greenlets.append( gevent.spawn( payment_channel_open_and_deposit, app_pair[0], app_pair[1], token_address, deposit, settle_timeout, )) gevent.joinall(greenlets, raise_error=True) exception = RuntimeError( '`raiden_network` fixture setup failed, nodes are unreachable') with gevent.Timeout(seconds=30, exception=exception): wait_for_channels( app_channels, blockchain_services.deploy_registry.address, token_addresses, deposit, ) # Force blocknumber update exception = RuntimeError( 'Alarm failed to start and set up start_block correctly') with gevent.Timeout(seconds=5, exception=exception): wait_for_alarm_start(raiden_apps) yield raiden_apps shutdown_apps_and_cleanup_tasks(raiden_apps)
def raiden_chain( token_addresses, token_network_registry_address, channels_per_node, deposit, settle_timeout, chain_id, blockchain_services, endpoint_discovery_services, raiden_udp_ports, reveal_timeout, database_paths, retry_interval, retries_before_backoff, throttle_capacity, throttle_fill_rate, nat_invitation_timeout, nat_keepalive_retries, nat_keepalive_timeout, local_matrix_server, ): if len(token_addresses) != 1: raise ValueError('raiden_chain only works with a single token') assert channels_per_node in (0, 1, 2, CHAIN), ( 'deployed_network uses create_sequential_network that can only work ' 'with 0, 1 or 2 channels') raiden_apps = create_apps( chain_id, blockchain_services.blockchain_services, endpoint_discovery_services, token_network_registry_address, blockchain_services.secret_registry.address, raiden_udp_ports, reveal_timeout, settle_timeout, database_paths, retry_interval, retries_before_backoff, throttle_capacity, throttle_fill_rate, nat_invitation_timeout, nat_keepalive_retries, nat_keepalive_timeout, local_matrix_server, ) start_tasks = [gevent.spawn(app.raiden.start) for app in raiden_apps] gevent.joinall(start_tasks, raise_error=True) from_block = 0 for app in raiden_apps: app.raiden.install_all_blockchain_filters( app.raiden.default_registry, app.raiden.default_secret_registry, from_block, ) app_channels = create_sequential_channels( raiden_apps, channels_per_node, ) channel_greenlets = [] for token_address in token_addresses: for app_pair in app_channels: channel_greenlets.append( gevent.spawn( payment_channel_open_and_deposit, app_pair[0], app_pair[1], token_address, deposit, settle_timeout, )) gevent.joinall(channel_greenlets, raise_error=True) exception = RuntimeError( '`raiden_chain` fixture setup failed, nodes are unreachable') with gevent.Timeout(seconds=30, exception=exception): wait_for_channels( app_channels, blockchain_services.deploy_registry.address, token_addresses, deposit, ) yield raiden_apps shutdown_apps_and_cleanup_tasks(raiden_apps)
def wagoOut(self):
    with gevent.Timeout(5):
        self.wago_controller.set(self.command_key, 0)
        while self.wago_controller.get(self.out_key) == 0:
            time.sleep(0.5)
    'info_hash': hashlib.sha1(self.address).digest(),
    'peer_id': my_peer_id,
    'port': fileserver_port,
    'uploaded': 0,
    'downloaded': 0,
    'left': 0,
    'compact': 1,
    'numwant': 30,
    'event': 'started'
}
req = None
try:
    url = "http://" + tracker_address + "?" + urllib.urlencode(params)
    # Load url
    with gevent.Timeout(30, False):  # Make sure of timeout
        req = urllib2.urlopen(url, timeout=25)
        response = req.read()
        req.fp._sock.recv = None  # Hacky avoidance of memory leak for older python versions
        req.close()
        req = None
    if not response:
        self.log.debug("Http tracker %s response error" % url)
        return False
    # Decode peers
    peer_data = bencode.decode(response)["peers"]
    response = None
    peer_count = len(peer_data) / 6
    peers = []
    for peer_offset in xrange(peer_count):
        off = 6 * peer_offset
def __init__(self, span: ServerSpan, timeout_seconds: float, debug: bool):
    exception = ServerTimeout(span.name, timeout_seconds, debug)
    self.timeout = gevent.Timeout(timeout_seconds, exception)
def test_participant_selection(raiden_network, token_addresses, skip_if_tester): registry_address = raiden_network[0].raiden.default_registry.address # pylint: disable=too-many-locals token_address = token_addresses[0] # connect the first node (will register the token if necessary) RaidenAPI(raiden_network[0].raiden).token_network_connect( registry_address, token_address, 100, ) # connect the other nodes connect_greenlets = [ gevent.spawn( RaidenAPI(app.raiden).token_network_connect, registry_address, token_address, 100, ) for app in raiden_network[1:] ] gevent.wait(connect_greenlets) token_network_registry_address = views.get_token_network_identifier_by_token_address( views.state_from_raiden(raiden_network[0].raiden), payment_network_id=registry_address, token_address=token_address, ) connection_managers = [ app.raiden.connection_manager_for_token_network( token_network_registry_address, ) for app in raiden_network ] unsaturated_connection_managers = connection_managers[:] with gevent.Timeout( 120, AssertionError('Unsaturated connection managers', unsaturated_connection_managers), ): while unsaturated_connection_managers: for manager in unsaturated_connection_managers: if is_manager_saturated(manager, registry_address, token_address): unsaturated_connection_managers.remove(manager) gevent.sleep(1) assert saturated_count( connection_managers, registry_address, token_address, ) == len(connection_managers) # ensure unpartitioned network for app in raiden_network: node_state = views.state_from_raiden(app.raiden) network_state = views.get_token_network_by_token_address( node_state, registry_address, token_address, ) assert network_state is not None for target in raiden_network: if target.raiden.address == app.raiden.address: continue routes = routing.get_best_routes( node_state, network_state.address, app.raiden.address, target.raiden.address, 1, None, ) assert routes is not None # create a transfer to the leaving node, so we have a channel to settle sender = raiden_network[-1].raiden receiver = raiden_network[0].raiden registry_address = sender.default_registry.address # assert there is a direct channel receiver -> sender (vv) receiver_channel = RaidenAPI(receiver).get_channel_list( registry_address=registry_address, token_address=token_address, partner_address=sender.address, ) assert len(receiver_channel) == 1 receiver_channel = receiver_channel[0] # assert there is a direct channel sender -> receiver sender_channel = RaidenAPI(sender).get_channel_list( registry_address=registry_address, token_address=token_address, partner_address=receiver.address, ) assert len(sender_channel) == 1 sender_channel = sender_channel[0] exception = ValueError('partner not reachable') with gevent.Timeout(30, exception=exception): waiting.wait_for_healthy(sender, receiver.address, 1) amount = 1 RaidenAPI(sender).transfer_and_wait( registry_address, token_address, amount, receiver.address, transfer_timeout=10, ) exception = ValueError('timeout while waiting for incoming transaction') with gevent.Timeout(30, exception=exception): wait_for_transaction( receiver, registry_address, token_address, sender.address, ) # test `leave()` method connection_manager = connection_managers[0] timeout = (sender_channel.settle_timeout * connection_manager.raiden.chain.estimate_blocktime() * 10) assert timeout > 0 exception = ValueError('timeout while waiting for leave') with gevent.Timeout(timeout, exception=exception): RaidenAPI(raiden_network[0].raiden).token_network_leave( registry_address, token_address, ) before_block = 
connection_manager.raiden.chain.block_number() wait_blocks = sender_channel.settle_timeout + 10 # wait until both chains are synced? wait_until_block( connection_manager.raiden.chain, before_block + wait_blocks, ) wait_until_block( receiver.chain, before_block + wait_blocks, ) receiver_channel = RaidenAPI(receiver).get_channel_list( registry_address=registry_address, token_address=token_address, partner_address=sender.address, ) assert receiver_channel[0].settle_transaction is not None
            # For printing in errors
            return os.forkpty()[0]

        funcs = (os.fork, forkpty)
    else:
        funcs = (os.fork,)

    for func in funcs:
        awaiting_child = [True]
        pid = func()
        if not pid:
            # child
            gevent.sleep(0.3)
            sys.exit(0)
        else:
            timeout = gevent.Timeout(1)
            try:
                while awaiting_child:
                    gevent.sleep(0.01)
                # We should now be able to waitpid() for an arbitrary child
                wpid, status = os.waitpid(-1, os.WNOHANG)
                if wpid != pid:
                    raise AssertionError(
                        "Failed to wait on a child pid forked with a function",
                        wpid, pid, func)

                # And a second call should raise ECHILD
                try:
                    wpid, status = os.waitpid(-1, os.WNOHANG)
                    raise AssertionError("Should not be able to wait again")
                except OSError as e:
def move_beam_to_center(self): """Calls pitch scan and 3 times detects beam shape and moves horizontal and vertical motors. """ gui_log = logging.getLogger("GUI") gui_msg = "" step = 10 finished = False try: if HWR.beamline.session.beamline_name == "P13": # Beam centering procedure for P13 --------------------------------- log_msg = "Executing pitch scan" gui_log.info("Beam centering: %s" % log_msg) self.emit("progressStep", step, log_msg) if HWR.beamline.energy.get_value() <= 8.75: self.cmd_set_qbmp_range(0) else: self.cmd_set_qbmp_range(1) gevent.sleep(0.2) self.cmd_start_pitch_scan(1) gevent.sleep(3) with gevent.Timeout( 20, Exception("Timeout waiting for pitch scan ready")): while self.chan_pitch_scan_status.getValue() == 1: gevent.sleep(0.1) gevent.sleep(3) self.cmd_set_vmax_pitch(1) """ qbpm_arr = self.chan_qbpm_ar.getValue() if max(qbpm_arr) < 10: gui_log.error("Beam alignment failed! Pitch scan failed.") self.emit("progressStop", ()) return """ step += 1 log_msg = "Detecting beam position and centering the beam" gui_log.info("Beam centering: %s" % log_msg) self.emit("progressStep", step, log_msg) for i in range(3): with gevent.Timeout(10, False): beam_pos_displacement = [None, None] while None in beam_pos_displacement: beam_pos_displacement = HWR.beamline.sample_view.get_beam_displacement( reference="beam") gevent.sleep(0.1) if None in beam_pos_displacement: log_msg = ( "Beam alignment failed! Unable to detect beam position." ) gui_log.error(log_msg) self.emit("progressStop", ()) return delta_hor = beam_pos_displacement[0] * self.scale_hor delta_ver = beam_pos_displacement[1] * self.scale_ver if delta_hor > 0.03: delta_hor = 0.03 if delta_hor < -0.03: delta_hor = -0.03 if delta_ver > 0.03: delta_ver = 0.03 if delta_ver < -0.03: delta_ver = -0.03 log_msg = ("Beam centering: Applying %.4f mm horizontal " % delta_hor + "and %.4f mm vertical correction" % delta_ver) gui_log.info(log_msg) if abs(delta_hor) > 0.0001: log_msg = ( "Beam centering: Moving horizontal by %.4f" % delta_hor) gui_log.info(log_msg) self.horizontal_motor_hwobj.set_value_relative( delta_hor) sleep(5) if abs(delta_ver) > 0.0001: log_msg = "Beam centering: Moving vertical by %.4f" % delta_ver gui_log.info(log_msg) self.vertical_motor_hwobj.set_value_relative(delta_ver) sleep(5) else: # Beam centering procedure for P14 ----------------------------------- # If energy < 10: set all lenses in ---------------------------- active_mode, beam_size = self.get_focus_mode() log_msg = "Applying Perp and Roll2nd correction" gui_log.info("Beam centering: %s" % log_msg) self.emit("progressStep", step, log_msg) delta_ver = 1.0 for i in range(5): if abs(delta_ver) > 0.100: self.cmd_set_pitch_position(0) self.cmd_set_pitch(1) gevent.sleep(0.1) if HWR.beamline.energy.get_value() < 10: crl_value = self.crl_hwobj.get_crl_value() self.crl_hwobj.set_crl_value([1, 1, 1, 1, 1, 1], timeout=30) self.cmd_start_pitch_scan(1) # GB : keep lenses in the beam during the scan # if self.bl_hwobj._get_energy() < 10: # self.crl_hwobj.set_crl_value(crl_value, timeout=30) gevent.sleep(2.0) with gevent.Timeout( 10, RuntimeError( "Timeout waiting for pitch scan ready")): while self.chan_pitch_scan_status.getValue() != 0: gevent.sleep(0.1) self.cmd_set_vmax_pitch(1) # GB : return original lenses only after scan finished if HWR.beamline.energy.get_value() < 10: self.crl_hwobj.set_crl_value(crl_value, timeout=30) sleep(2) with gevent.Timeout(10, False): beam_pos_displacement = [None, None] while None in beam_pos_displacement: beam_pos_displacement = 
HWR.beamline.sample_view.get_beam_displacement( reference="screen") gevent.sleep(0.1) if None in beam_pos_displacement: # log.debug("No beam detected") return if active_mode in ("Collimated", "Imaging"): delta_hor = (beam_pos_displacement[0] * self.scale_hor * HWR.beamline.energy.get_value() / 12.70) delta_ver = beam_pos_displacement[1] * self.scale_ver else: delta_hor = beam_pos_displacement[ 0] * self.scale_double_hor delta_ver = (beam_pos_displacement[1] * self.scale_double_ver * 0.5) log_msg = ( "Measured beam displacement: Horizontal " + "%.4f mm, Vertical %.4f mm" % beam_pos_displacement) gui_log.info(log_msg) # if abs(delta_ver) > 0.050 : # delta_ver *= 0.5 log_msg = ( "Applying %.4f mm horizontal " % delta_hor + "and %.4f mm vertical motor correction" % delta_ver) gui_log.info(log_msg) if active_mode in ("Collimated", "Imaging"): if abs(delta_hor) > 0.0001: log_msg = "Moving horizontal by %.4f" % delta_hor gui_log.info(log_msg) self.horizontal_motor_hwobj.set_value_relative( delta_hor, timeout=5) sleep(4) if abs(delta_ver) > 0.100: log_msg = "Moving vertical motor by %.4f" % delta_ver gui_log.info(log_msg) # self.vertical_motor_hwobj.set_value_relative(delta_ver, timeout=5) tine.set( "/p14/P14MonoMotor/Perp", "IncrementMove.START", delta_ver * 0.5, ) sleep(6) else: log_msg = "Moving vertical piezo by %.4f" % delta_ver gui_log.info(log_msg) self.vertical_motor_hwobj.set_value_relative( -1.0 * delta_ver, timeout=5) sleep(2) elif active_mode == "Double": if abs(delta_hor) > 0.0001: log_msg = "Moving horizontal by %.4f" % delta_hor gui_log.info(log_msg) self.horizontal_double_mode_motor_hwobj.set_value_relative( delta_hor, timeout=5) sleep(2) if abs(delta_ver) > 0.001: log_msg = "Moving vertical by %.4f" % delta_ver gui_log.info(log_msg) self.vertical_double_mode_motor_hwobj.set_value_relative( delta_ver, timeout=5) sleep(2) finished = True except: gui_log.error("Beam centering failed") finished = False finally: return finished
def wait_ready(self):
    acq_status_chan = self.getChannelObject("acq_status")
    with gevent.Timeout(10, RuntimeError("Detector not ready")):
        while acq_status_chan.getValue() != "Ready":
            time.sleep(1)
def waitEndOfMove(self, timeout=None):
    if self._move_task is not None:
        with gevent.Timeout(timeout):
            self._move_task.join()
def test_token_registered_race(raiden_chain, retry_timeout, unregistered_token):
    """If a token is registered it must appear on the token list.

    If two nodes register the same token one of the transactions will fail. The
    node that receives an error for "already registered token" must see the
    token in the token list. Issue: #784
    """
    app0, app1 = raiden_chain
    token_address = unregistered_token

    api0 = RaidenAPI(app0.raiden)
    api1 = RaidenAPI(app1.raiden)

    # Wait until Raiden can start using the token contract.
    # Here, the block at which the contract was deployed should be confirmed by Raiden.
    # Therefore, wait until that block is received.
    waiting.wait_for_block(
        raiden=app0.raiden,
        block_number=app0.raiden.get_block_number() +
        DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS + 1,
        retry_timeout=retry_timeout,
    )
    waiting.wait_for_block(
        raiden=app1.raiden,
        block_number=app1.raiden.get_block_number() +
        DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS + 1,
        retry_timeout=retry_timeout,
    )

    registry_address = app0.raiden.default_registry.address
    assert token_address not in api0.get_tokens_list(registry_address)
    assert token_address not in api1.get_tokens_list(registry_address)

    greenlets: set = {
        gevent.spawn(
            api0.token_network_register,
            registry_address=registry_address,
            token_address=token_address,
            channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
            token_network_deposit_limit=TokenAmount(UINT256_MAX),
        ),
        gevent.spawn(
            api0.token_network_register,
            registry_address=registry_address,
            token_address=token_address,
            channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
            token_network_deposit_limit=TokenAmount(UINT256_MAX),
        ),
    }

    # One of the nodes will lose the race
    with pytest.raises(RaidenRecoverableError):
        gevent.joinall(greenlets, raise_error=True)

    exception = RuntimeError("Did not see the token registration within 30 seconds")
    with gevent.Timeout(seconds=30, exception=exception):
        wait_for_state_change(
            app0.raiden,
            ContractReceiveNewTokenNetwork,
            {"token_network": {"token_address": token_address}},
            retry_timeout,
        )
        wait_for_state_change(
            app1.raiden,
            ContractReceiveNewTokenNetwork,
            {"token_network": {"token_address": token_address}},
            retry_timeout,
        )

    assert token_address in api0.get_tokens_list(registry_address)
    assert token_address in api1.get_tokens_list(registry_address)

    for api in (api0, api1):
        with pytest.raises(AlreadyRegisteredTokenAddress):
            api.token_network_register(
                registry_address=registry_address,
                token_address=token_address,
                channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
                token_network_deposit_limit=TokenAmount(UINT256_MAX),
            )
def waitEndOfMove(self, timeout=None):
    with gevent.Timeout(timeout):
        time.sleep(0.1)
        while self.motorState == MicrodiffMotor.MOVING:
            time.sleep(0.1)
def test_participant_deposit_amount_must_be_smaller_than_the_limit( raiden_network: List[App], contract_manager: ContractManager, retry_timeout: float) -> None: """The Python API must properly check the requested participant deposit will not exceed the smart contract limit. This is companion test for `test_deposit_amount_must_be_smaller_than_the_token_network_limit`. The participant deposit limit was introduced for the bug bounty with the PR https://github.com/raiden-network/raiden-contracts/pull/276/ , the limit is available since version 0.4.0 of the smart contract. """ app1 = raiden_network[0] registry_address = app1.raiden.default_registry.address token_supply = 1_000_000 contract_proxy, _ = app1.raiden.rpc_client.deploy_single_contract( contract_name=CONTRACT_HUMAN_STANDARD_TOKEN, contract=contract_manager.get_contract(CONTRACT_HUMAN_STANDARD_TOKEN), constructor_parameters=(token_supply, 2, "raiden", "Rd"), ) token_address = TokenAddress(to_canonical_address(contract_proxy.address)) api1 = RaidenAPI(app1.raiden) msg = "Token is not registered yet, it must not be in the token list." assert token_address not in api1.get_tokens_list(registry_address), msg # Wait until Raiden can start using the token contract. # Here, the block at which the contract was deployed should be confirmed by Raiden. # Therefore, until that block is received. waiting.wait_for_block( raiden=app1.raiden, block_number=BlockNumber(app1.raiden.get_block_number() + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS + 1), retry_timeout=retry_timeout, ) token_network_participant_deposit_limit = TokenAmount(100) api1.token_network_register( registry_address=registry_address, token_address=token_address, channel_participant_deposit_limit= token_network_participant_deposit_limit, token_network_deposit_limit=TokenAmount(UINT256_MAX), ) exception = RuntimeError( "Did not see the token registration within 30 seconds") with gevent.Timeout(seconds=30, exception=exception): wait_for_state_change( app1.raiden, ContractReceiveNewTokenNetwork, {"token_network": { "token_address": token_address }}, retry_timeout, ) msg = "Token has been registered, yet must be available in the token list." assert token_address in api1.get_tokens_list(registry_address), msg partner_address = make_address() api1.channel_open( registry_address=app1.raiden.default_registry.address, token_address=token_address, partner_address=partner_address, ) with pytest.raises(DepositOverLimit): api1.set_total_channel_deposit( registry_address=app1.raiden.default_registry.address, token_address=token_address, partner_address=partner_address, total_deposit=TokenAmount(token_network_participant_deposit_limit + 1), ) pytest.fail( "The deposit must fail if the requested deposit exceeds the participant deposit limit." )
def poll(self, transaction_hash, confirmations=None, timeout=None):
    """ Wait until the `transaction_hash` is applied or rejected.
    If timeout is None, this could wait indefinitely!

    Args:
        transaction_hash (hash): Transaction hash that we are waiting for.
        confirmations (int): Number of block confirmations that we will
            wait for.
        timeout (float): Timeout in seconds, raise an Exception on timeout.
    """
    if transaction_hash.startswith('0x'):
        warnings.warn(
            'transaction_hash seems to be already encoded, this will'
            ' result in unexpected behavior'
        )

    if len(transaction_hash) != 32:
        raise ValueError(
            'transaction_hash length must be 32 (it might be hex encoded)'
        )

    transaction_hash = data_encoder(transaction_hash)

    deadline = None
    if timeout:
        deadline = gevent.Timeout(timeout)
        deadline.start()

    try:
        # used to check if the transaction was removed, this could happen
        # if gas price is too low:
        #
        # > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18
        # > Shannon). All sequential txs from this address(7d0eae79)
        # > will be ignored
        #
        last_result = None

        while True:
            # Could return None for a short period of time, until the
            # transaction is added to the pool
            transaction = self.call('eth_getTransactionByHash', transaction_hash)

            # if the transaction was added to the pool and then removed
            if transaction is None and last_result is not None:
                raise Exception('invalid transaction, check gas price')

            # the transaction was added to the pool and mined
            if transaction and transaction['blockNumber'] is not None:
                break

            last_result = transaction

            gevent.sleep(.5)

        if confirmations:
            # this will wait for both APPLIED and REVERTED transactions
            transaction_block = quantity_decoder(transaction['blockNumber'])
            confirmation_block = transaction_block + confirmations

            block_number = self.block_number()

            while block_number < confirmation_block:
                gevent.sleep(.5)
                block_number = self.block_number()

    except gevent.Timeout:
        raise Exception('timeout when polling for transaction')

    finally:
        if deadline:
            deadline.cancel()
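# The deadline handling in poll() above (Timeout.start() plus cancel() in finally)
# can also be written with the context-manager form of gevent.Timeout, which arms
# the timer on entry and cancels it on exit. A sketch under the assumption that
# poll_blocks() stands in for the polling loop above; both names are hypothetical.
import gevent


def poll_with_deadline(poll_blocks, timeout):
    try:
        # With the default exception argument, the Timeout instance itself is
        # raised when the deadline passes and propagates out of the block.
        with gevent.Timeout(timeout):
            poll_blocks()
    except gevent.Timeout:
        raise Exception('timeout when polling for transaction')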
def test_deposit_amount_must_be_smaller_than_the_token_network_limit( raiden_network: List[App], contract_manager: ContractManager, retry_timeout: float) -> None: """The Python API must properly check the requested deposit will not exceed the token network deposit limit. This is a regression test for #3135. As of version `v0.18.1` (commit 786347b23), the proxy was not properly checking that the requested deposit amount was smaller than the smart contract deposit limit. This led to two errors: - The error message was vague and incorrect: "Deposit amount decreased" - The exception used was not handled and crashed the node. This test checks the limit is properly check from the REST API. """ app1 = raiden_network[0] registry_address = app1.raiden.default_registry.address token_supply = 1_000_000 contract_proxy, _ = app1.raiden.rpc_client.deploy_single_contract( contract_name=CONTRACT_HUMAN_STANDARD_TOKEN, contract=contract_manager.get_contract(CONTRACT_HUMAN_STANDARD_TOKEN), constructor_parameters=(token_supply, 2, "raiden", "Rd"), ) token_address = TokenAddress(to_canonical_address(contract_proxy.address)) # Wait until Raiden can start using the token contract. # Here, the block at which the contract was deployed should be confirmed by Raiden. # Therefore, until that block is received. waiting.wait_for_block( raiden=app1.raiden, block_number=BlockNumber(app1.raiden.get_block_number() + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS + 1), retry_timeout=retry_timeout, ) api1 = RaidenAPI(app1.raiden) msg = "Token is not registered yet, it must not be in the token list." assert token_address not in api1.get_tokens_list(registry_address), msg token_network_deposit_limit = TokenAmount(100) api1.token_network_register( registry_address=registry_address, token_address=token_address, channel_participant_deposit_limit=token_network_deposit_limit, token_network_deposit_limit=token_network_deposit_limit, ) exception = RuntimeError( "Did not see the token registration within 30 seconds") with gevent.Timeout(seconds=30, exception=exception): wait_for_state_change( app1.raiden, ContractReceiveNewTokenNetwork, {"token_network": { "token_address": token_address }}, retry_timeout, ) msg = "Token has been registered, yet must be available in the token list." assert token_address in api1.get_tokens_list(registry_address), msg partner_address = make_address() api1.channel_open( registry_address=app1.raiden.default_registry.address, token_address=token_address, partner_address=partner_address, ) with pytest.raises(DepositOverLimit): api1.set_total_channel_deposit( registry_address=app1.raiden.default_registry.address, token_address=token_address, partner_address=partner_address, total_deposit=TokenAmount(token_network_deposit_limit + 1), ) pytest.fail( "The deposit must fail if the requested deposit exceeds the token " "network deposit limit.")
def actionFileNeed(self, to, inner_path, timeout=300):
    try:
        with gevent.Timeout(timeout):
            self.site.needFile(inner_path, priority=6)
    except Exception as err:
        return self.response(to, {"error": str(err)})
def test_token_registered_race(raiden_chain, token_amount, retry_timeout, contract_manager):
    """If a token is registered it must appear on the token list.

    If two nodes register the same token one of the transactions will fail. The
    node that receives an error for "already registered token" must see the
    token in the token list. Issue: #784
    """
    app0, app1 = raiden_chain

    api0 = RaidenAPI(app0.raiden)
    api1 = RaidenAPI(app1.raiden)

    # Recreate the race condition by making sure the non-registering app won't
    # register at all by watching for the TokenAdded blockchain event.
    event_listeners = app1.raiden.blockchain_events.event_listeners
    app1.raiden.blockchain_events.event_listeners = list()

    token_address = deploy_contract_web3(
        contract_name=CONTRACT_HUMAN_STANDARD_TOKEN,
        deploy_client=app1.raiden.chain.client,
        contract_manager=contract_manager,
        constructor_arguments=(
            token_amount,
            2,
            'raiden',
            'Rd',
        ),
    )

    gevent.sleep(1)

    registry_address = app0.raiden.default_registry.address
    assert token_address not in api0.get_tokens_list(registry_address)
    assert token_address not in api1.get_tokens_list(registry_address)

    api0.token_network_register(registry_address, token_address)
    exception = RuntimeError('Did not see the token registration within 30 seconds')
    with gevent.Timeout(seconds=30, exception=exception):
        wait_for_state_change(
            app0.raiden,
            ContractReceiveNewTokenNetwork,
            {
                'token_network': {
                    'token_address': token_address,
                },
            },
            retry_timeout,
        )

    assert token_address in api0.get_tokens_list(registry_address)
    assert token_address not in api1.get_tokens_list(registry_address)

    # The next time when the event is polled, the token is registered
    app1.raiden.blockchain_events.event_listeners = event_listeners
    waiting.wait_for_block(
        app1.raiden,
        app1.raiden.get_block_number() + 1,
        retry_timeout,
    )

    assert token_address in api1.get_tokens_list(registry_address)
def _wait_device_ready(self, timeout=None):
    with gevent.Timeout(timeout, Exception("Timeout waiting for device ready")):
        while not self._is_device_ready():
            gevent.sleep(0.01)
def __init__(self, span: ServerSpan, timeout: datetime.timedelta, debug: bool):
    timeout_seconds = timeout.total_seconds()
    exception = ServerTimeout(span.name, timeout_seconds, debug)
    self.timeout = gevent.Timeout(timeout_seconds, exception)
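# Both __init__ variants above only build the gevent.Timeout; a wrapper like this
# usually also arms and disarms it around the request it guards. The start/stop
# methods below sketch an assumed shape for such a class and are not taken from
# the snippets above.
def start(self) -> None:
    # Arm the timer; if it expires, the ServerTimeout built in __init__ is raised
    # in the greenlet that called start().
    self.timeout.start()


def stop(self) -> None:
    # Disarm the timer once the guarded work finished in time.
    self.timeout.cancel()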
""" from __future__ import with_statement import gevent from gevent import socket from gevent.pool import Pool N = 1000 # limit ourselves to max 10 simultaneous outstanding requests pool = Pool(10) finished = 0 def job(url): global finished try: try: ip = socket.gethostbyname(url) print('%s = %s' % (url, ip)) except socket.gaierror as ex: print('%s failed with %s' % (url, ex)) finally: finished += 1 with gevent.Timeout(2, False): for x in range(10, 10 + N): pool.spawn(job, '%s.com' % x) pool.join() print('finished within 2 seconds: %s/%s' % (finished, N))
def center_beam_task(self): """Centers beam in a following procedure: 1. Store aperture position and take out the aperture 2. Store slits position and open to max 3. Do pitch scan if possible 3. In a loop take snapshot and move motors 4. Put back aperture and move to original slits positions """ gui_log = logging.getLogger("GUI") log_msg = "" if not HWR.beamline.safety_shutter.is_opened(): log_msg = "Beam centering failed! Safety shutter is closed! Open the shutter to continue." gui_log.error(log_msg) self.ready_event.set() return aperture_hwobj = HWR.beamline.beam.aperture current_energy = HWR.beamline.energy.get_value() current_transmission = HWR.transmission.get_value() active_mode, beam_size = self.get_focus_mode() log_msg = "Beam centering: Active mode %s" % active_mode gui_log.info(log_msg) if active_mode in ("Imaging", "TREXX"): log_msg = "Beam centering: doing pitch scan only" gui_log.info(log_msg) if current_energy < 10: crl_value = self.crl_hwobj.get_crl_value() self.crl_hwobj.set_crl_value([1, 1, 1, 1, 1, 1], timeout=30) self.cmd_start_pitch_scan(1) gevent.sleep(2.0) with gevent.Timeout( 10, RuntimeError("Timeout waiting for pitch scan ready")): while self.chan_pitch_scan_status.getValue() != 0: gevent.sleep(0.1) self.cmd_set_vmax_pitch(1) if current_energy < 10: self.crl_hwobj.set_crl_value(crl_value, timeout=30) sleep(2) gui_log.info("Beam centering: done") self.ready_event.set() return try: step = 1 log_msg = "Starting beam centring" gui_log.info("Beam centering: %s" % log_msg) self.emit("progressInit", ("Beam centering...", 20, True)) # Diffractometer in BeamLocation phase --------------------------- msg = "Setting diffractometer in BeamLocation phase" gui_log.info("Beam centering: %s" % msg) self.emit("progressStep", step, msg) HWR.beamline.diffractometer.wait_device_ready(10) HWR.beamline.diffractometer.set_phase( HWR.beamline.diffractometer.PHASE_BEAM, timeout=45) # Open the fast shutter and set aperture out -------------------- step += 1 log_msg = "Opening fast shutter and setting aperture out" gui_log.info("Beam centering: %s" % log_msg) self.emit("progressStep", step, log_msg) HWR.beamline.fast_shutter.openShutter() gevent.sleep(0.1) aperture_hwobj.set_out() # Adjust transmission --------------------------------------------- step += 1 log_msg = ( "Adjusting transmission to the current energy %.1f keV" % current_energy) gui_log.info("Beam centering: %s" % log_msg) self.emit("progressStep", step, log_msg) if current_energy < 7: new_transmission = 100 else: energy_transm = interp1d([6.9, 8.0, 12.7, 19.0], [100.0, 60.0, 15.0, 10]) new_transmission = round(energy_transm(current_energy), 2) if HWR.beamline.session.beamline_name == "P13": HWR.beamline.transmission.set_value( # Transmission( new_transmission, timeout=45) HWR.beamline.diffractometer.set_zoom( "Zoom 4") # was 4, use 1 with broken zoom motor # capillary_position = ( # HWR.beamline.diffractometer.get_capillary_position() # ) HWR.beamline.diffractometer.set_capillary_position("OFF") gevent.sleep(1) self.move_beam_to_center() else: slits_hwobj = HWR.beamline.beam.slits if active_mode in ("Collimated", "Imaging", "TREXX"): HWR.beamline.transmission.set_value( # Transmission( new_transmission, timeout=45) HWR.beamline.diffractometer.set_zoom("Zoom 4") else: # 2% transmission for beam centering in double foucused mode HWR.beamline.transmission.set_value(2, timeout=45) HWR.beamline.diffractometer.set_zoom("Zoom 8") step += 1 log_msg = "Opening slits to 1 x 1 mm" gui_log.info("Beam centering: %s" % log_msg) 
self.emit("progressStep", step, log_msg) # GB: keep standard slits settings for double foucsed mode if active_mode in ("Collimated", "Imaging", "TREXX"): slits_hwobj.set_vertical_gap(1.0) # "Hor", 1.0) slits_hwobj.set_horizontal_gap(1.0) # "Ver", 1.0) # Actual centring procedure --------------- result = self.move_beam_to_center() if not result: gui_log.error("Beam centering: Failed") self.emit("progressStop", ()) self.ready_event.set() return # For unfocused mode setting slits to 0.1 x 0.1 mm --------------- if active_mode in ("Collimated", "Imaging", "TREXX"): step += 1 log_msg = "Setting slits to 0.1 x 0.1 mm" gui_log.info("Beam centering: %s" % log_msg) self.emit("progressStep", step, log_msg) slits_hwobj.set_horizontal_gap(0.1) # "Hor", 0.1) slits_hwobj.set_vertical_gap(0.1) # "Ver", 0.1) sleep(3) # Update position of the beam mark position ---------------------- step += 1 log_msg = "Updating beam mark position" self.emit("progressStep", step, log_msg) gui_log.info("Beam centering: %s" % log_msg) HWR.beamline.sample_view.move_beam_mark_auto() HWR.beamline.transmission.set_value(current_transmission) HWR.beamline.sample_view.graphics_beam_item.set_detected_beam_position( None, None) log_msg = "Done" gui_log.info("Beam centering: %s" % log_msg) self.emit("progressStop", ()) self.ready_event.set() except Exception as ex: log_msg = "Beam centering failed in the step: %s (%s)" % (log_msg, str(ex)) gui_log.error(log_msg) self.emit("progressStop", ()) self.ready_event.set() return False finally: HWR.beamline.fast_shutter.closeShutter(wait=False)
def do_collect(self, owner, data_collect_parameters):
    if self.__safety_shutter_close_task is not None:
        self.__safety_shutter_close_task.kill()

    logging.getLogger("user_level_log").info("Closing fast shutter")
    self.close_fast_shutter()

    # reset collection id on each data collect
    self.collection_id = None

    # Preparing directory path for images and processing files,
    # creating image file template and JPEG file templates
    file_parameters = data_collect_parameters["fileinfo"]

    file_parameters["suffix"] = self.bl_config.detector_fileext
    image_file_template = (
        "%(prefix)s_%(run_number)s_%%04d.%(suffix)s" % file_parameters
    )
    file_parameters["template"] = image_file_template

    archive_directory = self.get_archive_directory(file_parameters["directory"])
    data_collect_parameters["archive_dir"] = archive_directory

    if archive_directory:
        jpeg_filename = "%s.jpeg" % os.path.splitext(image_file_template)[0]
        thumb_filename = "%s.thumb.jpeg" % os.path.splitext(image_file_template)[0]
        jpeg_file_template = os.path.join(archive_directory, jpeg_filename)
        jpeg_thumbnail_file_template = os.path.join(archive_directory, thumb_filename)
    else:
        jpeg_file_template = None
        jpeg_thumbnail_file_template = None

    # database filling
    if self.bl_control.lims:
        data_collect_parameters["collection_start_time"] = time.strftime(
            "%Y-%m-%d %H:%M:%S"
        )
        if self.bl_control.machine_current is not None:
            logging.getLogger("user_level_log").info(
                "Getting synchrotron filling mode"
            )
            data_collect_parameters["synchrotronMode"] = self.get_machine_fill_mode()
        data_collect_parameters["status"] = "failed"

        logging.getLogger("user_level_log").info("Storing data collection in LIMS")
        (
            self.collection_id,
            detector_id,
        ) = self.bl_control.lims.store_data_collection(
            data_collect_parameters, self.bl_config
        )

        data_collect_parameters["collection_id"] = self.collection_id

        if detector_id:
            data_collect_parameters["detector_id"] = detector_id

    # Creating the directory for images and processing information
    logging.getLogger("user_level_log").info(
        "Creating directory for images and processing"
    )
    self.create_directories(
        file_parameters["directory"], file_parameters["process_directory"]
    )
    (
        self.xds_directory,
        self.mosflm_directory,
        self.hkl2000_directory,
    ) = self.prepare_input_files(
        file_parameters["directory"],
        file_parameters["prefix"],
        file_parameters["run_number"],
        file_parameters["process_directory"],
    )
    data_collect_parameters["xds_dir"] = self.xds_directory

    logging.getLogger("user_level_log").info("Getting sample info from parameters")
    sample_id, sample_location, sample_code = self.get_sample_info_from_parameters(
        data_collect_parameters
    )
    data_collect_parameters["blSampleId"] = sample_id

    if self.bl_control.sample_changer is not None:
        try:
            data_collect_parameters[
                "actualSampleBarcode"
            ] = self.bl_control.sample_changer.getLoadedSample().getID()
            data_collect_parameters["actualContainerBarcode"] = (
                self.bl_control.sample_changer.getLoadedSample().getContainer().getID()
            )

            logging.getLogger("user_level_log").info("Getting loaded sample coords")
            basket, vial = self.bl_control.sample_changer.getLoadedSample().getCoords()

            data_collect_parameters["actualSampleSlotInContainer"] = vial
            data_collect_parameters["actualContainerSlotInSC"] = basket
        except BaseException:
            data_collect_parameters["actualSampleBarcode"] = None
            data_collect_parameters["actualContainerBarcode"] = None
    else:
        data_collect_parameters["actualSampleBarcode"] = None
        data_collect_parameters["actualContainerBarcode"] = None

    centring_info = {}
    try:
        logging.getLogger("user_level_log").info("Getting centring status")
        centring_status = self.diffractometer().getCentringStatus()
    except BaseException:
        pass
    else:
        centring_info = dict(centring_status)

    # Save sample centring positions
    positions_str = ""
    motors = centring_info.get(
        "motors", {}
    )  # .update(centring_info.get("extraMotors", {}))
    motors_to_move_before_collect = data_collect_parameters.setdefault("motors", {})

    for motor, pos in motors.items():
        if motor in motors_to_move_before_collect:
            continue
        motors_to_move_before_collect[motor] = pos

    current_diffractometer_position = self.diffractometer().getPositions()
    # iterate over a copy of the keys: entries are deleted inside the loop
    for motor in list(motors_to_move_before_collect.keys()):
        if motors_to_move_before_collect[motor] is None:
            del motors_to_move_before_collect[motor]
            try:
                if current_diffractometer_position[motor] is not None:
                    positions_str += "%s=%f " % (
                        motor,
                        current_diffractometer_position[motor],
                    )
            except BaseException:
                pass

    # this is for the LIMS
    positions_str += " ".join(
        [
            motor + ("=%f" % pos)
            for motor, pos in motors_to_move_before_collect.items()
        ]
    )
    data_collect_parameters["actualCenteringPosition"] = positions_str

    self.move_motors(motors_to_move_before_collect)

    # take snapshots, then assign centring status (which contains images) to
    # centring_info variable
    take_snapshots = data_collect_parameters.get("take_snapshots", False)
    if take_snapshots:
        logging.getLogger("user_level_log").info("Taking sample snapshots")
        self._take_crystal_snapshots(take_snapshots)
    centring_info = self.bl_control.diffractometer.getCentringStatus()
    # move motors *again*, since taking snapshots may change positions
    logging.getLogger("user_level_log").info(
        "Moving motors: %r", motors_to_move_before_collect
    )
    self.move_motors(motors_to_move_before_collect)

    if self.bl_control.lims:
        try:
            if self.current_lims_sample:
                self.current_lims_sample["lastKnownCentringPosition"] = positions_str
                logging.getLogger("user_level_log").info(
                    "Updating sample information in LIMS"
                )
                self.bl_control.lims.update_bl_sample(self.current_lims_sample)
        except BaseException:
            logging.getLogger("HWR").exception(
                "Could not update sample information in LIMS"
            )

    if centring_info.get("images"):
        # Save snapshots
        snapshot_directory = self.get_archive_directory(file_parameters["directory"])

        try:
            logging.getLogger("user_level_log").info(
                "Creating snapshots directory: %r", snapshot_directory
            )
            self.create_directories(snapshot_directory)
        except BaseException:
            logging.getLogger("HWR").exception("Error creating snapshot directory")
        else:
            snapshot_i = 1
            snapshots = []
            for img in centring_info["images"]:
                img_phi_pos = img[0]
                img_data = img[1]
                snapshot_filename = "%s_%s_%s.snapshot.jpeg" % (
                    file_parameters["prefix"],
                    file_parameters["run_number"],
                    snapshot_i,
                )
                full_snapshot = os.path.join(snapshot_directory, snapshot_filename)

                try:
                    logging.getLogger("user_level_log").info(
                        "Saving snapshot %d", snapshot_i
                    )
                    # snapshot data is binary JPEG, so write in binary mode
                    with open(full_snapshot, "wb") as f:
                        f.write(img_data)
                except BaseException:
                    logging.getLogger("HWR").exception("Could not save snapshot!")

                data_collect_parameters[
                    "xtalSnapshotFullPath%i" % snapshot_i
                ] = full_snapshot
                snapshots.append(full_snapshot)
                snapshot_i += 1

    try:
        data_collect_parameters["centeringMethod"] = centring_info["method"]
    except BaseException:
        data_collect_parameters["centeringMethod"] = None

    if self.bl_control.lims:
        try:
            logging.getLogger("user_level_log").info(
                "Updating data collection in LIMS"
            )
            if "kappa" in data_collect_parameters["actualCenteringPosition"]:
                data_collect_parameters["oscillation_sequence"][0][
                    "kappaStart"
                ] = current_diffractometer_position["kappa"]
                data_collect_parameters["oscillation_sequence"][0][
                    "phiStart"
                ] = current_diffractometer_position["kappa_phi"]
            self.bl_control.lims.update_data_collection(data_collect_parameters)
        except BaseException:
            logging.getLogger("HWR").exception(
                "Could not update data collection in LIMS"
            )

    oscillation_parameters = data_collect_parameters["oscillation_sequence"][0]
    sample_id = data_collect_parameters["blSampleId"]
    subwedge_size = oscillation_parameters.get("reference_interval", 1)

    # if data_collect_parameters["shutterless"]:
    #    subwedge_size = 1
    # else:
    #    subwedge_size = oscillation_parameters["number_of_images"]

    wedges_to_collect = self.prepare_wedges_to_collect(
        oscillation_parameters["start"],
        oscillation_parameters["number_of_images"],
        oscillation_parameters["range"],
        subwedge_size,
        oscillation_parameters["overlap"],
    )
    nframes = sum([wedge_size for _, wedge_size in wedges_to_collect])

    # Added exposure time for ProgressBarBrick.
    # Extra time for each collection needs to be added (in this case 0.04)
    self.emit(
        "collectNumberOfFrames",
        nframes,
        oscillation_parameters["exposure_time"] + 0.04,
    )

    start_image_number = oscillation_parameters["start_image_number"]
    last_frame = start_image_number + nframes - 1

    if data_collect_parameters["skip_images"]:
        for start, wedge_size in wedges_to_collect[:]:
            filename = image_file_template % start_image_number
            file_location = file_parameters["directory"]
            file_path = os.path.join(file_location, filename)
            if os.path.isfile(file_path):
                logging.info("Skipping existing image %s", file_path)
                del wedges_to_collect[0]
                start_image_number += wedge_size
                nframes -= wedge_size
            else:
                # images have to be consecutive
                break

        if nframes == 0:
            return

    # data collection
    self.first_image_timeout = 30 + oscillation_parameters["exposure_time"]
    self.data_collection_hook(data_collect_parameters)

    if "transmission" in data_collect_parameters:
        logging.getLogger("user_level_log").info(
            "Setting transmission to %f", data_collect_parameters["transmission"]
        )
        self.set_transmission(data_collect_parameters["transmission"])

    if "wavelength" in data_collect_parameters:
        logging.getLogger("user_level_log").info(
            "Setting wavelength to %f", data_collect_parameters["wavelength"]
        )
        self.set_wavelength(data_collect_parameters["wavelength"])
    elif "energy" in data_collect_parameters:
        logging.getLogger("user_level_log").info(
            "Setting energy to %f", data_collect_parameters["energy"]
        )
        self.set_energy(data_collect_parameters["energy"])

    if "resolution" in data_collect_parameters:
        resolution = data_collect_parameters["resolution"]["upper"]
        logging.getLogger("user_level_log").info(
            "Setting resolution to %f", resolution
        )
        self.set_resolution(resolution)
    elif "detector_distance" in oscillation_parameters:
        logging.getLogger("user_level_log").info(
            "Moving detector to %f", oscillation_parameters["detector_distance"]
        )
        self.move_detector(oscillation_parameters["detector_distance"])

    # 0: software binned, 1: unbinned, 2: hw binned
    self.set_detector_mode(data_collect_parameters["detector_mode"])

    with cleanup(self.data_collection_cleanup):
        if not self.safety_shutter_opened():
            logging.getLogger("user_level_log").info("Opening safety shutter")
            self.open_safety_shutter(timeout=10)

        logging.getLogger("user_level_log").info("Preparing intensity monitors")
        self.prepare_intensity_monitors()

        frame = start_image_number
        osc_range = oscillation_parameters["range"]
        exptime = oscillation_parameters["exposure_time"]
        npass = oscillation_parameters["number_of_passes"]

        # update LIMS
        if self.bl_control.lims:
            try:
                logging.getLogger("user_level_log").info(
                    "Gathering data for LIMS update"
                )
                data_collect_parameters["flux"] = self.get_flux()
                data_collect_parameters["flux_end"] = data_collect_parameters["flux"]
                data_collect_parameters["wavelength"] = self.get_wavelength()
                data_collect_parameters[
                    "detectorDistance"
                ] = self.get_detector_distance()
                data_collect_parameters["resolution"] = self.get_resolution()
                data_collect_parameters["transmission"] = self.get_transmission()
                beam_centre_x, beam_centre_y = self.get_beam_centre()
                data_collect_parameters["xBeam"] = beam_centre_x
                data_collect_parameters["yBeam"] = beam_centre_y

                und = self.get_undulators_gaps()
                i = 1
                for jj in self.bl_config.undulators:
                    key = jj.type
                    if key in und:
                        data_collect_parameters["undulatorGap%d" % (i)] = und[key]
                        i += 1
                data_collect_parameters[
                    "resolutionAtCorner"
                ] = self.get_resolution_at_corner()
                beam_size_x, beam_size_y = self.get_beam_size()
                data_collect_parameters["beamSizeAtSampleX"] = beam_size_x
                data_collect_parameters["beamSizeAtSampleY"] = beam_size_y
                data_collect_parameters["beamShape"] = self.get_beam_shape()
                hor_gap, vert_gap = self.get_slit_gaps()
                data_collect_parameters["slitGapHorizontal"] = hor_gap
                data_collect_parameters["slitGapVertical"] = vert_gap

                logging.getLogger("user_level_log").info(
                    "Updating data collection in LIMS"
                )
                self.bl_control.lims.update_data_collection(
                    data_collect_parameters, wait=True
                )
                logging.getLogger("user_level_log").info(
                    "Done updating data collection in LIMS"
                )
            except BaseException:
                logging.getLogger("HWR").exception(
                    "Could not store data collection into LIMS"
                )

        if self.bl_control.lims and self.bl_config.input_files_server:
            logging.getLogger("user_level_log").info("Asking for input files writing")
            self.write_input_files(self.collection_id, wait=False)

        # at this point input files should have been written
        # TODO: agree on what parameters will be sent to this function
        if data_collect_parameters.get("processing", False) == "True":
            self.trigger_auto_processing(
                "before",
                self.xds_directory,
                data_collect_parameters["EDNA_files_dir"],
                data_collect_parameters["anomalous"],
                data_collect_parameters["residues"],
                data_collect_parameters["do_inducedraddam"],
                data_collect_parameters.get("sample_reference", {}).get(
                    "spacegroup", ""
                ),
                data_collect_parameters.get("sample_reference", {}).get("cell", ""),
            )
        if self.run_without_loop:
            self.execute_collect_without_loop(data_collect_parameters)
        else:
            for start, wedge_size in wedges_to_collect:
                logging.getLogger("user_level_log").info(
                    "Preparing acquisition, start=%f, wedge size=%d",
                    start,
                    wedge_size,
                )
                self.prepare_acquisition(
                    1 if data_collect_parameters.get("dark", 0) else 0,
                    start,
                    osc_range,
                    exptime,
                    npass,
                    wedge_size,
                    data_collect_parameters["comment"],
                )
                data_collect_parameters["dark"] = 0

                i = 0
                j = wedge_size
                while j > 0:
                    frame_start = start + i * osc_range
                    i += 1

                    filename = image_file_template % frame
                    try:
                        jpeg_full_path = jpeg_file_template % frame
                        jpeg_thumbnail_full_path = jpeg_thumbnail_file_template % frame
                    except BaseException:
                        jpeg_full_path = None
                        jpeg_thumbnail_full_path = None
                    file_location = file_parameters["directory"]
                    file_path = os.path.join(file_location, filename)

                    self.set_detector_filenames(
                        frame,
                        frame_start,
                        str(file_path),
                        str(jpeg_full_path),
                        str(jpeg_thumbnail_full_path),
                    )
                    osc_start, osc_end = self.prepare_oscillation(
                        frame_start, osc_range, exptime, npass
                    )

                    with error_cleanup(self.reset_detector):
                        self.start_acquisition(exptime, npass, j == wedge_size)
                        self.do_oscillation(osc_start, osc_end, exptime, npass)
                        self.stop_acquisition()
                        self.write_image(j == 1)

                    # Store image in lims
                    if self.bl_control.lims:
                        if self.store_image_in_lims(frame, j == wedge_size, j == 1):
                            lims_image = {
                                "dataCollectionId": self.collection_id,
                                "fileName": filename,
                                "fileLocation": file_location,
                                "imageNumber": frame,
                                "measuredIntensity": self.get_measured_intensity(),
                                "synchrotronCurrent": self.get_machine_current(),
                                "machineMessage": self.get_machine_message(),
                                "temperature": self.get_cryo_temperature(),
                            }

                            if archive_directory:
                                lims_image["jpegFileFullPath"] = jpeg_full_path
                                lims_image[
                                    "jpegThumbnailFileFullPath"
                                ] = jpeg_thumbnail_full_path

                            try:
                                self.bl_control.lims.store_image(lims_image)
                            except BaseException:
                                logging.getLogger("HWR").exception(
                                    "Could not store image in LIMS"
                                )

                            self.generate_image_jpeg(
                                str(file_path),
                                str(jpeg_full_path),
                                str(jpeg_thumbnail_full_path),
                                wait=False,
                            )

                    if data_collect_parameters.get("processing", False) == "True":
                        self.trigger_auto_processing(
                            "image",
                            self.xds_directory,
                            data_collect_parameters["EDNA_files_dir"],
                            data_collect_parameters["anomalous"],
                            data_collect_parameters["residues"],
                            data_collect_parameters["do_inducedraddam"],
                            data_collect_parameters.get("sample_reference", {}).get(
                                "spacegroup", ""
                            ),
                            data_collect_parameters.get("sample_reference", {}).get(
                                "cell", ""
                            ),
                        )

                    if data_collect_parameters.get("shutterless"):
                        with gevent.Timeout(
                            self.first_image_timeout,
                            RuntimeError(
                                "Timeout waiting for detector trigger, no image taken"
                            ),
                        ):
                            while self.last_image_saved() == 0:
                                time.sleep(exptime)

                        last_image_saved = self.last_image_saved()
                        if last_image_saved < wedge_size:
                            time.sleep(exptime * wedge_size / 100.0)
                            last_image_saved = self.last_image_saved()
                        frame = max(
                            start_image_number + 1,
                            start_image_number + last_image_saved - 1,
                        )
                        self.emit("collectImageTaken", frame)
                        j = wedge_size - last_image_saved
                    else:
                        j -= 1
                        self.emit("collectImageTaken", frame)
                        frame += 1
                        if j == 0:
                            break

        # Bug fix for MD2/3(UP): the diffractometer still has things to do even
        # after the last frame is taken (decelerate motors and possibly download
        # diagnostics), so we cannot trigger the cleanup (which will send an abort
        # to the diffractometer) as soon as the last frame is counted
        self.diffractometer().wait_ready(10)

    # data collection done
    self.data_collection_end_hook(data_collect_parameters)
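# Illustrative sketch only (not the actual prepare_wedges_to_collect
# implementation, which is not shown in this excerpt): do_collect() consumes
# the returned value as a list of (start_angle, wedge_size) pairs whose wedge
# sizes sum to the total number of frames. Assuming the simple case of
# non-overlapping subwedges of `subwedge_size` images each, a helper producing
# data of that shape could look like the function below; the real method may
# treat overlap and inverse-beam collection differently.
def _example_prepare_wedges(start, number_of_images, osc_range, subwedge_size):
    wedges = []
    remaining = number_of_images
    wedge_start = start
    while remaining > 0:
        wedge_size = min(subwedge_size, remaining)
        wedges.append((wedge_start, wedge_size))
        # the next wedge starts where this one ends (overlap ignored in this sketch)
        wedge_start += wedge_size * osc_range
        remaining -= wedge_size
    return wedges

# e.g. _example_prepare_wedges(0.0, 10, 0.1, 4) == [(0.0, 4), (0.4, 4), (0.8, 2)],
# so nframes == 4 + 4 + 2 == 10, matching the sum computed in do_collect().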
def waiter(q):
    # fail fast unless running on AppVeyor, where timing is less reliable
    with gevent.Timeout(0.1 if not greentest.RUNNING_ON_APPVEYOR else 0.5):
        self.assertEqual(q.get(), 'hi2')
    return "OK"
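# Hedged usage note: `waiter` is a closure inside a test case (hence the use of
# `self.assertEqual`), and in the surrounding test (not shown here) it would
# typically be spawned as a greenlet against a gevent queue, roughly:
#
#     q = gevent.queue.Queue()
#     g = gevent.spawn(waiter, q)
#     q.put('hi2')
#     assert g.get() == "OK"
#
# The exact driver code is an assumption for illustration, not taken from the source.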