def __init__(self):
    """Initialize proxy-middleware state: locks, the proxy cache, the
    vendor proxy API endpoint, and per-proxy failure statistics."""
    # 2. Initialize configuration and related state.
    # self.proxies = settings.getlist('PROXIES')
    # Locks used to serialize access to the shared proxy state below.
    self.lock = DeferredLock()
    self.lockproxy = DeferredLock()
    # Proxies currently known/in use.
    self.proxies = {}
    # Vendor API returning up to 50 proxies per call.
    # NOTE(review): the API key is hard-coded in this URL — consider moving
    # it to settings or an environment variable.
    self.api = 'http://lianjiaip.v4.dailiyun.com/query.txt?key=NP7CE43238&word=&count=50&rand=false&detail=true'
    # Failure counters (keying scheme not visible in this chunk).
    self.stats = defaultdict(int)
    # Failure threshold — presumably a proxy is dropped after this many
    # failures; confirm against the request/response handlers.
    self.max_failed = 10
    # Timestamp (seconds since epoch) of initialization / last refresh.
    self.time = time.time()
def _get_xem(self, serial=None):
    """Return the FrontPanel device for *serial*, opening and caching it
    on first use."""
    if serial in self._open_xems:
        return self._open_xems[serial]
    # First request for this serial: open the device and attach a
    # DeferredLock so callers can serialize access to the hardware.
    device = ok.okCFrontPanelDevices().Open(serial)
    device._lock = DeferredLock()
    self._open_xems[serial] = device
    return device
def add_to_load_balancers(log, request_bag, lb_configs, server, undo):
    """
    Add the given server to the load balancers specified by ``lb_configs``.

    Additions are serialized through a ``DeferredLock`` so that only one
    load-balancer request is in flight at a time.

    :param log: A bound logger.
    :param callable request_bag: A request function.
    :param list lb_configs: List of lb_config dictionaries.
    :param dict server: Server dict of the server to add, as per server
        details response from Nova.
    :param IUndoStack undo: An IUndoStack to push any reversable operations
        onto.

    :return: Deferred that fires with a list of 2-tuples of the load
        balancer configuration, and that load balancer's respective
        response.
    """
    _add = partial(add_to_load_balancer, log, request_bag,
                   server_details=server, undo=undo)

    dl = DeferredLock()

    def _serial_add(lb_config):
        # DeferredLock.run acquires the lock, invokes the callable, and
        # releases the lock even when the callable fails.  The previous
        # acquire/.../release sequence skipped release() on failure,
        # leaving the lock held forever and deadlocking later additions.
        return dl.run(_add, lb_config)

    d = gatherResults(map(_serial_add, lb_configs), consumeErrors=True)
    return d.addCallback(partial(zip, lb_configs))
def __init__(self, nodedir=None, executable=None):
    """Set up paths and runtime state for a Tahoe-LAFS node rooted at
    *nodedir* (defaults to ``~/.tahoe``)."""
    self.executable = executable
    self.multi_folder_support = True
    if nodedir:
        self.nodedir = os.path.expanduser(nodedir)
    else:
        self.nodedir = os.path.join(os.path.expanduser('~'), '.tahoe')
    # Well-known files inside the node directory.
    self.rootcap_path = os.path.join(self.nodedir, 'private', 'rootcap')
    self.servers_yaml_path = os.path.join(self.nodedir, 'private', 'servers.yaml')
    self.config = Config(os.path.join(self.nodedir, 'tahoe.cfg'))
    self.pidfile = os.path.join(self.nodedir, 'twistd.pid')
    # Populated later, once the node is running / configured.
    self.nodeurl = None
    self.shares_happy = None
    self.name = os.path.basename(self.nodedir)
    self.api_token = None
    self.magic_folders_dir = os.path.join(self.nodedir, 'magic-folders')
    # Serializes operations on this node object.
    self.lock = DeferredLock()
    self.rootcap = None
    self.magic_folders = defaultdict(dict)
    self.remote_magic_folders = defaultdict(dict)
    self.use_tor = False
    self.monitor = Monitor(self)
    self._monitor_started = False
    self.state = Tahoe.STOPPED
def __init__(self, priority):
    """Initialize with the given priority and register a shutdown hook."""
    # Guards state manipulation on this object.
    self._lock = DeferredLock()
    # Stored as a string regardless of the input type.
    self.priority = str(priority)
    self.is_shutdown = False
    # Publishes this object's state; starts as an empty tuple.
    self.state_pub = state_publisher.StatePublisher(())
    self.flush()
    # Clean up before the reactor shuts down.
    reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)
def __init__(
    self,
    database: DatabasePool,
    db_conn: LoggingDatabaseConnection,
    hs: "HomeServer",
):
    """Initialize the stats store and register its background updates."""
    super().__init__(database, db_conn, hs)

    self.server_name: str = hs.hostname
    self.clock = self.hs.get_clock()
    self.stats_enabled = hs.config.stats.stats_enabled

    # Ensures only one stats-delta batch is processed at a time.
    self.stats_delta_processing_lock = DeferredLock()

    self.db_pool.updates.register_background_update_handler(
        "populate_stats_process_rooms", self._populate_stats_process_rooms)
    self.db_pool.updates.register_background_update_handler(
        "populate_stats_process_users", self._populate_stats_process_users)
    # we no longer need to perform clean-up, but we will give ourselves
    # the potential to reintroduce it in the future – so documentation
    # will still encourage the use of this no-op handler.
    self.db_pool.updates.register_noop_background_update(
        "populate_stats_cleanup")
    self.db_pool.updates.register_noop_background_update(
        "populate_stats_prepare")
def rate_limited(max_per_second: int):
    """Rate-limits the decorated function locally, for one process.

    Callers are serialized through a lock; each caller sleeps just long
    enough to keep the interval between invocations >= 1/max_per_second.

    Fix: the original created a twisted ``DeferredLock`` and called
    ``lock.acquire()`` without waiting on the returned Deferred, so the
    body ran without holding the lock (no mutual exclusion), and
    ``lock.release()`` in ``finally`` could fire before acquisition.
    A ``threading.Lock`` used as a context manager gives the intended
    blocking, process-local exclusion.
    """
    import threading

    lock = threading.Lock()
    min_interval = 1.0 / max_per_second

    def decorate(func):
        # Initialized at decoration time, so the very first call may wait
        # up to min_interval (preserves the original's timing baseline).
        last_time_called = time.perf_counter()

        @wraps(func)
        def rate_limited_function(*args, **kwargs):
            nonlocal last_time_called
            with lock:
                elapsed = time.perf_counter() - last_time_called
                left_to_wait = min_interval - elapsed
                if left_to_wait > 0:
                    # Deliberately blocking: this limiter throttles the
                    # calling thread.
                    time.sleep(left_to_wait)
                try:
                    return func(*args, **kwargs)
                finally:
                    last_time_called = time.perf_counter()

        return rate_limited_function

    return decorate
def test__calling_importer_issues_rpc_calls_to_clusters(self):
    """Importer fires ImportBootImages on connected racks and records a
    failure for racks without a connection."""
    # Some clusters that we'll ask to import resources.
    rack_1 = factory.make_RackController()
    rack_2 = factory.make_RackController()

    # Connect only cluster #1.
    rack_1_conn = self.rpc.makeCluster(rack_1, ImportBootImages)
    rack_1_conn.ImportBootImages.return_value = succeed({})

    # Do the import.
    importer = RackControllersImporter.new(
        [rack_1.system_id, rack_2.system_id])
    results = importer(lock=DeferredLock()).wait(5)

    # The results are a list (it's from a DeferredList).
    self.assertThat(
        results,
        MatchesListwise((
            # Success when calling rack_1.
            Equals((True, {})),
            # Failure when calling rack_2: no connection.
            MatchesListwise((
                Is(False),
                MatchesAll(
                    IsInstance(Failure),
                    MatchesStructure(
                        value=IsInstance(NoConnectionsAvailable)),
                ),
            )),
        )),
    )
def __init__(self, host, name, qnodeos_net, backend, network_name="default"):
    """
    Initialize NetQASM Factory.

    lhost details of the local host (class host)
    """
    self.host = host
    self.name = name
    self.qnodeos_net = qnodeos_net
    # Filled in later; None until the virtual node is attached.
    self.virtRoot = None
    self.qReg = None
    self.backend = backend(self)
    self.network_name = network_name
    # Dictionary that keeps qubit dictionaries for each application
    self.qubitList = {}
    # Lock governing access to the qubitList
    self._lock = DeferredLock()
    self._logger = get_netqasm_logger(f"{self.__class__.__name__}({name})")

    # Read in topology, if specified. topology=None means fully connected
    # topology
    self.topology = None
    if simulaqron_settings.network_config_file is not None:
        networks_config = NetworksConfigConstructor(file_path=simulaqron_settings.network_config_file)
        self.topology = networks_config.networks[network_name].topology
def initServer(self):
    """Prepare server state: default collection times and the hardware
    communication lock, then connect to the Opal Kelly board."""
    # Default collection times, keyed by channel number, in seconds.
    self.collectionTime = {0: 1.0, 1: 1.0}
    # Serializes access to the hardware link.
    self.inCommunication = DeferredLock()
    self.connectOKBoard()
def __init__(self, config=None):
    """Create the Live session and expose it via the module-global
    ``live``."""
    ApplicationSession.__init__(self, config)
    # Module-level singleton: the most recently created session.
    global live
    live = self
    self.logger = logging.getLogger('Live')
    self.logger.info("Config: %s", config)
    self.account_id = config.extra['authid']
    self.secret = config.extra['secret']
    # Ids without a dash are prefixed as local accounts.
    if '-' not in self.account_id:
        self.account_id = "local-%s" % self.account_id
    # Auth id: account id plus the last 7 characters of the secret.
    self.authid = '%s:%s' % (self.account_id, self.secret[-7:])
    self.joined = False
    # Serializes access to the shared dicts below.
    self.lock = DeferredLock()
    self.checks = {}
    self.workers = {}
    self.CallOptions = CallOptions()
def __init__(self, host, name, cqc_net, backend, network_name="default"):
    """
    Initialize CQC Factory.

    lhost details of the local host (class host)
    """
    self.host = host
    self.name = name
    self.cqcNet = cqc_net
    # Filled in later; None until the virtual node is attached.
    self.virtRoot = None
    self.qReg = None
    self.backend = backend(self)
    self.network_name = network_name
    # Dictionary that keeps qubit dictionaries for each application
    self.qubitList = {}
    # Lock governing access to the qubitList
    self._lock = DeferredLock()
    # Read in topology, if specified. topology=None means fully connected
    # topology
    self.topology = None
    if simulaqron_settings.topology_file is not None and simulaqron_settings.topology_file != "":
        self._setup_topology(simulaqron_settings.topology_file)
    else:
        if simulaqron_settings.network_config_file is not None:
            networks_config = NetworksConfigConstructor(
                file_path=simulaqron_settings.network_config_file)
            self.topology = networks_config.networks[network_name].topology
def initServer(self):
    """Initialize the server after connecting to LabRAD.

    NOTE(review): this body uses ``yield``, so it is a generator —
    presumably decorated with ``inlineCallbacks`` outside this view;
    confirm the decorator at the definition site.
    """
    self.knownDevices = {}  # maps (server, channel) to (name, idn)
    self.deviceServers = {}  # maps device name to list of interested servers.
    # each interested server is {'target':<>,'context':<>,'messageID':<>}
    self.identFunctions = {}  # maps server to (setting, ctx) for ident
    # Serializes device identification.
    self.identLock = DeferredLock()

    # named messages are sent with source ID first, which we ignore
    def connect_func(c, s_payload):
        (s, payload) = s_payload
        return self.gpib_device_connect(*payload)

    def disconnect_func(c, s_payload):
        (s, payload) = s_payload
        return self.gpib_device_disconnect(*payload)

    mgr = self.client.manager
    # Listener IDs 10/11 match the subscriptions registered just below.
    self._cxn.addListener(connect_func, source=mgr.ID, ID=10)
    self._cxn.addListener(disconnect_func, source=mgr.ID, ID=11)
    yield mgr.subscribe_to_named_message('GPIB Device Connect', 10, True)
    yield mgr.subscribe_to_named_message('GPIB Device Disconnect', 11, True)
    # do an initial scan of the available GPIB devices
    yield self.refreshDeviceLists()
def __init__(self, node, register, simNum, num=0):
    """A virtual qubit located on *node* and simulated in *register*."""
    # Node where this qubit is located
    self.node = node
    # Register where this qubit is simulated
    self.register = register
    # Number in the register, if known
    self.num = num
    # Number of the simulated qubit, unique at each virtual node
    self.simNum = simNum
    # Lock marshalling access to this qubit
    self._lock = DeferredLock()
    # Mark this qubit as active (still connected to a register)
    self.active = True
    # Time until retry
    self._delay = 1
    # Optional parameters used when the simulation is noisy
    self.noisy = settings.simulaqron_settings.noisy_qubits
    self.T1 = settings.simulaqron_settings.t1
    self.last_accessed = time.time()
    self._logger = get_netqasm_logger(
        f"{self.__class__.__name__}(node={node.name}, sim_num={simNum})")
def __init__(self, nodedir=None, executable=None, reactor=None):
    """Set up paths and runtime state for a Tahoe-LAFS node rooted at
    *nodedir* (defaults to ``~/.tahoe``); *reactor* may be injected for
    testing."""
    if reactor is None:
        # Imported here so callers may inject an alternative reactor.
        from twisted.internet import reactor
    self.executable = executable
    self.multi_folder_support = True
    if nodedir:
        self.nodedir = os.path.expanduser(nodedir)
    else:
        self.nodedir = os.path.join(os.path.expanduser('~'), '.tahoe')
    # Well-known files inside the node directory.
    self.rootcap_path = os.path.join(self.nodedir, 'private', 'rootcap')
    self.servers_yaml_path = os.path.join(self.nodedir, 'private', 'servers.yaml')
    self.config = Config(os.path.join(self.nodedir, 'tahoe.cfg'))
    self.pidfile = os.path.join(self.nodedir, 'twistd.pid')
    # Populated later, once the node is running / configured.
    self.nodeurl = None
    self.shares_happy = None
    self.name = os.path.basename(self.nodedir)
    self.api_token = None
    self.magic_folders_dir = os.path.join(self.nodedir, 'magic-folders')
    # Serializes operations on this node object.
    self.lock = DeferredLock()
    self.rootcap = None
    self.magic_folders = defaultdict(dict)
    self.remote_magic_folders = defaultdict(dict)
    self.use_tor = False
    self.monitor = Monitor(self)
    # Optional cap on the streamed-log buffer, from the debug settings.
    streamedlogs_maxlen = None
    debug_settings = global_settings.get('debug')
    if debug_settings:
        log_maxlen = debug_settings.get('log_maxlen')
        if log_maxlen is not None:
            streamedlogs_maxlen = int(log_maxlen)
    self.streamedlogs = StreamedLogs(reactor, streamedlogs_maxlen)
    self.state = Tahoe.STOPPED
    self.newscap = ""
    self.newscap_checker = NewscapChecker(self)
def __init__(self, bandwidth=5):
    """Create a bounded task queue.

    :param bandwidth: maximum queue length; requests arriving once this
        length is exceeded are cut off.
    """
    # Serializes manipulation of the task list.
    self._lock = DeferredLock()
    self.bandwidth = bandwidth
    # Pending tasks, in arrival order.
    self.tasks = []
def __init__(self, kernel_key, conda_env=conda_env, env_path=env_path,
             pkg_path=pkg_path, config=ComponentConfig(realm=u"jupyter")):
    """Create the session for one kernel.

    :param kernel_key: identifier/key of the kernel this session serves.
    :param conda_env: conda environment name (module-level default).
    :param env_path: path to the environment (module-level default).
    :param pkg_path: path to the package (module-level default).
    :param config: WAMP component config for the ``jupyter`` realm.
        NOTE(review): this default is evaluated once at definition time,
        so all default-constructed sessions share one ComponentConfig
        instance — confirm that is intended.

    Fix: the original assigned ``self._conda_env = conda_env`` twice;
    the redundant second assignment is removed.
    """
    ApplicationSession.__init__(self, config=config)
    self._kernel_key = kernel_key
    self._conda_env = conda_env
    self._env_path = env_path
    self._pkg_path = pkg_path
    # Serializes access to this session's shared state.
    self._lock = DeferredLock()
def open_by_serial(self, c, serial):
    """Open the Opal Kelly device with the given serial and cache the
    handle; subsequent calls with the same serial reuse it. Returns the
    serial."""
    if serial not in self._open_xems:
        fp = ok.okCFrontPanel()
        # NOTE(review): the device count is queried but the result is
        # discarded — presumably to refresh the device list; confirm.
        fp.GetDeviceCount()
        # NOTE(review): `success` (the OpenBySerial result code) is never
        # checked, so a failed open is silently cached under this serial.
        success = fp.OpenBySerial(serial)
        # Per-device lock used by callers to serialize hardware access.
        fp._lock = DeferredLock()
        self._open_xems[serial] = fp
    return serial
def __init__(self, cxn, context, dataset):
    """Wrap one dataset and start listening for incoming data."""
    super(Dataset, self).__init__()
    # Lock taken while reading or updating self.data.
    self.accessingData = DeferredLock()
    self.dataset = dataset
    self.cxn = cxn
    # Context of the first dataset in the window.
    self.context = context
    # No data received yet.
    self.data = None
    self.setupDataListener(context)
def open(self, c, serial=''):
    """Open an Opal Kelly device and return its serial number.

    With the default empty serial the first available device is opened;
    the handle is cached under its real serial number.
    """
    if serial in self._open_xems:
        # Already open: nothing to do.
        return serial
    xem = ok.okCFrontPanelDevices().Open(serial)
    serial = xem.GetSerialNumber()
    # Attach a per-device lock so callers can serialize hardware access.
    xem._lock = DeferredLock()
    self._open_xems[serial] = xem
    return serial
def __init__(self):
    """Initialize relayer/account bookkeeping and protocol constants.

    Fix: the original assigned ``self.lockDict = DeferredLock()`` and then
    immediately overwrote it with ``threading.RLock()`` a few lines later;
    the dead DeferredLock assignment is removed (final behavior is
    unchanged — the attribute always ended up as an RLock).
    """
    self.dictRelayer = {}  # key:relayerid, value:SBProtocol
    self.dictAccounts = {}  # key:relayerid, value:array of SBProtocol
    # Guards the dictionaries above.
    self.lockDict = threading.RLock()
    # Two-byte header tag prefixed to SBMP messages.
    self.SBMP_HEADERTAG = struct.pack("2B", 0x01, 0xBB)
    # Guards the pending-command state.
    self.lockPendingCmd = threading.RLock()
def initServer(self):
    """Connect to the DAC board and prepare server state.

    NOTE(review): this body uses ``yield``, so it is a generator —
    presumably decorated with ``inlineCallbacks`` outside this view;
    confirm the decorator at the definition site.
    """
    self.api_dac = api_dac()
    # Serializes access to the hardware link.
    self.inCommunication = DeferredLock()
    connected = self.api_dac.connectOKBoard()
    if not connected:
        raise Exception ("Could not connect to DAC")
    self.d = yield self.initializeDAC()
    # LabRAD contexts interested in signals from this server.
    self.listeners = set()
def __init__(self, settings):
    """Retry middleware that rotates a proxy with a fixed lifetime."""
    super(My_RetryMiddleware, self).__init__(settings)
    # Serializes proxy replacement across concurrent requests.
    self.lock = DeferredLock()
    # Proxy lifetime in seconds, from the 'expire_datetime' setting.
    self.seconds = settings.getint('expire_datetime')
    self.proxy_url = settings.get('PROXY_URL')
    # Moment at which the current proxy expires.
    self.expire_datetime = datetime.datetime.now() + datetime.timedelta(
        seconds=self.seconds)
    # No proxy fetched yet.
    self.proxy = None
def __init__(self, name, conf):
    """Create the processor with its name and configuration."""
    _log.info("CF_INIT %s", name)
    self.name = name
    self.conf = conf
    # Channels grouped by key; missing keys start as empty lists.
    self.channel_dict = defaultdict(list)
    self.iocs = {}
    # Set once a client connection is established.
    self.client = None
    # Callable used to obtain the current time.
    self.currentTime = getCurrentTime
    # Guards shared state against concurrent access.
    self.lock = DeferredLock()
def __init__(self, spider_name):
    """Initialize the paid proxy pool (short-lived proxies).

    Each proxy record tracks: ip, port, source site, fetch time, whether
    it has been banned, whether it has expired, and its expiry time.
    """
    self.redis = redis.StrictRedis(host=PROXY_REDIS_HOST, port=PROXY_REDIS_PORT, db=PROXY_REDIS_DB)
    # Serializes access to the shared pool.
    self.lock = DeferredLock()
    self._init_proxy_pool(spider_name)
def __init__(self):
    """Downloader middleware that injects rotating IP proxies."""
    super(IPProxyDownloadMiddleware, self).__init__()
    # Proxy currently handed to outgoing requests (None until fetched).
    self.current_proxy = None
    # Serializes proxy refreshes across concurrent requests.
    self.lock = DeferredLock()
    # Presumably flags the current proxy as banned — confirm in handlers.
    self.blacked = False
    # Presumably True while a fresh proxy is being fetched — confirm.
    self.get_proxy = False
    self.proxies = []
    # self.proxies_time = 9999999999
    # Timestamp (seconds since epoch) of the last proxy fetch.
    self.proxies_time = time.time()
def initServer(self):
    """Load defaults from the hardware configuration and connect to the
    Opal Kelly board."""
    self.channelDict = hardwareConfiguration.channelDict
    self.collectionTime = hardwareConfiguration.collectionTime
    self.collectionMode = hardwareConfiguration.collectionMode
    self.sequenceType = hardwareConfiguration.sequenceType
    self.isProgrammed = hardwareConfiguration.isProgrammed
    # Serializes access to the hardware link.
    self.inCommunication = DeferredLock()
    self.connectOKBoard()
    # LabRAD contexts interested in signals from this server.
    self.listeners = set()
def __init__(self, ip, port, bot, worker_log_dir):
    """Store connection parameters and prepare runtime bookkeeping."""
    self.ip = ip
    self.port = port
    self.bot = bot
    self.worker_log_dir = worker_log_dir
    # Absolute directory containing this source file.
    this_file = inspect.getfile(inspect.currentframe())
    self.path = os.path.dirname(os.path.abspath(this_file))
    # Bookkeeping maps, populated at runtime.
    self.connections = {}
    self.nodes = {}
    # Guards shared state against concurrent access.
    self.lock = DeferredLock()
def __init__(self, iface):
    """Track link state for *iface* and register a shutdown hook."""
    self.iface = iface
    # Guards state updates from concurrent callbacks.
    self._lock = DeferredLock()
    # Combine the PLUGGED and UP state publishers for this interface into
    # one composite whose updates are delivered to self._cb.
    CompositeStatePublisher(lambda x: x, [
        netlink_monitor.get_state_publisher(iface, IFSTATE.PLUGGED),
        netlink_monitor.get_state_publisher(iface, IFSTATE.UP),
    ]).subscribe(self._cb)
    self._is_shutdown = False
    # No state observed yet.
    self.state = None
    # Clean up before the reactor shuts down.
    reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)
def __init__(self, service, clock=reactor):
    """
    @param service: An object implementing the same whenConnected() API as
        the twisted.application.internet.ClientService class.
    @param clock: An object implementing IReactorTime.
    """
    self._service = service
    self._clock = clock
    # Current channel; None until one is first acquired.
    self._channel = None
    # Ensures only one caller sets up / replaces the channel at a time.
    self._channel_lock = DeferredLock()