def __init__(self, config, register_shutdown=False):
    """Configure a PhantomJS-backed renderer.

    The supplied ``config`` is merged over a deep copy of the ``PHANTOMJS``
    defaults.  Both the PhantomJS executable and the driver script must
    exist, otherwise a ``renderer.RenderError`` is raised immediately.

    :param config: mapping of overrides merged onto the PHANTOMJS defaults.
    :param register_shutdown: when True AND we are running on the main
        thread, install ``self._on_signal`` as the handler for
        SIGABRT/SIGINT/SIGTERM (signal handlers may only be installed
        from the main thread).
    :raises renderer.RenderError: executable or script cannot be located.
    """
    self.config = copy.deepcopy(PHANTOMJS)
    self.config = merge(self.config, config)
    # Subprocess handle and its stderr drain; created lazily elsewhere.
    self._proc = None
    self._stderr_reader = None
    # Separate locks: one serialising communication with the child
    # process, one serialising shutdown.
    self._comms_lock = threading.RLock()
    self._shutdown_lock = threading.RLock()
    if not self._which(self.config[u'executable']):
        raise renderer.RenderError(''.join([
            u"Can't locate PhantomJS executable: ",
            self.config[u'executable']
        ]))
    if not os.path.isfile(self.config[u'script']):
        raise renderer.RenderError(''.join(
            [u"Can't locate script: ", self.config[u'script']]))
    self._logger = logging.getLogger(u'PhantomJSRenderer')
    # NOTE(review): relies on the private threading._MainThread class to
    # detect the main thread — confirm against the supported Python versions.
    if register_shutdown and isinstance(threading.current_thread(),
                                        threading._MainThread):
        for sig in (SIGABRT, SIGINT, SIGTERM):
            signal(sig, self._on_signal)
def f(mutex):
    """Register the calling thread with the threading module, then
    release *mutex* to signal completion.

    Acquiring an RLock forces an entry for the foreign thread to get
    made in the threading._active map.
    """
    probe = threading.RLock()
    with probe:
        pass
    mutex.release()
def __init__(self, bitHopper): """ Sets up coin difficulties and reads in old difficulties from file. """ #Add Coins self.diff = {} for attr_coin in bitHopper.altercoins.itervalues(): self.diff[attr_coin['short_name']] = attr_coin['recent_difficulty'] #Store bitHopper for logging self.bitHopper = bitHopper #Read in old diffs cfg = ConfigParser.ConfigParser() cfg.read(["diffwebs.cfg"]) #Add diff_sites self.diff_sites = [] for site in cfg.sections(): self.diff_sites.append(dict(cfg.items(site))) self.lock = threading.RLock() eventlet.spawn_n(self.update_difficulty)
def test_various_ops(self):
    """Run NUMTASKS TestThreads bounded by a semaphore and verify they
    all finish (Python 2 test: `print` statements, assert_/isAlive)."""
    # This takes about n/3 seconds to run (about n/3 clumps of tasks,
    # times about 1 second per clump).
    NUMTASKS = 10
    # no more than 3 of the 10 can run at once
    sema = threading.BoundedSemaphore(value=3)
    mutex = threading.RLock()
    numrunning = Counter()
    threads = []
    for i in range(NUMTASKS):
        t = TestThread("<thread %d>" % i, self, sema, mutex, numrunning)
        threads.append(t)
        t.start()
    if verbose:
        print 'waiting for all tasks to complete'
    for t in threads:
        # join timeout is NUMTASKS seconds; the assert below catches a hang.
        t.join(NUMTASKS)
        self.assert_(not t.isAlive())
    if verbose:
        print 'all tasks done'
    # Every task must have decremented the running counter back to zero.
    self.assertEqual(numrunning.get(), 0)
def __init__(self, bitHopper):
    """Configure the payouts updater and start its background loop.

    :param bitHopper: application object providing config and logging.
    """
    self.bitHopper = bitHopper
    self.interval = 600
    self.parseConfig()
    self.log_msg("Payouts interval: " + str(self.interval))
    # FIX: create the lock BEFORE spawning run(); the original assigned
    # self.lock after eventlet.spawn_n, so the spawned task could look up
    # self.lock before the attribute existed.
    self.lock = threading.RLock()
    eventlet.spawn_n(self.run)
def startup():
    """Start collective mode when two or more members are configured."""
    global cfginitlock
    # Fewer than two members means we are not in collective mode.
    if len(list(cfm.list_collective())) < 2:
        return
    if cfginitlock is None:
        cfginitlock = threading.RLock()
    eventlet.spawn_n(start_collective)
def __init__(self, bitHopper):
    """Configure check/re-incarnate intervals and start the check loop.

    :param bitHopper: application object providing config and logging.
    """
    self.bitHopper = bitHopper
    self.interval = 60
    self.reincarnateInterval = 7200
    self.parseConfig()
    self.log_msg("Check interval: " + str(self.interval))
    self.log_msg("Re-Incarnate interval: " + str(self.reincarnateInterval))
    # FIX: create the lock BEFORE spawning run(); the original assigned
    # self.lock after eventlet.spawn_n, so the spawned task could look up
    # self.lock before the attribute existed.
    self.lock = threading.RLock()
    eventlet.spawn_n(self.run)
def __init__(self, bitHopper):
    """Set up accounting state, verify the database, and start the
    background database-writer thread."""
    self.curs = None
    self.bitHopper = bitHopper
    self.pool = bitHopper.pool
    self.check_database()
    # Per-pool accounting maps.
    self.shares = {}
    self.rejects = {}
    self.payout = {}
    self.lock = threading.RLock()
    writer = threading.Thread(target=self.write_database)
    writer.start()
def __init__(self, bitHopper):
    """Configure profitability tracking and start the updater loop.

    :param bitHopper: application object providing the config parser.
    """
    self.bitHopper = bitHopper
    try:
        self.calculate_profit = bitHopper.config.getboolean(
            'main', 'calculate_profit')
    except NoOptionError:
        # Option absent from config: default to computing profit.
        self.calculate_profit = True
    self.lock = threading.RLock()
    # FIX: seed rate/profitability defaults BEFORE spawning the updater;
    # the original assigned them after eventlet.spawn_n, so the spawned
    # task could observe the attributes missing.
    self.rate = {'btc': 1.0}
    self.profitability = {'btc': 1.0}
    eventlet.spawn_n(self.update_profitability)
def __init__(self, bitHopper):
    """Initialise server bookkeeping, load the pool config files, and
    register this object as the database's pool."""
    self.bitHopper = bitHopper
    self.lock = threading.RLock()
    # Server bookkeeping.
    self.servers = {}
    self.server_map = {}
    self.current_list = []
    self.i = 0
    # Lifecycle flags.
    self.initialized = False
    self.started = False
    self.pool_configs = ['pools.cfg', 'pools-custom.cfg']
    with self.lock:
        self.loadConfig()
    self.bitHopper.db.pool = self
def __init__(self, bitHopper):
    """Initialise long-poll state, firing the plugin start/end hooks
    around setup, then start the LP loop."""
    plugins.Hook('plugins.lp.init.start').notify(self, bitHopper)
    self.bitHopper = bitHopper
    logging.info('LP Module Load')
    self.pool = self.bitHopper.pool
    # Per-server long-poll tracking.
    self.blocks = {}
    self.lastBlock = None
    self.errors = {}
    self.polled = {}
    self.lock = threading.RLock()
    plugins.Hook('plugins.lp.init.end').notify(self, bitHopper)
    eventlet.spawn_n(self.start_lp)
def __init__(self, bitHopper):
    """Load per-user share statistics from the database and start the
    background pruning loop.

    :param bitHopper: application object exposing pool, db, speed,
        difficulty and the config parser.
    """
    self.users = {}
    self.bitHopper = bitHopper
    # FIX: dropped the original no-op `logging.debug = logging.debug`
    # self-assignment (dead code).
    self.pool = self.bitHopper.pool
    self.db = self.bitHopper.db
    self.speed = self.bitHopper.speed
    self.difficulty = self.bitHopper.difficulty
    self.lock = threading.RLock()
    try:
        self.user_drop_time = self.bitHopper.config.get('main',
                                                        'user_drop_time')
    # FIX: narrowed the original bare `except:` (which also swallowed
    # SystemExit/KeyboardInterrupt) to Exception.
    except Exception:
        # Option missing: drop idle users after one hour.
        self.user_drop_time = 3600
    with self.lock:
        users = self.db.get_users()
        for user in users:
            self.users[user] = {'shares': users[user]['shares'],
                                'rejects': users[user]['rejects'],
                                'last': 0,
                                'shares_time': [],
                                'hash_rate': 0}
    eventlet.spawn_n(self.prune)
def rlock_object(*args, **kwargs):
    # Factory indirection: build a green-thread-aware reentrant lock,
    # forwarding any arguments unchanged.
    return greenthreading.RLock(*args, **kwargs)
def __init__(self):
    """Start in the inactive state with a fresh reentrant lock."""
    self.mylock = threading.RLock()
    self.active = False
class Lifetime(Renderer):
    """
    Wraps a Renderer instance and limits the maximum time a single render
    instance can live, and also shuts down a process if it has been unused
    for a specified time period.

    PhantomJS can be configured to use an in-memory browser cache which can
    grow without bounds. The Lifetime decorator is useful for controlling
    this growth by limiting the amount of time a single Renderer can live,
    and releasing its accumulated resources. For long or continual running
    of the rendering process, it is highly recommended to wrap the Renderer
    with the Lifetime decorator to prevent eventual OOM.
    """

    # Wrapped Renderer instance.
    _delegate = None
    # time.time() of the most recent render(), or None after shutdown.
    _last_render_time = None
    # time.time() of the first render() in the current lifetime.
    _start_time = None
    # True while the monitor thread should keep running.
    _running = False
    # Handle of the eventlet monitor thread.
    _thread = None
    # NOTE(review): these are CLASS attributes, so the condition and lock
    # are shared by every Lifetime instance in the process — confirm this
    # is intentional rather than per-instance state.
    _condition = threading.Condition(threading.RLock())
    _lock = threading.RLock()

    def __init__(self, renderer):
        """Wrap *renderer*, layering its config over the LIFETIME defaults."""
        self._delegate = renderer
        self.config = copy.deepcopy(LIFETIME)
        self.config.update(renderer.get_config())
        self._logger = logging.getLogger(u'LifetimeDecorator')

    def get_config(self):
        """Return the merged LIFETIME + delegate configuration."""
        return self.config

    def render(self, url, html=None, img_format='PNG', width=1280,
               height=1024, page_load_timeout=None, user_agent=None,
               headers=None, cookies=None, html_encoding=u'utf-8',
               http_proxy=None):
        """Record render activity, lazily start the lifetime monitor,
        then delegate the actual rendering."""
        with self._lock:
            self._last_render_time = time.time()
            # First render of this lifetime establishes the start time.
            if self._start_time is None:
                self._start_time = self._last_render_time
            if not self._running:
                self._startup()
        return self._delegate.render(url, html, img_format, width, height,
                                     page_load_timeout, user_agent, headers,
                                     cookies, html_encoding, http_proxy)

    def _startup(self):
        # Start the background monitor; caller holds self._lock.
        self._running = True
        self._thread = eventlet.spawn(self._lifetime_monitor)

    def shutdown(self, timeout=None):
        """Stop the monitor thread and shut down the wrapped renderer."""
        self._running = False
        if self._condition is not None:
            # Wake the monitor thread
            with self._condition:
                self._condition.notify()
        if self._thread is not None:
            # Best effort: the thread may already be gone.
            try:
                self._thread.kill()
            except Exception:
                pass
        self._delegate.shutdown(timeout)

    def _lifetime_monitor(self):
        """Background loop: shut the delegate down when it has been idle
        for idle_shutdown_sec or alive for max_lifetime_sec; otherwise
        sleep until the nearest deadline."""
        while self._running:
            now = time.time()
            sleep_delta = None
            with self._lock:
                if self._last_render_time is not None:
                    idle_target = self.config[
                        'idle_shutdown_sec'] + self._last_render_time
                    if now >= idle_target:
                        self._logger.info(u"Shutting down idle renderer.")
                        self._last_render_time = None
                        self._start_time = None
                        self._delegate.shutdown()
                        self._running = False
                        break
                    else:
                        sleep_delta = idle_target - now
                if self._start_time is not None:
                    expired_target = self.config[
                        'max_lifetime_sec'] + self._start_time
                    if now >= expired_target:
                        self._logger.info(
                            u"Shutting down renderer which reached max lifetime."
                        )
                        self._last_render_time = None
                        self._start_time = None
                        self._delegate.shutdown()
                        self._running = False
                        break
                    else:
                        # Sleep until whichever deadline comes first.
                        if sleep_delta is not None:
                            sleep_delta = min(sleep_delta,
                                              expired_target - now)
                        else:
                            sleep_delta = expired_target - now
            if sleep_delta is not None:
                with self._condition:
                    # Re-check under the condition so a concurrent
                    # shutdown() wake-up is not missed.
                    if self._running:
                        self._condition.wait(sleep_delta)
def rlock_object(self):
    # Factory: green-thread-aware reentrant lock.
    return green_threading.RLock()
def __init__(self, bitHopper):
    """Hold per-key data under a lock and start the background pruner."""
    self.bitHopper = bitHopper
    self.data = {}
    self.lock = threading.RLock()
    eventlet.spawn_n(self.prune)
def connect_to_leader(cert=None, name=None, leader=None):
    """Connect this node to the collective's leader and become a follower.

    Performs the connect handshake, handles the error/redirect responses
    from the leader (backoff retry, redirect to another leader, or a
    txcount reply meaning we should become leader ourselves), then
    receives the collective membership, global variables and a JSON dump
    of the configuration database before spawning the follower thread.

    :param cert: certificate passed through to connect_to_collective.
    :param name: our collective member name; defaults to get_myname().
    :param leader: address to connect to; defaults to currentleader.
    :returns: True on success (or scheduled retry), False on failure.
    :raises Exception: redirected to self, or the initial DB transfer fails.
    """
    global currentleader
    global cfginitlock
    global follower
    if cfginitlock is None:
        cfginitlock = threading.RLock()
    if leader is None:
        leader = currentleader
    try:
        remote = connect_to_collective(cert, leader)
    except socket.error:
        return False
    with connecting:
        with cfginitlock:
            tlvdata.recv(remote)  # the banner
            tlvdata.recv(remote)  # authpassed... 0..
            if name is None:
                name = get_myname()
            tlvdata.send(
                remote, {
                    'collective': {
                        'operation': 'connect',
                        'name': name,
                        'txcount': cfm._txcount
                    }
                })
            keydata = tlvdata.recv(remote)
            if not keydata:
                return False
            if 'error' in keydata:
                if 'backoff' in keydata:
                    # Leader busy: retry after a short random delay.
                    eventlet.spawn_after(random.random(), connect_to_leader,
                                         cert, name, leader)
                    return True
                if 'leader' in keydata:
                    # Redirected to the real leader; guard against loops.
                    ldrc = cfm.get_collective_member_by_address(
                        keydata['leader'])
                    if ldrc and ldrc['name'] == name:
                        raise Exception("Redirected to self")
                    return connect_to_leader(name=name,
                                             leader=keydata['leader'])
                if 'txcount' in keydata:
                    # Our transaction count wins: take over leadership.
                    return become_leader(remote)
                print(keydata['error'])
                return False
            # Handshake succeeded: stop any previous follower thread.
            if follower is not None:
                follower.kill()
                cfm.stop_following()
                follower = None
            colldata = tlvdata.recv(remote)
            globaldata = tlvdata.recv(remote)
            dbi = tlvdata.recv(remote)
            dbsize = dbi['dbsize']
            dbjson = ''
            # Read exactly dbsize bytes of the JSON DB dump.
            while (len(dbjson) < dbsize):
                ndata = remote.recv(dbsize - len(dbjson))
                if not ndata:
                    # Peer closed mid-transfer; clean up and abort.
                    try:
                        remote.close()
                    except Exception:
                        pass
                    raise Exception("Error doing initial DB transfer")
                dbjson += ndata
            # Replace local configuration with the leader's copy; roll
            # back on any failure while applying it.
            cfm.clear_configuration()
            try:
                cfm._restore_keys(keydata, None, sync=False)
                for c in colldata:
                    cfm._true_add_collective_member(c,
                                                    colldata[c]['address'],
                                                    colldata[c]['fingerprint'],
                                                    sync=False)
                for globvar in globaldata:
                    cfm.set_global(globvar, globaldata[globvar], False)
                cfm._txcount = dbi.get('txcount', 0)
                cfm.ConfigManager(tenant=None)._load_from_json(dbjson,
                                                               sync=False)
                cfm.commit_clear()
            except Exception:
                cfm.stop_following()
                cfm.rollback_clear()
                raise
            currentleader = leader
    #spawn this as a thread...
    follower = eventlet.spawn(follow_leader, remote)
    return True