def server_update(self):
    #logging.debug('server_update', cat='server_update')
    diff_time = time.time() - self.lastcalled
    self.lastcalled = time.time()

    current = self.bitHopper.pool.get_current()
    shares, info = self.server_to_btc_shares(current)
    info['slice'] = info['slice'] - diff_time
    #logging.debug(current_server + ' slice ' + str(info['slice']), cat='server_update')

    if not self.initDone:
        self.bitHopper.select_best_server()
        return True

    if info['slice'] <= 0:
        return True

    # shares are now less than shares at time of slicing (new block found?)
    if info['slicedShares'] > info['shares']:
        logging.debug("slicedShares > shares")
        return True

    # double check role
    if info['role'] not in self.valid_roles:
        return True

    # check to see if threshold exceeded
    difficulty = self.bitHopper.difficulty['btc']
    min_shares = difficulty * self.difficultyThreshold
    if shares > min_shares:
        logging.debug("shares > min_shares")
        info['slice'] = -1  # force switch
        return True

    return False

def server_update(self):
    #self.bitHopper.log_msg(str(self.sliceinfo))
    diff_time = time.time() - self.lastcalled
    self.lastcalled = time.time()
    current = self.sliceinfo[self.bitHopper.pool.get_current()]
    if current == -1:
        return True
    self.sliceinfo[self.bitHopper.pool.get_current()] += diff_time
    if self.bitHopper.pool.servers[self.bitHopper.pool.get_current()]['role'] not in self.valid_roles:
        return True
    valid = []
    for k in self.sliceinfo:
        if self.sliceinfo[k] != -1:
            valid.append(k)
    if len(valid) <= 1:
        return True
    for server in valid:
        if current - self.sliceinfo[server] > 30:
            return True
    difficulty = self.bitHopper.difficulty.get_difficulty()
    min_shares = difficulty * self.difficultyThreshold
    shares = self.server_to_btc_shares(self.bitHopper.pool.get_current())[0]
    if shares > min_shares:
        return True
    return False

def server_update(self):
    #self.bitHopper.log_dbg('server_update', cat='server_update')
    diff_time = time.time() - self.lastcalled
    self.lastcalled = time.time()

    current = self.bitHopper.pool.get_current()
    shares, info = self.server_to_btc_shares(current)
    info['slice'] = info['slice'] - diff_time
    #self.bitHopper.log_dbg(current_server + ' slice ' + str(info['slice']), cat='server_update')

    if self.initDone == False:
        self.bitHopper.select_best_server()
        return True

    if info['slice'] <= 0:
        return True

    # shares are now less than shares at time of slicing (new block found?)
    if info['slicedShares'] > info['shares']:
        self.bitHopper.log_dbg("slicedShares > shares")
        return True

    # double check role
    if info['role'] not in self.valid_roles:
        return True

    # check to see if threshold exceeded
    difficulty = self.bitHopper.difficulty.get_difficulty()
    min_shares = difficulty * self.difficultyThreshold
    if shares > min_shares:
        self.bitHopper.log_dbg("shares > min_shares")
        info['slice'] = -1  # force switch
        return True

    return False

def user_share_add(self, user, password, shares, server):
    with self.lock:
        if user not in self.users:
            self.users[user] = {'shares': 0, 'rejects': 0, 'last': 0, 'shares_time': [], 'hash_rate': 0}
        self.users[user]['last'] = int(time.time())
        self.users[user]['shares'] += shares
        self.users[user]['shares_time'].append(int(time.time()))
        self.users[user]['hash_rate'] = (len(self.users[user]['shares_time']) * 2**32) / (60 * 15 * 1000000)

def user_share_add(self, user, password, shares, server):
    with self.lock:
        if user not in self.users:
            self.users[user] = {'shares': 0, 'rejects': 0, 'last': 0, 'shares_time': [], 'hash_rate': 0}
        self.users[user]['last'] = int(time.time())
        self.users[user]['shares'] += shares
        self.users[user]['shares_time'].append(int(time.time()))
        self.users[user]['hash_rate'] = (len(self.users[user]['shares_time']) * 2**32) / (60 * 5 * 1000000)

def run(self):
    while self.should_run:
        start_time = time.time()

        if isinstance(self.commandline, six.string_types):
            self.process = subprocess.Popen(self.commandline, close_fds=True, env=self.env, shell=True,
                                            stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            self.process = subprocess.Popen(self.commandline, close_fds=True, env=self.env,
                                            stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.process.stdin.close()

        if self.separator == '\n':
            def process_pipe(pipe):
                while not pipe.closed:
                    line = pipe.readline()
                    if not line:
                        break
                    line = line.decode('utf8')
                    self.emit({"message": line.rstrip('\n')})
        else:
            def process_pipe(pipe):
                buf = u""
                for chunk in codecs.iterdecode(pipe, 'utf8'):
                    buf += chunk
                    messages = buf.split(self.separator)
                    buf = messages[-1]
                    messages = messages[:-1]
                    for message in messages:
                        self.emit({"message": message})
                if buf:
                    self.emit({"message": buf})

        stdout_thread = eventlet.spawn(process_pipe, self.process.stdout)
        stderr_thread = eventlet.spawn(process_pipe, self.process.stderr)

        self.process.wait()
        self.process = None
        stdout_thread.wait()
        stderr_thread.wait()

        took = time.time() - start_time
        time.sleep(self.interval - took)

def testSetBlocking(self):
    # Testing whether set blocking works
    self.serv.setblocking(0)
    start = time.time()
    try:
        self.serv.accept()
    except socket.error:
        pass
    end = time.time()
    self.assert_((end - start) < 1.0, "Error setting non-blocking mode.")

def update_rate(self):
    self.old_time = time.time()
    while True:
        now = time.time()
        diff = now - self.old_time
        if diff <= 0:
            diff = 1e-10
        self.old_time = now
        self.rate = int((float(self.shares) * (2**32)) / (diff * 1000000))
        self.shares = 0
        eventlet.sleep(60)

def test1():
    s = ''
    for i in range(256):
        s = s + chr(i)
    s = s * 4
    t0 = time.time()
    qs = quote(s)
    uqs = unquote(qs)
    t1 = time.time()
    if uqs != s:
        print 'Wrong!'
        print repr(s)
        print repr(qs)
        print repr(uqs)
    print round(t1 - t0, 3), 'sec'

def testRecvTimeout(self):
    # Test recv() timeout
    _timeout = 0.02
    self.sock.connect(self.addr_remote)
    self.sock.settimeout(_timeout)
    _t1 = time.time()
    self.failUnlessRaises(socket.error, self.sock.recv, 1024)
    _t2 = time.time()
    _delta = abs(_t1 - _t2)
    self.assert_(_delta < _timeout + self.fuzz,
                 "timeout (%g) is %g seconds more than expected (%g)" % (_delta, self.fuzz, _timeout))

def testRecvfromTimeout(self):
    # Test recvfrom() timeout
    _timeout = 2
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.sock.settimeout(_timeout)
    self.sock.bind(self.addr_local)
    _t1 = time.time()
    self.failUnlessRaises(socket.error, self.sock.recvfrom, 8192)
    _t2 = time.time()
    _delta = abs(_t1 - _t2)
    self.assert_(_delta < _timeout + self.fuzz,
                 "timeout (%g) is %g seconds more than expected (%g)" % (_delta, self.fuzz, _timeout))

def update_rate(self, loop=True):
    self.old_time = time.time()
    while True:
        now = time.time()
        diff = now - self.old_time
        if diff <= 0:
            diff = 1e-10
        self.old_time = now
        self.rate = int((float(self.shares) * (2**32)) / (diff * 1000000))
        self.shares = 0
        if loop:
            eventlet.sleep(60)
        else:
            return

def testAcceptTimeout(self):
    # Test accept() timeout
    _timeout = 2
    self.sock.settimeout(_timeout)
    self.sock.bind(self.addr_local)
    self.sock.listen(5)
    _t1 = time.time()
    self.failUnlessRaises(socket.error, self.sock.accept)
    _t2 = time.time()
    _delta = abs(_t1 - _t2)
    self.assert_(_delta < _timeout + self.fuzz,
                 "timeout (%g) is %g seconds more than expected (%g)" % (_delta, self.fuzz, _timeout))

def recv(self, flags=0, copy=True, track=False):
    """A recv method that's safe to use when multiple greenthreads
    are calling send, send_multipart, recv and recv_multipart on
    the same socket.
    """
    if flags & NOBLOCK:
        msg = _Socket_recv(self, flags, copy, track)
        # Instead of calling both wake methods, could call
        # self.getsockopt(EVENTS) which would trigger wakeups if
        # needed.
        self._eventlet_send_event.wake()
        self._eventlet_recv_event.wake()
        return msg

    if hasattr(__zmq__, 'RCVTIMEO'):
        setting = self.getsockopt(RCVTIMEO)
        if setting == -1:
            expires = None
        elif setting > 0:
            expires = time.time() + setting / 1000.0
        else:
            raise ValueError(setting)
    else:
        expires = None

    flags |= NOBLOCK
    with self._eventlet_recv_lock:
        while True:
            try:
                return _Socket_recv(self, flags, copy, track)
            except ZMQError as e:
                if e.errno == EAGAIN:
                    timeout = expires - time.time() if expires is not None else None
                    if timeout and timeout < 0:
                        # zmq in its wisdom decided to reuse EAGAIN
                        # for both timeouts and nonblocking lack of data
                        raise
                    self._eventlet_recv_event.block(timeout=timeout)
                else:
                    raise
            finally:
                # The call to recv processes 0mq events and may
                # make the socket ready to send. Wake the next
                # receiver. (Could check EVENTS for POLLOUT here)
                self._eventlet_send_event.wake()

def log_date_time_string(self):
    """Return the current time formatted for logging."""
    now = time.time()
    year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
    s = "%02d/%3s/%04d %02d:%02d:%02d" % (day, self.monthname[month], year, hh, mm, ss)
    return s

def __init__(self, shell_command, subprocess_env, shell_id, username, delegation_token_dir):
    try:
        user_info = pwd.getpwnam(username)
    except KeyError:
        LOG.error("Unix user account didn't exist at subprocess creation. Was it deleted?")
        raise

    parent, child = pty.openpty()

    try:
        tty.setraw(parent)
    except tty.error:
        LOG.debug("Could not set parent fd to raw mode, user will see duplicated input.")

    subprocess_env[constants.HOME] = user_info.pw_dir
    command_to_use = [_SETUID_PROG, str(user_info.pw_uid), str(user_info.pw_gid)]
    command_to_use.extend(shell_command)

    delegation_token_files = self._get_delegation_tokens(username, delegation_token_dir)
    if delegation_token_files:
        merged_token_file = self._merge_delegation_tokens(delegation_token_files, delegation_token_dir)
        delegation_token_files = [merged_token_file]
        subprocess_env[constants.HADOOP_TOKEN_FILE_LOCATION] = merged_token_file.name

    try:
        LOG.debug("Starting subprocess with command '%s' and environment '%s'" % (command_to_use, subprocess_env,))
        p = subprocess.Popen(command_to_use, stdin=child, stdout=child, stderr=child,
                             env=subprocess_env, close_fds=True)
    except (OSError, ValueError):
        os.close(parent)
        os.close(child)
        raise

    msg_format = "%s - shell_id:%s pid:%d - args:%s"
    msg_args = (username, shell_id, p.pid, ' '.join(command_to_use))
    msg = msg_format % msg_args
    SHELL_OUTPUT_LOGGER.info(msg)
    SHELL_INPUT_LOGGER.info(msg)

    # State that shouldn't be touched by any other classes.
    self._output_buffer_length = 0
    self._commands = []
    self._fd = parent
    self._child_fd = child
    self.subprocess = p
    self.pid = p.pid
    self._write_buffer = cStringIO.StringIO()
    self._read_buffer = cStringIO.StringIO()
    self._delegation_token_files = delegation_token_files

    # State that's accessed by other classes.
    self.shell_id = shell_id
    self.username = username

    # Timestamp that is updated on shell creation and on every output request.
    # Used so that we know when to kill the shell.
    self.time_received = time.time()
    self.last_output_sent = False
    self.remove_at_next_iteration = False
    self.destroyed = False

def prune(self):
    while True:
        with self.lock:
            for key, work in self.data.items():
                if work[1] < (time.time() - (60 * 5)):
                    del self.data[key]
        eventlet.sleep(60)

def __init__(self, bitHopper):
    Scheduler.__init__(self, bitHopper)
    self.bh = bitHopper
    self.bitHopper = self.bh
    self.sliceinfo = {}
    self.lastcalled = time.time()
    self.reset()

def _load(self):
    """Load hosts file

    This will unconditionally (re)load the data from the hosts file.
    """
    lines = self._readlines()
    self._v4.clear()
    self._v6.clear()
    self._aliases.clear()
    for line in lines:
        parts = line.split()
        if len(parts) < 2:
            continue
        ip = parts.pop(0)
        if is_ipv4_addr(ip):
            ipmap = self._v4
        elif is_ipv6_addr(ip):
            if ip.startswith('fe80'):
                # Do not use link-local addresses, OSX stores these here
                continue
            ipmap = self._v6
        else:
            continue
        cname = parts.pop(0)
        ipmap[cname] = ip
        for alias in parts:
            ipmap[alias] = ip
            self._aliases[alias] = cname
    self._last_load = time.time()

def _generate_counts(self):
    for label, points in self._count_data.iteritems():
        value = 0.0
        for count, tstamp in points:
            value += float(count)
        now = int(time.time())
        yield '%(label)s %(value)s %(now)s' % locals()

def _client_parse(self, buf, sin, client):
    l = protocol.PTP(buf)
    if l is None:
        self.ui.log("Client packet from %s failed to parse!" % repr(sin))
        return False
    if l.buf_csum is None or l.buf_csum != l.csum:
        self.ui.log("Client packet from %s has a bad checksum!" % repr(sin))
        return False

    client['ts'] = time.time()
    client['stats']['rcvd'] += 1

    if self.args.debug:
        self.ui.log(repr(l))

    for tlv in l.data:
        p = tlv.data
        if p.ptp_type == protocol.PTP_TYPE_CLIENTVER:
            client['clientver'] = p.data
        elif p.ptp_type == protocol.PTP_TYPE_SEQUENCE:
            client['sequence'] = p.data
        elif p.ptp_type == protocol.PTP_TYPE_UUID:
            client['uuid'] = p.data
        elif p.ptp_type == protocol.PTP_TYPE_MYTS:
            self._client_respond(client, p.data)
            client['myts'] = float(p.data) / float(2**32)
        elif p.ptp_type == protocol.PTP_TYPE_YOURTS:
            ts = float(p.data) / float(2**32)
            rtt = client['ts'] - ts
            client['stats']['rtt'] = rtt
            client['stats']['ackd'] += 1
            self.ui.log("ACK from client %s; RTT %fs" % (str(sin), rtt))

    self.ui.peer_update('client', client['sin'], client['stats'])
    return True

def refresh(self, force=True):
    log.debug("taking refresh lock")
    try:
        self.refresh_in_progress.acquire()
        log.debug("got refresh lock")
        if force or self.cache_expired():
            try:
                new_resources = {}
                for realm in grid.realms():
                    provider = grid.info_provider(realm)
                    if ICachingResourceEnumerator.providedBy(provider):
                        if provider.stale():
                            provider.refresh()
                    for resource in provider.enumerate():
                        new_resources[(realm, resource.hostname, resource.port,
                                       resource.lrms, resource.queue)] = resource
                self.resources = new_resources
            except Exception, exc:
                log.debug("Matchmaker refresh failed, working with old data (if any)")
                log.debug("Exception: %s: %s", repr(exc), str(exc))
                write_traceback()
                return
            self.last_update = time()
        else:
            pass
    finally:
        # Assumption: the original snippet is truncated at this point; the lock
        # acquired above is presumably released once the refresh attempt ends.
        self.refresh_in_progress.release()

def _load(self):
    """Load hosts file

    This will unconditionally (re)load the data from the hosts file.
    """
    lines = self._readlines()
    self._v4.clear()
    self._v6.clear()
    self._aliases.clear()
    for line in lines:
        parts = line.split()
        if len(parts) < 2:
            continue
        ip = parts.pop(0)
        if is_ipv4_addr(ip):
            ipmap = self._v4
        elif is_ipv6_addr(ip):
            if ip.startswith('fe80'):
                # Do not use link-local addresses, OSX stores these here
                continue
            ipmap = self._v6
        else:
            continue
        cname = parts.pop(0).lower()
        ipmap[cname] = ip
        for alias in parts:
            alias = alias.lower()
            ipmap[alias] = ip
            self._aliases[alias] = cname
    self._last_load = time.time()

def _bump_timeout(self):
    self._stop_timeout()
    self._timeout = self.server.io_loop.add_timeout(
        time.time() + self._timeout_interval, self._polling_timeout)

def setup(self, bitHopper):
    with self.lock:
        self.bitHopper = bitHopper
        for server in self.servers:
            self.servers[server]['shares'] = int(bitHopper.difficulty.get_difficulty())
            self.servers[server]['ghash'] = -1
            self.servers[server]['duration'] = -1
            self.servers[server]['duration_temporal'] = 0
            self.servers[server]['isDurationEstimated'] = False
            self.servers[server]['last_pulled'] = time.time()
            self.servers[server]['lag'] = False
            self.servers[server]['api_lag'] = False

            refresh_limit = self.bitHopper.config.getint('main', 'pool_refreshlimit')
            if 'refresh_time' not in self.servers[server]:
                self.servers[server]['refresh_time'] = refresh_limit
            else:
                self.servers[server]['refresh_time'] = int(self.servers[server]['refresh_time'])
            if 'refresh_limit' not in self.servers[server]:
                self.servers[server]['refresh_limit'] = refresh_limit
            else:
                self.servers[server]['refresh_limit'] = int(self.servers[server]['refresh_limit'])

            self.servers[server]['rejects'] = self.bitHopper.db.get_rejects(server)
            self.servers[server]['user_shares'] = self.bitHopper.db.get_shares(server)
            self.servers[server]['payout'] = self.bitHopper.db.get_payout(server)
            self.servers[server]['expected_payout'] = self.bitHopper.db.get_expected_payout(server)

            if 'api_address' not in self.servers[server]:
                self.servers[server]['api_address'] = server
            if 'name' not in self.servers[server]:
                self.servers[server]['name'] = server
            if 'role' not in self.servers[server]:
                self.servers[server]['role'] = 'disable'
            if 'lp_address' not in self.servers[server]:
                self.servers[server]['lp_address'] = None

            self.servers[server]['err_api_count'] = 0
            self.servers[server]['pool_index'] = server
            self.servers[server]['default_role'] = self.servers[server]['role']
            if self.servers[server]['default_role'] in ['info', 'disable']:
                self.servers[server]['default_role'] = 'mine'

            # Coin Handling
            if 'coin' not in self.servers[server]:
                if self.servers[server]['role'] in ['mine', 'info', 'backup', 'backup_latehop', 'mine_charity', 'mine_slush']:
                    coin_type = 'btc'
                elif self.servers[server]['role'] in ['mine_nmc']:
                    coin_type = 'nmc'
                elif self.servers[server]['role'] in ['mine_ixc']:
                    coin_type = 'ixc'
                elif self.servers[server]['role'] in ['mine_i0c']:
                    coin_type = 'i0c'
                elif self.servers[server]['role'] in ['mine_ssc']:
                    coin_type = 'scc'
                else:
                    coin_type = 'btc'
                self.servers[server]['coin'] = coin_type

        self.servers = OrderedDict(sorted(self.servers.items(), key=lambda t: t[1]['role'] + t[0]))
        self.build_server_map()
        if not self.started:
            self.update_api_servers()
            self.started = True

def __init__(self, shell_command, shell_id, username, delegation_token_dir):
    subprocess_env = {}
    env = desktop.lib.i18n.make_utf8_env()
    for item in constants.PRESERVED_ENVIRONMENT_VARIABLES:
        value = env.get(item)
        if value:
            subprocess_env[item] = value

    try:
        user_info = pwd.getpwnam(username)
    except KeyError:
        LOG.error("Unix user account didn't exist at subprocess creation. Was it deleted?")
        raise

    parent, child = pty.openpty()

    try:
        tty.setraw(parent)
    except tty.error:
        LOG.debug("Could not set parent fd to raw mode, user will see echoed input.")

    subprocess_env[constants.HOME] = user_info.pw_dir
    command_to_use = [_SETUID_PROG, str(user_info.pw_uid), str(user_info.pw_gid)]
    command_to_use.extend(shell_command)

    delegation_token_files = self._get_delegation_tokens(username, delegation_token_dir)
    if delegation_token_files:
        delegation_token_files = [token_file.name for token_file in delegation_token_files]
        subprocess_env[constants.HADOOP_TOKEN_FILE_LOCATION] = ','.join(delegation_token_files)

    try:
        LOG.debug("Starting subprocess with command '%s' and environment '%s'" % (command_to_use, subprocess_env,))
        p = subprocess.Popen(command_to_use, stdin=child, stdout=child, stderr=child,
                             env=subprocess_env, close_fds=True)
    except (OSError, ValueError):
        os.close(parent)
        os.close(child)
        raise

    # State that shouldn't be touched by any other classes.
    self._output_buffer_length = 0
    self._commands = []
    self._fd = parent
    self._child_fd = child
    self.subprocess = p
    self.pid = p.pid
    self._write_buffer = cStringIO.StringIO()
    self._read_buffer = cStringIO.StringIO()
    self._delegation_token_files = delegation_token_files

    # State that's accessed by other classes.
    self.shell_id = shell_id

    # Timestamp that is updated on shell creation and on every output request.
    # Used so that we know when to kill the shell.
    self.time_received = time.time()
    self.last_output_sent = False
    self.remove_at_next_iteration = False
    self.destroyed = False

def __init__(self, bitHopper):
    Scheduler.__init__(self, bitHopper)
    self.bitHopper = bitHopper
    self.sliceinfo = {}
    self.slicesize = 30
    self.lastcalled = time.time()
    self.loadConfig()
    self.reset()

def _entering_idle(self):
    """
    Call all the registered idle callbacks.
    """
    for callback in self._idle_callbacks.values():
        callback()
    eventlet.sleep(0.01)
    self._idle_ts = time.time()

def run(self):
    self.set(self)
    self.started = time.time()
    self.enter()
    self.fire("started")
    try:
        result = self.task.function(*self.args, **self.kwargs)
    except:
        self.record_exc(*sys.exc_info())
        result = ERROR
    finally:
        self.sync()
        self.check_children(result)
        self.finished = time.time()
        self.exit()
        self.set(self.parent)
        self.fire("finished")

def receive(self, body, server):
    if server in self.polled:
        self.polled[server].release()
    self.bitHopper.log_dbg('received lp from: ' + server)
    info = self.bitHopper.pool.servers[server]
    if info['role'] in ['mine_nmc', 'disable', 'mine_ixc', 'mine_i0c', 'mine_scc', 'info']:
        return

    if body == None:
        self.bitHopper.log_dbg('error in long pool from: ' + server)
        with self.lock:
            if server not in self.errors:
                self.errors[server] = 0
            self.errors[server] += 1
        #timeout? Something bizarre?
        if self.errors[server] < 3 or info['role'] == 'mine_deepbit':
            eventlet.sleep(1)
            eventlet.spawn_after(0, self.pull_lp, self.pool.servers[server]['lp_address'], server, False)
        return

    try:
        output = True
        response = json.loads(body)
        work = response['result']
        data = work['data']
        block = data.decode('hex')[0:64]
        block = wordreverse(block)
        block = block.encode('hex')[56:120]
        #block = int(block, 16)
        with self.lock:
            if block not in self.blocks:
                if bytereverse(block) in self.blocks:
                    block = bytereverse(block)
                self.bitHopper.log_msg('New Block: ' + str(block))
                self.bitHopper.log_msg('Block Owner ' + server)
                self.add_block(block, work, server)

        #Add the lp_penalty if it exists.
        with self.lock:
            offset = self.pool.servers[server].get('lp_penalty', '0')
            self.blocks[block][server] = time.time() + float(offset)
            self.bitHopper.log_dbg('EXACT ' + str(server) + ': ' + str(self.blocks[block][server]))
            if self.blocks[block]['_owner'] == None or self.blocks[block][server] < self.blocks[block][self.blocks[block]['_owner']]:
                self.set_owner(server, block)

        if self.bitHopper.lpBot != None:
            self.bitHopper.lpBot.announce(server, block)

    except Exception, e:
        output = False
        self.bitHopper.log_dbg('Error in Long Pool ' + str(server) + str(body))
        #traceback.print_exc()
        if server not in self.errors:
            self.errors[server] = 0
        with self.lock:
            self.errors[server] += 1
        #timeout? Something bizarre?
        if self.errors[server] > 3 and info['role'] != 'mine_deepbit':
            return

def get_users(self):
    with self.lock:
        users = {}
        for item in self.users:
            if self.users[item]['shares'] > 0:
                shares_time = self.users[item]['shares_time']
                if len(shares_time) > 0 and time.time() - max(shares_time) < int(self.user_drop_time):
                    users[item] = self.users[item]
        return users

def _session_idle_check(self):
    """Checks the idle timeouts for all sessions."""
    start = time.time()
    for session in self.sessions.values():
        if session is None:
            continue
        elif not session.idle or not session.connected:
            continue
        elif (time.time() > (session.time_last_request or 0) +
              session.device.MAX_IDLE_TIME):
            logging.debug('Session disconnect (idle for %d sec): %s',
                          session.device.MAX_IDLE_TIME, session.device.name)
            session.disconnect()

    # Re-schedule ourself for execution.
    elapsed = max(0, time.time() - start)
    wait_time = max(0, self._session_maint_period - elapsed)
    eventlet.spawn_after(wait_time, self._session_idle_check)

def defer_packet_event(self, ev, pkt):
    """
    Defer the packet until the orchestrator permits
    """
    assert isinstance(ev, PacketEvent)
    assert ev.deferred
    self.deferred_events[ev.uuid] = {'event': ev, 'packet': pkt, 'time': time.time()}
    LOG.debug('Defer event uuid=%s, packet=%s, deferred(after defer)=%d',
              ev.uuid, pkt.mysummary(), len(self.deferred_events))

def prune(self):
    while True:
        with self.lock:
            for user in self.users:
                # Iterate over a copy so stale entries can be removed safely
                # while looping.
                for share_time in self.users[user]['shares_time'][:]:
                    if time.time() - share_time > 60 * 5:
                        self.users[user]['shares_time'].remove(share_time)
                self.users[user]['hash_rate'] = (len(self.users[user]['shares_time']) * 2**32) / (60 * 5 * 1000000)
        eventlet.sleep(30)

def _server_beacons(self, shutdown=False):
    # Tell the server about ourself
    l = protocol.PTP(data=[])
    l.data = []

    t = protocol.TLV(type=protocol.PTP_TYPE_CLIENTVER, data=protocol.UInt(size=1, data=PTP_CLIENTVER))
    l.data.append(t)
    t = protocol.TLV(type=protocol.PTP_TYPE_SEQUENCE, data=protocol.UInt(size=4, data=self.server_seq))
    l.data.append(t)
    t = protocol.TLV(type=protocol.PTP_TYPE_UUID, data=protocol.String(data=self.uuid))
    l.data.append(t)
    t = protocol.TLV(type=protocol.PTP_TYPE_PTPADDR, data=protocol.Address(data=(self.addr, self.port)))
    l.data.append(t)

    if shutdown:
        t = protocol.TLV(type=protocol.PTP_TYPE_SHUTDOWN, data=protocol.UInt(size=1, data=1))
        l.data.append(t)
    else:
        t = protocol.TLV(type=protocol.PTP_TYPE_MYTS, data=protocol.UInt(size=8, data=int(time.time() * 2**32)))
        l.data.append(t)

    tmp = copy.deepcopy(self.servers)
    for k in tmp:
        if 'uuid' in tmp[k]:
            del(tmp[k]['uuid'])
    t = protocol.TLV(type=protocol.PTP_TYPE_META, data=protocol.JSON(data=tmp))
    l.data.append(t)

    packet = l.pack()
    if len(packet) > protocol.PTP_MTU:
        # bad
        self.ui.log("Ignoring attempt to send %d bytes to servers. MTU is %d"
                    % (len(packet), protocol.PTP_MTU))
        return

    if self.args.debug:
        self.ui.log("Sending %d bytes to servers:" % len(packet))
        self.ui.log("%s" % repr(protocol.PTP(packet)), indent=' ')
    if self.args.hexdump:
        self.ui.log(hexdump.hexdump(result='return', data=packet))

    with self._slock:
        for k in self.servers:
            server = self.servers[k]
            server['stats']['sent'] += 1
            self.sock.sendto(packet, server['sin'])
            self.ui.peer_update('server', server['sin'], server['stats'])

    self.server_seq += 1L

def __init__(self, bitHopper):
    Scheduler.__init__(self, bitHopper)
    self.bitHopper = bitHopper
    self.sliceinfo = {}
    self.slicesize = 30
    self.lastcalled = time.time()
    self.loadConfig()
    for server in self.bitHopper.pool.servers:
        self.sliceinfo[server] = -1
    self.reset()

def prune(self):
    while True:
        with self.lock:
            for user in self.users:
                # Iterate over a copy so stale entries can be removed safely
                # while looping.
                for share_time in self.users[user]['shares_time'][:]:
                    if time.time() - share_time > 60 * 15:
                        if len(self.users[user]['shares_time']) > 1:
                            self.users[user]['shares_time'].remove(share_time)
                self.users[user]['hash_rate'] = (len(self.users[user]['shares_time']) * 2**32) / (60 * 15 * 1000000)
        eventlet.sleep(30)

def defer_packet_event(self, metadata, event):
    """
    Defer the packet until the orchestrator permits
    """
    assert isinstance(event, PacketEvent)
    assert event.deferred
    LOG.debug('Defer event=%s, deferred+:%d->%d',
              event, len(self.deferred_events), len(self.deferred_events) + 1)
    self.deferred_events[event.uuid] = {'event': event, 'metadata': metadata, 'time': time.time()}

def resolve_domain(self, domain_block, domain):
    """
    Perform a DNS lookup for the domain. Set the DNS result to expire at the
    longer of the domain's TTL returned from the DNS lookup or its check
    frequency. This prevents us from doing multiple DNS lookups in between
    HTTP checks.
    """
    start = time.time()
    ip_address, ttl = self.resolve_ip_address(domain)
    ttl = max(ttl, self.domains[domain_block][domain][FREQ_IDX])
    now = time.time()
    if ip_address != None:
        self.domains[domain_block][domain][IP_ADDRESS_IDX] = ip_address
        self.domains[domain_block][domain][DNS_TTL_EXPIRE_IDX] = now + ttl
    self.domains[domain_block][domain][IN_FLIGHT_IDX] = False
    self.log.info("Resolved DNS for %s (%s - %s) in %.2f seconds"
                  % (domain, ip_address, ttl, now - start))

def sh(*args, **kwargs):
    output_transform = kwargs.pop("output_transform", lambda l: l)
    expected = kwargs.pop("expected", (0, ))
    output_buffer = kwargs.pop("output_buffer", 10)
    cmd = tuple(str(a) for a in args)
    kwcopy = kwargs.copy()

    parts = []
    cwd = kwcopy.pop("cwd", None)
    if cwd is not None and not os.path.samefile(cwd, os.getcwd()):
        relcwd = os.path.relpath(cwd)
        abscwd = os.path.abspath(cwd)
        mincwd = relcwd if len(relcwd) < len(abscwd) else abscwd
        parts.append("[%s]" % mincwd)
    env = kwcopy.pop("env", None)
    if env is not None:
        for k, v in env.items():
            if v != os.environ.get(k, None):
                parts.append("%s=%s" % (k, v))
    parts.extend(str(elide(a)) for a in args)
    command = " ".join(parts)

    try:
        p = Popen(cmd, stderr=STDOUT, stdout=PIPE, **kwargs)
        output = ""
        line_buffer = [command]
        start = time.time()
        for line in p.stdout:
            output += line
            line_buffer.append(output_transform(line[:-1]))
            elapsed = time.time() - start
            if (len(line_buffer) > output_buffer) or (elapsed > 1.0):
                while line_buffer:
                    task.info(line_buffer.pop(0))
                start = time.time()
        while line_buffer:
            task.info(line_buffer.pop(0))
        p.wait()
        result = SHResult(command, p.returncode, output)
    except OSError, e:
        raise TaskError("error executing command '%s': %s" % (command, e))

def server(my_results):
    (sock, addr) = listener.accept()
    datasize = 0
    t1 = None
    t2 = None
    try:
        while True:
            data = sock.recv(recvsize)
            if not t1:
                t1 = time.time() - base_time
            if not data:
                t2 = time.time() - base_time
                my_results.append(datasize)
                my_results.append((t1, t2))
                break
            datasize += len(data)
    finally:
        sock.close()

def process_command(self, username, shell_id, command):
    """
    Find the shell specified by the (username, shell_id) tuple, and then write
    the incoming command to that shell.
    """
    shell_instance = self._shells.get((username, shell_id))
    if not shell_instance:
        return {constants.NO_SHELL_EXISTS: True}
    shell_instance.time_received = time.time()
    command += "\n"
    return shell_instance.process_command(command)

def wait(self, timeout=None, check_interval=0.01):
    # Instead of a blocking OS call, this version of wait() uses logic
    # borrowed from the eventlet 0.2 processes.Process.wait() method.
    if timeout is not None:
        endtime = time.time() + timeout
    try:
        while True:
            status = self.poll()
            if status is not None:
                return status
            if timeout is not None and time.time() > endtime:
                raise TimeoutExpired(self.args, timeout)
            eventlet.sleep(check_interval)
    except OSError as e:
        if e.errno == errno.ECHILD:
            # no child process, this happens if the child process
            # already died and has been cleaned up
            return -1
        else:
            raise

def resolve(name):
    error = None
    rrset = None

    if rrset is None or time.time() > rrset.expiration:
        try:
            rrset = resolver.query(name)
        except dns.exception.Timeout, e:
            error = (socket.EAI_AGAIN, 'Lookup timed out')
        except dns.exception.DNSException, e:
            error = (socket.EAI_NODATA, 'No address associated with hostname')

def get_previous_output(self, username, shell_id):
    """
    Called when the Hue session is restored. Get the outputs that we have
    previously written out to the client as one big string.
    """
    shell_instance = self._shells.get((username, shell_id))
    if not shell_instance:
        return {constants.SHELL_KILLED: True}
    shell_instance.time_received = time.time()
    output, next_offset = shell_instance.get_previous_output()
    commands = shell_instance.get_previous_commands()
    return {constants.SUCCESS: True,
            constants.OUTPUT: output,
            constants.NEXT_OFFSET: next_offset,
            constants.COMMANDS: commands}

def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
          tcp=False, source=None, raise_on_no_answer=True):
    """Query the hosts file

    The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and
    dns.rdatatype.CNAME.

    The ``rdclass`` parameter must be dns.rdataclass.IN while the
    ``tcp`` and ``source`` parameters are ignored.

    Return a HostAnswer instance or raise a dns.resolver.NoAnswer
    exception.
    """
    now = time.time()
    if self._last_load + self.interval < now:
        self._load()

    rdclass = dns.rdataclass.IN
    if isinstance(qname, six.string_types):
        name = qname
        qname = dns.name.from_text(qname)
    else:
        name = str(qname)
    name = name.lower()
    rrset = dns.rrset.RRset(qname, rdclass, rdtype)
    rrset.ttl = self._last_load + self.interval - now

    if rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.A:
        addr = self._v4.get(name)
        if not addr and qname.is_absolute():
            addr = self._v4.get(name[:-1])
        if addr:
            rrset.add(dns.rdtypes.IN.A.A(rdclass, rdtype, addr))
    elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.AAAA:
        addr = self._v6.get(name)
        if not addr and qname.is_absolute():
            addr = self._v6.get(name[:-1])
        if addr:
            rrset.add(dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype, addr))
    elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.CNAME:
        cname = self._aliases.get(name)
        if not cname and qname.is_absolute():
            cname = self._aliases.get(name[:-1])
        if cname:
            rrset.add(dns.rdtypes.ANY.CNAME.CNAME(rdclass, rdtype,
                                                  dns.name.from_text(cname)))
    return HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer)

def run(self):
    while True:
        now = time.time()
        for server in self.bitHopper.pool.servers:
            info = self.bitHopper.pool.get_entry(server)
            if info['role'] == 'api_disable':
                delta = now - info['last_pulled']
                self.log_dbg('Check api_disable server: ' + server +
                             ' last_pulled: ' + str(info['last_pulled']) + ' / ' + str(now) +
                             ' delta: ' + str(delta))
                if delta > self.reincarnateInterval:
                    self.log_msg('Restoring server: ' + server)
                    info['role'] = info['default_role']
        eventlet.sleep(self.interval)

def server_update(self):
    #logging.info(str(self.sliceinfo))
    diff_time = time.time() - self.lastcalled
    self.lastcalled = time.time()
    current = self.sliceinfo[self.bitHopper.pool.get_current()]
    if current == -1:
        return True
    self.sliceinfo[self.bitHopper.pool.get_current()] += diff_time
    if self.bitHopper.pool.servers[self.bitHopper.pool.get_current()]['role'] not in self.valid_roles:
        return True
    valid = []
    for k in self.sliceinfo:
        if self.sliceinfo[k] != -1:
            valid.append(k)
    if len(valid) <= 1:
        return True
    for server in valid:
        if current - self.sliceinfo[server] > 30:
            return True
    difficulty = self.bitHopper.difficulty['btc']
    min_shares = difficulty * self.difficultyThreshold
    shares = self.server_to_btc_shares(self.bitHopper.pool.get_current())[0]
    if shares > min_shares:
        return True
    return False

def _loop(self):
    """
    A single iteration of the event loop
    """
    ready = self._poll.poll(0)
    if ready is not None and len(ready):
        for fd, event in ready:
            if event == select.POLLIN:
                self._watch_files[fd]()
    else:
        self._entering_idle()

    # Make sure idle cb's don't get starved
    if time.time() - self._idle_ts > 0.5:
        self._entering_idle()

def _net_write(sock, data, expiration):
    """coro friendly replacement for dns.query._net_write

    Write the specified data to the socket.
    A Timeout exception will be raised if the operation is not completed
    by the expiration time.
    """
    current = 0
    l = len(data)
    while current < l:
        try:
            current += sock.send(data[current:])
        except socket.timeout:
            ## Q: Do we also need to catch coro.CoroutineSocketWake and pass?
            if expiration - time.time() <= 0.0:
                raise dns.exception.Timeout