def loadConfig(self):
    parser = ConfigParser.SafeConfigParser()

    read = self.load_file('user.cfg', parser)
    if len(read) == 0:
        bitHopper.log_msg("user.cfg not found. You may need to move it from user.cfg.default")
        os._exit(1)

    userpools = parser.sections()

    for file_name in self.pool_configs:
        read = self.load_file(file_name, parser)
        if len(read) == 0:
            self.bitHopper.log_msg(file_name + " not found.")
            if self.initialized == False:
                os._exit(1)

    for pool in userpools:
        self.servers[pool] = dict(parser.items(pool))

    if self.servers == {}:
        bitHopper.log_msg("No pools found in pools.cfg or user.cfg")

    if self.initialized == False:
        self.current_server = pool
    else:
        self.setup(self.bitHopper)
    self.initialized = True

def loadConfig(self):
    parser = ConfigParser.SafeConfigParser()

    read = self.load_file('user.cfg', parser)
    if len(read) == 0:
        logging.info("user.cfg not found. You may need to move it from user.cfg.default")
        os._exit(1)

    userpools = parser.sections()

    read_items = 0
    for file_name in self.pool_configs:
        read = self.load_file(file_name, parser)
        read_items += len(read)
        if len(read) == 0:
            logging.info(file_name + " not found.")

    if self.initialized == False:
        if read_items == 0:
            os._exit(1)

    for pool in userpools:
        self.servers[pool] = pool_class.Pool(pool, dict(parser.items(pool)), self.bitHopper)

    for pool in parser.sections():
        try:
            if 'role' in dict(parser.items(pool)) and pool not in self.servers:
                self.servers[pool] = pool_class.Pool(pool, dict(parser.items(pool)), self.bitHopper)
        except:
            continue

    # random UA strings
    try:
        if self.bitHopper.config.getboolean('main', 'use_random_ua'):
            ua_strings = self.bitHopper.config.get('main', 'random_ua_list').split('|')
            for pool in self.servers:
                if 'user_agent' not in self.servers[pool]:
                    idx = random.randint(0, len(ua_strings) - 1)
                    self.servers[pool]['user_agent'] = ua_strings[idx]
    except:
        traceback.print_exc()

    if self.servers == {}:
        logging.info("No pools found in pools.cfg or user.cfg")

    if len(self.current_list) == 0:
        self.current_list = [pool]

    self.bitHopper.db.check_database()

def select_best_server(self):
    server_name = self.scheduler.select_best_server()
    if not server_name:
        self.log_msg('FATAL Error, scheduler did not return any pool!')
        os._exit(-1)

    if self.pool.get_current() != server_name:
        self.pool.set_current(server_name)
        self.log_msg("Server change to " + str(self.pool.get_current()))
    return

def acceptor(self, pool):
    greenthread.getcurrent()
    while self.alive:
        try:
            conn, addr = self.socket.accept()
            gt = pool.spawn(self.handle, conn, addr)
            gt.link(self.cleanup, conn)
            conn, addr, gt = None, None, None
        except eventlet.StopServe:
            return
        except:
            self.log.exception("Unexpected error in acceptor. Sepuku.")
            os._exit(4)

def loadConfig(self):
    parser = ConfigParser.SafeConfigParser()

    read = self.load_file('user.cfg', parser)
    if len(read) == 0:
        logging.info("user.cfg not found. You may need to move it from user.cfg.default")
        os._exit(1)

    userpools = parser.sections()

    read_items = 0
    for file_name in self.pool_configs:
        read = self.load_file(file_name, parser)
        read_items += len(read)
        if len(read) == 0:
            logging.info(file_name + " not found.")

    if self.initialized == False:
        if read_items == 0:
            os._exit(1)

    for pool in userpools:
        self.servers[pool] = pool_class.Pool(pool, dict(parser.items(pool)), self.bitHopper)

    for pool in parser.sections():
        try:
            if 'role' in dict(parser.items(pool)) and pool not in self.servers:
                self.servers[pool] = pool_class.Pool(pool, dict(parser.items(pool)), self.bitHopper)
        except:
            continue

    # random UA strings
    try:
        if self.bitHopper.config.getboolean('main', 'use_random_ua'):
            ua_strings = self.bitHopper.config.get('main', 'random_ua_list').split('|')
            for pool in self.servers:
                if 'user_agent' not in self.servers[pool]:
                    idx = random.randint(0, len(ua_strings) - 1)
                    self.servers[pool]['user_agent'] = ua_strings[idx]
    except:
        traceback.print_exc()

    if self.servers == {}:
        logging.info("No pools found in pools.cfg or user.cfg")

    if len(self.current_list) == 0:
        self.current_list = [pool]

    self.bitHopper.db.check_database()

def select_best_server(self):
    if self.scheduler == None:
        server_list = [self.pool.servers.keys()[0]]
        backup_list = []
    else:
        server_list, backup_list = self.scheduler.select_best_server()

    old_server = self.pool.get_current()

    # Find the server with highest priority
    max_priority = 0
    for server in server_list:
        info = self.pool.get_entry(server)
        if info['priority'] > max_priority:
            max_priority = info['priority']

    # Keep all servers with this priority
    server_list = [server for server in server_list
                   if self.pool.get_entry(server)['priority'] >= max_priority]

    if len(server_list) == 0:
        try:
            backup_type = self.config.get('main', 'backup_type')
        except:
            backup_type = 'rejectrate'
        if backup_type == 'slice':
            server_list = backup_list
        elif backup_type == 'rejectrate':
            server_list = [backup_list[0]]
        elif backup_type == 'earlyhop':
            backup_list.sort(key=lambda pool: self.pool.servers[pool]['shares'])
            server_list = [backup_list[0]]
        elif backup_type == 'latehop':
            backup_list.sort(key=lambda pool: -1 * self.pool.servers[pool]['shares'])
            server_list = [backup_list[0]]

    if len(server_list) == 0:
        logging.info('FATAL Error, scheduler did not return any pool!')
        os._exit(1)

    self.pool.current_list = server_list
    self.pool.build_server_map()
    return

def loadConfig(self):
    parser = ConfigParser.SafeConfigParser()

    read = self.load_file("user.cfg", parser)
    if len(read) == 0:
        self.bitHopper.log_msg("user.cfg not found. You may need to move it from user.cfg.default")
        os._exit(1)

    userpools = parser.sections()

    for file_name in self.pool_configs:
        read = self.load_file(file_name, parser)
        if len(read) == 0:
            self.bitHopper.log_msg(file_name + " not found.")
            if self.initialized == False:
                os._exit(1)

    for pool in userpools:
        self.servers[pool] = pool_class.Pool(pool, dict(parser.items(pool)), self.bitHopper)

    for pool in parser.sections():
        try:
            if "role" in dict(parser.items(pool)) and pool not in self.servers:
                self.servers[pool] = pool_class.Pool(pool, dict(parser.items(pool)), self.bitHopper)
        except:
            continue

    # random UA strings
    try:
        if self.bitHopper.config.getboolean("main", "use_random_ua"):
            ua_strings = self.bitHopper.config.get("main", "random_ua_list").split("|")
            for pool in self.servers:
                if "user_agent" not in self.servers[pool]:
                    idx = random.randint(0, len(ua_strings) - 1)
                    self.servers[pool]["user_agent"] = ua_strings[idx]
    except:
        traceback.print_exc()

    if self.servers == {}:
        self.bitHopper.log_msg("No pools found in pools.cfg or user.cfg")

    if len(self.current_list) == 0:
        self.current_list = [pool]

    self.bitHopper.db.check_database()

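# --- Hedged sketch (not part of the original sources): what the loadConfig()
# variants above expect to find on disk. Each ConfigParser section becomes one
# pool entry; 'role' and 'user_agent' are the only keys these snippets actually
# reference, and the section name and values below are purely illustrative.
import ConfigParser  # Python 2, matching the snippets above

def write_example_user_cfg(path='user.cfg'):
    parser = ConfigParser.SafeConfigParser()
    parser.add_section('examplepool')                  # hypothetical pool name
    parser.set('examplepool', 'role', 'mine')          # checked by the 'role' lookup above
    parser.set('examplepool', 'user_agent', 'example-agent/1.0')  # optional; else a random UA may be assigned
    with open(path, 'w') as cfg_file:
        parser.write(cfg_file)
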
def select_best_server(self):
    server_name = self.scheduler.select_best_server()
    if not server_name:
        self.log_msg('FATAL Error, scheduler did not return any pool!')
        os._exit(1)

    old_server = self.pool.get_current()
    if self.pool.get_current() != server_name:
        self.pool.set_current(server_name)
        self.log_msg("Server change to " + str(self.pool.get_current()))
        servers = self.pool.servers
        if servers[server_name]['coin'] != servers[old_server]['coin']:
            self.log_msg("Change in coin type. Triggering LP")
            work, server_headers, server = self.work.jsonrpc_getwork(server_name, [], {}, "", "")
            self.bitHopper.lp_callback.new_block(work, server_name)
    return

def run(self):
    self.socket.setblocking(1)
    pool = greenpool.GreenPool(self.worker_connections)
    acceptor = greenthread.spawn(self.acceptor, pool)

    while self.alive:
        self.notify()
        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s" % self)
            greenthread.kill(acceptor, eventlet.StopServe)
            break
        eventlet.sleep(0.1)

    with eventlet.Timeout(self.timeout, False):
        pool.waitall()
    os._exit(3)

def select_best_server(self):
    if self.scheduler == None:
        server_list = [self.pool.servers.keys()[0]]
        backup_list = []
    else:
        server_list, backup_list = self.scheduler.select_best_server()

    old_server = self.pool.get_current()

    if len(server_list) == 0:
        try:
            backup_type = self.config.get("main", "backup_type")
        except:
            backup_type = "rejectrate"
        if backup_type == "slice":
            server_list = backup_list
        elif backup_type == "rejectrate":
            server_list = [backup_list[0]]
        elif backup_type == "earlyhop":
            backup_list.sort(key=lambda pool: self.pool.servers[pool]["shares"])
            server_list = [backup_list[0]]
        elif backup_type == "latehop":
            backup_list.sort(key=lambda pool: -1 * self.pool.servers[pool]["shares"])
            server_list = [backup_list[0]]

    if len(server_list) == 0:
        self.log_msg("FATAL Error, scheduler did not return any pool!")
        os._exit(1)

    self.pool.current_list = server_list
    self.pool.build_server_map()
    return

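# --- Hedged sketch (not in the original sources): the [main] options consulted
# by the select_best_server() and loadConfig() variants above, written out with
# ConfigParser. The key names are taken from the snippets; the values are only
# examples, and 'bh.cfg' is the default config name used by the --config option.
import ConfigParser  # Python 2, matching the snippets above

def write_example_bh_cfg(path='bh.cfg'):
    config = ConfigParser.SafeConfigParser()
    config.add_section('main')
    config.set('main', 'port', '8337')                   # read via config.getint('main', 'port')
    config.set('main', 'scheduler', 'DefaultScheduler')  # overrides the --scheduler option
    config.set('main', 'backup_type', 'rejectrate')      # one of: slice, rejectrate, earlyhop, latehop
    config.set('main', 'use_random_ua', 'true')          # enables the random UA block in loadConfig()
    config.set('main', 'random_ua_list', 'agent-a|agent-b')  # pipe-separated, per loadConfig()
    with open(path, 'w') as cfg_file:
        config.write(cfg_file)
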
def run_cgi(self):
    """Execute a CGI script."""
    dir, rest = self.cgi_info
    path = dir + '/' + rest
    i = path.find('/', len(dir) + 1)
    while i >= 0:
        nextdir = path[:i]
        nextrest = path[i + 1:]

        scriptdir = self.translate_path(nextdir)
        if os.path.isdir(scriptdir):
            dir, rest = nextdir, nextrest
            i = path.find('/', len(dir) + 1)
        else:
            break

    # find an explicit query string, if present.
    rest, _, query = rest.partition('?')

    # dissect the part after the directory name into a script name &
    # a possible additional path, to be stored in PATH_INFO.
    i = rest.find('/')
    if i >= 0:
        script, rest = rest[:i], rest[i:]
    else:
        script, rest = rest, ''

    scriptname = dir + '/' + script
    scriptfile = self.translate_path(scriptname)
    if not os.path.exists(scriptfile):
        self.send_error(HTTPStatus.NOT_FOUND,
                        "No such CGI script (%r)" % scriptname)
        return
    if not os.path.isfile(scriptfile):
        self.send_error(HTTPStatus.FORBIDDEN,
                        "CGI script is not a plain file (%r)" % scriptname)
        return
    ispy = self.is_python(scriptname)
    if self.have_fork or not ispy:
        if not self.is_executable(scriptfile):
            self.send_error(
                HTTPStatus.FORBIDDEN,
                "CGI script is not executable (%r)" % scriptname)
            return

    # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
    # XXX Much of the following could be prepared ahead of time!
    env = copy.deepcopy(os.environ)
    env['SERVER_SOFTWARE'] = self.version_string()
    env['SERVER_NAME'] = self.server.server_name
    env['GATEWAY_INTERFACE'] = 'CGI/1.1'
    env['SERVER_PROTOCOL'] = self.protocol_version
    env['SERVER_PORT'] = str(self.server.server_port)
    env['REQUEST_METHOD'] = self.command
    uqrest = urllib.parse.unquote(rest)
    env['PATH_INFO'] = uqrest
    env['PATH_TRANSLATED'] = self.translate_path(uqrest)
    env['SCRIPT_NAME'] = scriptname
    if query:
        env['QUERY_STRING'] = query
    env['REMOTE_ADDR'] = self.client_address[0]
    authorization = self.headers.get("authorization")
    if authorization:
        authorization = authorization.split()
        if len(authorization) == 2:
            import base64, binascii
            env['AUTH_TYPE'] = authorization[0]
            if authorization[0].lower() == "basic":
                try:
                    authorization = authorization[1].encode('ascii')
                    authorization = base64.decodebytes(authorization).decode('ascii')
                except (binascii.Error, UnicodeError):
                    pass
                else:
                    authorization = authorization.split(':')
                    if len(authorization) == 2:
                        env['REMOTE_USER'] = authorization[0]
    # XXX REMOTE_IDENT
    if self.headers.get('content-type') is None:
        env['CONTENT_TYPE'] = self.headers.get_content_type()
    else:
        env['CONTENT_TYPE'] = self.headers['content-type']
    length = self.headers.get('content-length')
    if length:
        env['CONTENT_LENGTH'] = length
    referer = self.headers.get('referer')
    if referer:
        env['HTTP_REFERER'] = referer
    accept = []
    for line in self.headers.getallmatchingheaders('accept'):
        if line[:1] in "\t\n\r ":
            accept.append(line.strip())
        else:
            accept = accept + line[7:].split(',')
    env['HTTP_ACCEPT'] = ','.join(accept)
    ua = self.headers.get('user-agent')
    if ua:
        env['HTTP_USER_AGENT'] = ua
    co = filter(None, self.headers.get_all('cookie', []))
    cookie_str = ', '.join(co)
    if cookie_str:
        env['HTTP_COOKIE'] = cookie_str
    # XXX Other HTTP_* headers
    # Since we're setting the env in the parent, provide empty
    # values to override previously set values
    for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
              'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
        env.setdefault(k, "")

    self.send_response(HTTPStatus.OK, "Script output follows")
    self.flush_headers()

    decoded_query = query.replace('+', ' ')

    if self.have_fork:
        # Unix -- fork as we should
        args = [script]
        if '=' not in decoded_query:
            args.append(decoded_query)
        nobody = nobody_uid()
        self.wfile.flush()  # Always flush before forking
        pid = os.fork()
        if pid != 0:
            # Parent
            pid, sts = os.waitpid(pid, 0)
            # throw away additional data [see bug #427345]
            while select.select([self.rfile], [], [], 0)[0]:
                if not self.rfile.read(1):
                    break
            if sts:
                self.log_error("CGI script exit status %#x", sts)
            return
        # Child
        try:
            try:
                os.setuid(nobody)
            except OSError:
                pass
            os.dup2(self.rfile.fileno(), 0)
            os.dup2(self.wfile.fileno(), 1)
            os.execve(scriptfile, args, env)
        except:
            self.server.handle_error(self.request, self.client_address)
            os._exit(127)
    else:
        # Non-Unix -- use subprocess
        cmdline = [scriptfile]
        if self.is_python(scriptfile):
            interp = sys.executable
            if interp.lower().endswith("w.exe"):
                # On Windows, use python.exe, not pythonw.exe
                interp = interp[:-5] + interp[-4:]
            cmdline = [interp, '-u'] + cmdline
        if '=' not in query:
            cmdline.append(query)
        self.log_message("command: %s", subprocess.list2cmdline(cmdline))
        try:
            nbytes = int(length)
        except (TypeError, ValueError):
            nbytes = 0
        p = subprocess.Popen(cmdline,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=env)
        if self.command.lower() == "post" and nbytes > 0:
            data = self.rfile.read(nbytes)
        else:
            data = None
        # throw away additional data [see bug #427345]
        while select.select([self.rfile._sock], [], [], 0)[0]:
            if not self.rfile._sock.recv(1):
                break
        stdout, stderr = p.communicate(data)
        self.wfile.write(stdout)
        if stderr:
            self.log_error('%s', stderr)
        p.stderr.close()
        p.stdout.close()
        status = p.returncode
        if status:
            self.log_error("CGI script exit status %#x", status)
        else:
            self.log_message("CGI script exited OK")

def main():
    parser = optparse.OptionParser(description='bitHopper')
    parser.add_option('--debug', action='store_true', default=False, help='Extra error output. Basically print all caught errors')
    parser.add_option('--trace', action='store_true', default=False, help='Extra debugging output')
    parser.add_option('--listschedulers', action='store_true', default=False, help='List alternate schedulers available')
    parser.add_option('--port', type=int, default=8337, help='Port to listen on')
    parser.add_option('--scheduler', type=str, default='DefaultScheduler', help='Select an alternate scheduler')
    parser.add_option('--threshold', type=float, default=None, help='Override difficulty threshold (default 0.43)')
    parser.add_option('--altslicesize', type=int, default=900, help='Override Default AltSliceScheduler Slice Size of 900')
    parser.add_option('--altminslicesize', type=int, default=60, help='Override Default Minimum Pool Slice Size of 60 (AltSliceScheduler only)')
    parser.add_option('--altslicejitter', type=int, default=0, help='Add some random variance to slice size, disabled by default (AltSliceScheduler only)')
    parser.add_option('--altsliceroundtimebias', action='store_true', default=False, help='Bias slicing slightly by round time duration with respect to round time target (default false)')
    parser.add_option('--altsliceroundtimetarget', type=int, default=1000, help='Round time target based on GHash/s (default 1000 Ghash/s)')
    parser.add_option('--altsliceroundtimemagic', type=int, default=10, help='Round time magic number, increase to bias towards round time over shares')
    parser.add_option('--config', type=str, default='bh.cfg', help='Select an alternate main config file from bh.cfg')
    parser.add_option('--p2pLP', action='store_true', default=False, help='Starts up an IRC bot to validate LP based hopping.')
    parser.add_option('--ip', type=str, default='', help='IP to listen on')
    parser.add_option('--auth', type=str, default=None, help='User,Password')
    parser.add_option('--logconnections', default=False, action='store_true', help='show connection log')
    parser.add_option('--simple_logging', default=False, action='store_true', help='remove RCP logging from output')
    options = parser.parse_args()[0]

    if options.trace == True:
        options.debug = True

    if options.listschedulers:
        schedulers = ""
        for s in scheduler.Scheduler.__subclasses__():
            schedulers += ", " + s.__name__
        print "Available Schedulers: " + schedulers[2:]
        return

    config = ConfigParser.ConfigParser()
    try:
        # determine if application is a script file or frozen exe
        if hasattr(sys, 'frozen'):
            application_path = os.path.dirname(sys.executable)
        elif __file__:
            application_path = os.path.dirname(__file__)
        if not os.path.exists(os.path.join(application_path, options.config)):
            print "Missing " + options.config + " may need to rename bh.cfg.default"
            os._exit(-1)
        config.read(os.path.join(application_path, options.config))
    except:
        if not os.path.exists(options.config):
            print "Missing " + options.config + " may need to rename bh.cfg.default"
            os._exit(-1)
        config.read(options.config)

    bithopper_instance = BitHopper(options, config)

    if options.auth:
        auth = options.auth.split(',')
        bithopper_instance.auth = auth
        if len(auth) != 2:
            print 'User,Password. Not whatever you just entered'
            return

    # auth from config
    try:
        c = config.get('auth', 'username'), config.get('auth', 'password')
        bithopper_instance.auth = c
    except:
        pass

    override_scheduler = False
    if options.scheduler != None:
        scheduler_name = options.scheduler
        override_scheduler = True
    try:
        sched = config.get('main', 'scheduler')
        if sched != None:
            override_scheduler = True
            scheduler_name = sched
    except:
        pass

    if override_scheduler:
        bithopper_instance.log_msg("Selecting scheduler: " + scheduler_name)
        foundScheduler = False
        for s in scheduler.Scheduler.__subclasses__():
            if s.__name__ == scheduler_name:
                bithopper_instance.scheduler = s(bithopper_instance)
                foundScheduler = True
                break
        if not foundScheduler:
            bithopper_instance.log_msg("Error couldn't find: " + scheduler_name + ". Using default scheduler.")
            bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance)
    else:
        bithopper_instance.log_msg("Using default scheduler.")
        bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance)

    bithopper_instance.select_best_server()

    if options.p2pLP:
        bithopper_instance.log_msg('Starting p2p LP')
        bithopper_instance.lpBot = LpBot(bithopper_instance)

    lastDefaultTimeout = socket.getdefaulttimeout()

    if options.logconnections:
        log = None
    else:
        log = open(os.devnull, 'wb')

    while True:
        try:
            listen_port = options.port
            try:
                listen_port = config.getint('main', 'port')
            except ConfigParser.Error:
                bithopper_instance.log_dbg("Unable to load main listening port from config file")
                pass
            socket.setdefaulttimeout(None)
            wsgi.server(eventlet.listen((options.ip, listen_port)), bithopper_instance.website.handle_start, log=log)
            socket.setdefaulttimeout(lastDefaultTimeout)
            break
        except Exception, e:
            bithopper_instance.log_msg("Exception in wsgi server loop, restarting wsgi in 60 seconds\n%s" % (e))
            eventlet.sleep(60)

## We're starting up for the first time.
if options.daemonize:
    # Do the daemon dance. Note that this isn't what is considered good
    # daemonization, because frankly it's convenient to keep the file
    # descriptors open (especially when there are prints scattered all
    # over the codebase.)
    # What we do instead is fork off, create a new session, fork again.
    # This leaves the process group in a state without a session
    # leader.
    pid = os.fork()
    if not pid:
        os.setsid()
        pid = os.fork()
        if pid:
            os._exit(0)
    else:
        os._exit(0)

    print "(%s) now daemonized" % (os.getpid(),)
    # Close _all_ open (and otherwise!) files.
    import resource
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 4096
    for fdnum in xrange(maxfd):
        try:
            os.close(fdnum)
        except OSError, e:
            if e.errno != errno.EBADF:
                raise
    # Remap std{in,out,err}

def main(): parser = optparse.OptionParser(description="bitHopper") parser.add_option( "--debug", action="store_true", default=False, help="Extra error output. Basically print all caught errors" ) parser.add_option("--trace", action="store_true", default=False, help="Extra debugging output") parser.add_option( "--listschedulers", action="store_true", default=False, help="List alternate schedulers available" ) parser.add_option("--port", type=int, default=8337, help="Port to listen on") parser.add_option("--scheduler", type=str, default="DefaultScheduler", help="Select an alternate scheduler") parser.add_option("--threshold", type=float, default=None, help="Override difficulty threshold (default 0.43)") parser.add_option("--config", type=str, default="bh.cfg", help="Select an alternate main config file from bh.cfg") parser.add_option("--ip", type=str, default="", help="IP to listen on") parser.add_option("--auth", type=str, default=None, help="User,Password") parser.add_option("--logconnections", default=False, action="store_true", help="show connection log") # parser.add_option('--simple_logging', default = False, action='store_true', help='remove RCP logging from output') options = parser.parse_args()[0] if options.trace == True: options.debug = True if options.listschedulers: schedulers = "" for s in scheduler.Scheduler.__subclasses__(): schedulers += ", " + s.__name__ print "Available Schedulers: " + schedulers[2:] return config = ConfigParser.ConfigParser() try: # determine if application is a script file or frozen exe if hasattr(sys, "frozen"): application_path = os.path.dirname(sys.executable) elif __file__: application_path = os.path.dirname(__file__) if not os.path.exists(os.path.join(application_path, options.config)): print "Missing " + options.config + " may need to rename bh.cfg.default" os._exit(1) config.read(os.path.join(application_path, options.config)) except: if not os.path.exists(options.config): print "Missing " + options.config + " may need to rename bh.cfg.default" os._exit(1) config.read(options.config) bithopper_instance = BitHopper(options, config) if options.auth: auth = options.auth.split(",") bithopper_instance.auth = auth if len(auth) != 2: print "User,Password. Not whatever you just entered" return # auth from config try: c = config.get("auth", "username"), config.get("auth", "password") bithopper_instance.auth = c except: pass override_scheduler = False if options.scheduler != None: scheduler_name = options.scheduler override_scheduler = True try: sched = config.get("main", "scheduler") if sched != None: override_scheduler = True scheduler_name = sched except: pass if override_scheduler: bithopper_instance.log_msg("Selecting scheduler: " + scheduler_name) foundScheduler = False for s in scheduler.Scheduler.__subclasses__(): if s.__name__ == scheduler_name: bithopper_instance.scheduler = s(bithopper_instance) foundScheduler = True break if not foundScheduler: bithopper_instance.log_msg("Error couldn't find: " + scheduler_name + ". 
Using default scheduler.") bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance) else: bithopper_instance.log_msg("Using default scheduler.") bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance) bithopper_instance.select_best_server() lastDefaultTimeout = socket.getdefaulttimeout() if options.logconnections: log = None else: log = open(os.devnull, "wb") hook = plugins.Hook("plugins.bithopper.startup") hook.notify(bithopper_instance, config, options) while True: try: listen_port = options.port try: listen_port = config.getint("main", "port") except ConfigParser.Error: bithopper_instance.log_dbg("Unable to load main listening port from config file") pass socket.setdefaulttimeout(None) wsgi.server(eventlet.listen((options.ip, listen_port)), bithopper_instance.website.handle_start, log=log) socket.setdefaulttimeout(lastDefaultTimeout) break except Exception, e: bithopper_instance.log_msg("Exception in wsgi server loop, restarting wsgi in 60 seconds\n%s" % (str(e))) eventlet.sleep(60)
## We're starting up for the first time.
if sys.platform != 'win32' and getattr(options, 'daemonize'):
    # Do the daemon dance. Note that this isn't what is considered good
    # daemonization, because frankly it's convenient to keep the file
    # descriptors open (especially when there are prints scattered all
    # over the codebase.)
    # What we do instead is fork off, create a new session, fork again.
    # This leaves the process group in a state without a session
    # leader.
    pid = os.fork()
    if not pid:
        os.setsid()
        pid = os.fork()
        if pid:
            os._exit(0)
    else:
        os._exit(0)

    print "(%s) now daemonized" % (os.getpid(),)
    # Close _all_ open (and otherwise!) files.
    import resource
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = 4096
    for fdnum in xrange(maxfd):
        try:
            os.close(fdnum)
        except OSError, e:
            if e.errno != errno.EBADF:
                raise
    # Remap std{in,out,err}

def main(): parser = optparse.OptionParser(description="bitHopper") parser.add_option( "--debug", action="store_true", default=False, help="Extra error output. Basically print all caught errors" ) parser.add_option("--trace", action="store_true", default=False, help="Extra debugging output") parser.add_option( "--listschedulers", action="store_true", default=False, help="List alternate schedulers available" ) parser.add_option("--port", type=int, default=8337, help="Port to listen on") parser.add_option("--scheduler", type=str, default="OldDefaultScheduler", help="Select an alternate scheduler") parser.add_option("--threshold", type=float, default=None, help="Override difficulty threshold (default 0.43)") parser.add_option( "--altslicesize", type=int, default=900, help="Override Default AltSliceScheduler Slice Size of 900" ) parser.add_option( "--altminslicesize", type=int, default=60, help="Override Default Minimum Pool Slice Size of 60 (AltSliceScheduler only)", ) parser.add_option( "--altslicejitter", type=int, default=0, help="Add some random variance to slice size, disabled by default (AltSliceScheduler only)", ) parser.add_option( "--altsliceroundtimebias", action="store_true", default=False, help="Bias slicing slightly by round time duration with respect to round time target (default false)", ) parser.add_option( "--altsliceroundtimetarget", type=int, default=1000, help="Round time target based on GHash/s (default 1000 Ghash/s)", ) parser.add_option( "--altsliceroundtimemagic", type=int, default=10, help="Round time magic number, increase to bias towards round time over shares", ) parser.add_option("--config", type=str, default="bh.cfg", help="Select an alternate main config file from bh.cfg") parser.add_option( "--p2pLP", action="store_true", default=False, help="Starts up an IRC bot to validate LP based hopping." ) parser.add_option("--ip", type=str, default="", help="IP to listen on") parser.add_option("--auth", type=str, default=None, help="User,Password") parser.add_option("--logconnections", default=False, action="store_true", help="show connection log") options = parser.parse_args()[0] if options.trace == True: options.debug = True if options.listschedulers: schedulers = "" for s in scheduler.Scheduler.__subclasses__(): schedulers += ", " + s.__name__ print "Available Schedulers: " + schedulers[2:] return config = ConfigParser.ConfigParser() try: # determine if application is a script file or frozen exe if hasattr(sys, "frozen"): application_path = os.path.dirname(sys.executable) elif __file__: application_path = os.path.dirname(__file__) if not os.path.exists(os.path.join(application_path, options.config)): print "Missing " + options.config + " may need to rename bh.cfg.default" os._exit(-1) config.read(os.path.join(application_path, options.config)) except: if not os.path.exists(options.config): print "Missing " + options.config + " may need to rename bh.cfg.default" os._exit(-1) config.read(options.config) bithopper_instance = BitHopper(options, config) if options.auth: auth = options.auth.split(",") bithopper_instance.auth = auth if len(auth) != 2: print "User,Password. 
Not whatever you just entered" return # auth from config try: c = config.get("auth", "username"), config.get("auth", "password") bithopper_instance.auth = c except: pass override_scheduler = False if options.scheduler != None: scheduler_name = options.scheduler override_scheduler = True try: sched = config.get("main", "scheduler") if sched != None: override_scheduler = True scheduler_name = sched except: pass if override_scheduler: bithopper_instance.log_msg("Selecting scheduler: " + scheduler_name) foundScheduler = False for s in scheduler.Scheduler.__subclasses__(): if s.__name__ == scheduler_name: bithopper_instance.scheduler = s(bithopper_instance) foundScheduler = True break if not foundScheduler: bithopper_instance.log_msg("Error couldn't find: " + scheduler_name + ". Using default scheduler.") bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance) else: bithopper_instance.log_msg("Using default scheduler.") bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance) bithopper_instance.select_best_server() if options.p2pLP: bithopper_instance.log_msg("Starting p2p LP") bithopper_instance.lpBot = LpBot(bithopper_instance) lastDefaultTimeout = socket.getdefaulttimeout() if options.logconnections: log = None else: log = open(os.devnull, "wb") while True: try: listen_port = options.port try: listen_port = config.getint("main", "port") except ConfigParser.Error: pass socket.setdefaulttimeout(None) wsgi.server(eventlet.listen((options.ip, listen_port)), bithopper_instance.website.handle_start, log=log) socket.setdefaulttimeout(lastDefaultTimeout) break except Exception, e: print e eventlet.sleep(60)
def main():
    parser = optparse.OptionParser(description='bitHopper')
    parser.add_option('--debug', action='store_true', default=False, help='Extra error output. Basically print all caught errors')
    parser.add_option('--trace', action='store_true', default=False, help='Extra debugging output')
    parser.add_option('--listschedulers', action='store_true', default=False, help='List alternate schedulers available')
    parser.add_option('--port', type=int, default=8337, help='Port to listen on')
    parser.add_option('--scheduler', type=str, default='DefaultScheduler', help='Select an alternate scheduler')
    parser.add_option('--threshold', type=float, default=None, help='Override difficulty threshold (default 0.43)')
    parser.add_option('--config', type=str, default='bh.cfg', help='Select an alternate main config file from bh.cfg')
    parser.add_option('--ip', type=str, default='', help='IP to listen on')
    parser.add_option('--auth', type=str, default=None, help='User,Password')
    parser.add_option('--logconnections', default=False, action='store_true', help='show connection log')
    # parser.add_option('--simple_logging', default = False, action='store_true', help='remove RCP logging from output')
    options = parser.parse_args()[0]

    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.trace:
        logging.getLogger().setLevel(0)
    else:
        logging.getLogger().setLevel(logging.INFO)

    if options.listschedulers:
        schedulers = ""
        for s in scheduler.Scheduler.__subclasses__():
            schedulers += ", " + s.__name__
        print "Available Schedulers: " + schedulers[2:]
        return

    config = ConfigParser.ConfigParser()
    try:
        # determine if application is a script file or frozen exe
        if hasattr(sys, 'frozen'):
            application_path = os.path.dirname(sys.executable)
        elif __file__:
            application_path = os.path.dirname(__file__)
        if not os.path.exists(os.path.join(application_path, options.config)):
            print "Missing " + options.config + " may need to rename bh.cfg.default"
            os._exit(1)
        config.read(os.path.join(application_path, options.config))
    except:
        if not os.path.exists(options.config):
            print "Missing " + options.config + " may need to rename bh.cfg.default"
            os._exit(1)
        config.read(options.config)

    bithopper_instance = BitHopper(options, config)

    if options.auth:
        auth = options.auth.split(',')
        bithopper_instance.auth = auth
        if len(auth) != 2:
            print 'User,Password. Not whatever you just entered'
            return

    # auth from config
    try:
        c = config.get('auth', 'username'), config.get('auth', 'password')
        bithopper_instance.auth = c
    except:
        pass

    override_scheduler = False
    if options.scheduler != None:
        scheduler_name = options.scheduler
        override_scheduler = True
    try:
        sched = config.get('main', 'scheduler')
        if sched != None:
            override_scheduler = True
            scheduler_name = sched
    except:
        pass

    if override_scheduler:
        logging.info("Selecting scheduler: " + scheduler_name)
        foundScheduler = False
        for s in scheduler.Scheduler.__subclasses__():
            if s.__name__ == scheduler_name:
                bithopper_instance.scheduler = s(bithopper_instance)
                foundScheduler = True
                break
        if not foundScheduler:
            logging.info("Error couldn't find: " + scheduler_name + ". Using default scheduler.")
            bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance)
    else:
        logging.info("Using default scheduler.")
        bithopper_instance.scheduler = scheduler.DefaultScheduler(bithopper_instance)

    bithopper_instance.select_best_server()

    lastDefaultTimeout = socket.getdefaulttimeout()

    if options.logconnections:
        log = None
    else:
        log = open(os.devnull, 'wb')

    hook = plugins.Hook('plugins.bithopper.startup')
    hook.notify(bithopper_instance, config, options)

    while True:
        try:
            listen_port = options.port
            try:
                listen_port = config.getint('main', 'port')
            except ConfigParser.Error:
                logging.debug("Unable to load main listening port from config file")
                pass
            # This ugly wrapper is required so wsgi server doesn't die
            socket.setdefaulttimeout(None)
            wsgi.server(eventlet.listen((options.ip, listen_port), backlog=500), bithopper_instance.website.handle_start, log=log, max_size=8000)
            socket.setdefaulttimeout(lastDefaultTimeout)
            break
        except Exception, e:
            logging.info("Exception in wsgi server loop, restarting wsgi in 60 seconds\n%s" % (str(e)))
            eventlet.sleep(60)

def run_cgi(self):
    """Execute a CGI script."""
    dir, rest = self.cgi_info
    path = dir + '/' + rest
    i = path.find('/', len(dir)+1)
    while i >= 0:
        nextdir = path[:i]
        nextrest = path[i+1:]

        scriptdir = self.translate_path(nextdir)
        if os.path.isdir(scriptdir):
            dir, rest = nextdir, nextrest
            i = path.find('/', len(dir)+1)
        else:
            break

    # find an explicit query string, if present.
    rest, _, query = rest.partition('?')

    # dissect the part after the directory name into a script name &
    # a possible additional path, to be stored in PATH_INFO.
    i = rest.find('/')
    if i >= 0:
        script, rest = rest[:i], rest[i:]
    else:
        script, rest = rest, ''

    scriptname = dir + '/' + script
    scriptfile = self.translate_path(scriptname)
    if not os.path.exists(scriptfile):
        self.send_error(
            HTTPStatus.NOT_FOUND,
            "No such CGI script (%r)" % scriptname)
        return
    if not os.path.isfile(scriptfile):
        self.send_error(
            HTTPStatus.FORBIDDEN,
            "CGI script is not a plain file (%r)" % scriptname)
        return
    ispy = self.is_python(scriptname)
    if self.have_fork or not ispy:
        if not self.is_executable(scriptfile):
            self.send_error(
                HTTPStatus.FORBIDDEN,
                "CGI script is not executable (%r)" % scriptname)
            return

    # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
    # XXX Much of the following could be prepared ahead of time!
    env = copy.deepcopy(os.environ)
    env['SERVER_SOFTWARE'] = self.version_string()
    env['SERVER_NAME'] = self.server.server_name
    env['GATEWAY_INTERFACE'] = 'CGI/1.1'
    env['SERVER_PROTOCOL'] = self.protocol_version
    env['SERVER_PORT'] = str(self.server.server_port)
    env['REQUEST_METHOD'] = self.command
    uqrest = urllib.parse.unquote(rest)
    env['PATH_INFO'] = uqrest
    env['PATH_TRANSLATED'] = self.translate_path(uqrest)
    env['SCRIPT_NAME'] = scriptname
    if query:
        env['QUERY_STRING'] = query
    env['REMOTE_ADDR'] = self.client_address[0]
    authorization = self.headers.get("authorization")
    if authorization:
        authorization = authorization.split()
        if len(authorization) == 2:
            import base64, binascii
            env['AUTH_TYPE'] = authorization[0]
            if authorization[0].lower() == "basic":
                try:
                    authorization = authorization[1].encode('ascii')
                    authorization = base64.decodebytes(authorization).decode('ascii')
                except (binascii.Error, UnicodeError):
                    pass
                else:
                    authorization = authorization.split(':')
                    if len(authorization) == 2:
                        env['REMOTE_USER'] = authorization[0]
    # XXX REMOTE_IDENT
    if self.headers.get('content-type') is None:
        env['CONTENT_TYPE'] = self.headers.get_content_type()
    else:
        env['CONTENT_TYPE'] = self.headers['content-type']
    length = self.headers.get('content-length')
    if length:
        env['CONTENT_LENGTH'] = length
    referer = self.headers.get('referer')
    if referer:
        env['HTTP_REFERER'] = referer
    accept = []
    for line in self.headers.getallmatchingheaders('accept'):
        if line[:1] in "\t\n\r ":
            accept.append(line.strip())
        else:
            accept = accept + line[7:].split(',')
    env['HTTP_ACCEPT'] = ','.join(accept)
    ua = self.headers.get('user-agent')
    if ua:
        env['HTTP_USER_AGENT'] = ua
    co = filter(None, self.headers.get_all('cookie', []))
    cookie_str = ', '.join(co)
    if cookie_str:
        env['HTTP_COOKIE'] = cookie_str
    # XXX Other HTTP_* headers
    # Since we're setting the env in the parent, provide empty
    # values to override previously set values
    for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
              'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
        env.setdefault(k, "")

    self.send_response(HTTPStatus.OK, "Script output follows")
    self.flush_headers()

    decoded_query = query.replace('+', ' ')

    if self.have_fork:
        # Unix -- fork as we should
        args = [script]
        if '=' not in decoded_query:
            args.append(decoded_query)
        nobody = nobody_uid()
        self.wfile.flush()  # Always flush before forking
        pid = os.fork()
        if pid != 0:
            # Parent
            pid, sts = os.waitpid(pid, 0)
            # throw away additional data [see bug #427345]
            while select.select([self.rfile], [], [], 0)[0]:
                if not self.rfile.read(1):
                    break
            if sts:
                self.log_error("CGI script exit status %#x", sts)
            return
        # Child
        try:
            try:
                os.setuid(nobody)
            except OSError:
                pass
            os.dup2(self.rfile.fileno(), 0)
            os.dup2(self.wfile.fileno(), 1)
            os.execve(scriptfile, args, env)
        except:
            self.server.handle_error(self.request, self.client_address)
            os._exit(127)
    else:
        # Non-Unix -- use subprocess
        cmdline = [scriptfile]
        if self.is_python(scriptfile):
            interp = sys.executable
            if interp.lower().endswith("w.exe"):
                # On Windows, use python.exe, not pythonw.exe
                interp = interp[:-5] + interp[-4:]
            cmdline = [interp, '-u'] + cmdline
        if '=' not in query:
            cmdline.append(query)
        self.log_message("command: %s", subprocess.list2cmdline(cmdline))
        try:
            nbytes = int(length)
        except (TypeError, ValueError):
            nbytes = 0
        p = subprocess.Popen(cmdline,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=env)
        if self.command.lower() == "post" and nbytes > 0:
            data = self.rfile.read(nbytes)
        else:
            data = None
        # throw away additional data [see bug #427345]
        while select.select([self.rfile._sock], [], [], 0)[0]:
            if not self.rfile._sock.recv(1):
                break
        stdout, stderr = p.communicate(data)
        self.wfile.write(stdout)
        if stderr:
            self.log_error('%s', stderr)
        p.stderr.close()
        p.stdout.close()
        status = p.returncode
        if status:
            self.log_error("CGI script exit status %#x", status)
        else:
            self.log_message("CGI script exited OK")

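# --- Hedged usage sketch (not part of the original sources): run_cgi() above
# matches the Python 3 http.server CGI handler, so a minimal server that drives
# it would look roughly like this. The port and directory are arbitrary examples.
from http.server import HTTPServer, CGIHTTPRequestHandler

if __name__ == '__main__':
    CGIHTTPRequestHandler.cgi_directories = ['/cgi-bin']  # scripts under ./cgi-bin are executed as CGI
    HTTPServer(('127.0.0.1', 8000), CGIHTTPRequestHandler).serve_forever()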