def term_proc(proc, term_wait_time=3):
    """Shut down a child process, escalating to SIGKILL, and close its pipes.

    Args:
        proc: A subprocess.Popen instance to terminate.
        term_wait_time: Seconds to wait after terminate() before killing.
    """
    if proc.poll() is None:
        LOG.debug(
            "Terminating process %i and waiting up to %i seconds for it to end...",
            proc.pid,
            term_wait_time,
        )
        proc.terminate()
        try:
            proc.wait(term_wait_time)
        except TimeoutExpired:
            # Didn't exit in time; fall through to the kill path below.
            pass
    if proc.poll() is None:
        LOG.warning(
            "Process %i didn't terminate. Killing process and waiting until process exits...",
            proc.pid,
        )
        proc.kill()
        proc.wait()
    # Close any pipes so their file descriptors aren't leaked.
    if proc.stdout:
        proc.stdout.close()
    if proc.stderr:
        # BUG FIX: was "proc.stder.close()", which raised AttributeError
        # whenever stderr was piped.
        proc.stderr.close()
    if proc.stdin:
        proc.stdin.close()
    assert proc.poll() is not None, "Proc %i didn't terminate properly" % proc.pid
    LOG.debug("Process %i terminated with %i", proc.pid, proc.poll())
def get_most_profitable_miner(self):
    """Return the miner instance for the currently most profitable algorithm."""
    LOG.debug("Finding most profitable algo...")
    info = self._filter_blacklisted_algos_from_algo_info(self._get_algo_info())
    miners_by_algo = self._create_miners_for_algo_info(info)
    benchmarks = self._get_benchmarks(miners_by_algo)
    winner = self._get_most_profitable_algo(info, benchmarks)
    return miners_by_algo[winner]
def stop_mining_and_return_when_stopped(self):
    """Stop the miner process and its logging thread, blocking until both end."""
    LOG.debug("Terminating ccminer (%s)...", self.algo)
    term_proc(self.miner_proc)
    LOG.debug("Terminating logging thread for ccminer (%s)...", self.algo)
    self.logger_thread.join()
    # Drop the references so later checks see this miner as stopped.
    self.miner_proc, self.logger_thread = None, None
def _get_algo_info(self):
    """Fetch NiceHash's simplemultialgo info and convert it to algo-info records."""
    LOG.debug("Fetching currency info...")
    resp = Fetcher.fetch_json_api(
        "https://api.nicehash.com/api?method=simplemultialgo.info")
    # Unwrap two nested single-entry dicts to reach the payload
    # (presumably {"result": {"simplemultialgo": [...]}} — verify against API).
    outer = list(resp.values())[0]
    payload = list(outer.values())[0]
    return self._to_algo_info(payload)
def start():
    """Program entry point: parse arguments and start mining.

    Uncaught exceptions are logged as a crash and converted to exit status 1;
    deliberate SystemExit is allowed to propagate untouched.
    """
    try:
        _parse_args_and_start_mining()
    except SystemExit:
        # FIX: bare `raise` instead of `raise ex` — re-raises the active
        # exception with its original traceback intact.
        raise
    except Exception:
        LOG.exception("Uncaught exception caused a program crash!")
        LOG.debug("Exiting...")
        sys.exit(1)
def get_most_profitable_miner(self):
    """Find the best miner, optionally evaluating the alternate data source first.

    When configured to calculate all algo data sources, temporarily flips
    `_algo_data_source` to the other source, runs the parent lookup for its
    side effects, restores the source, then returns the parent's result.
    """
    if self._calc_all_algo_data_sources:
        original = self._algo_data_source
        alternate = "currency" if original == "algo" else "algo"
        LOG.debug("Switching to source \"%s\"..." % alternate)
        self._algo_data_source = alternate
        super().get_most_profitable_miner()
        LOG.debug("Reverting to source \"%s\"..." % original)
        self._algo_data_source = original
    return super().get_most_profitable_miner()
def add_rate(self, rate):
    """Record *rate* and the direction of change relative to the previous rate."""
    LOG.debug("Rate added: %s...", rate)
    if self._rates:
        prev = self._rates[-1]
        # Direction encoding: 1 = up, 0.5 = flat, 0 = down.
        if rate > prev:
            direction = 1
        elif rate == prev:
            direction = 0.5
        else:
            direction = 0
        self._direction.append(direction)
    self._rates.append(rate)
def _stdout_printer(stdout, name, share_cond, algo):
    """Stream miner stdout, notifying waiters and logging each share result.

    Args:
        stdout: Byte-line iterable (the miner process's piped stdout).
        name: Unused here; kept for interface compatibility with callers.
        share_cond: Condition notified whenever a share result line appears.
        algo: Algorithm name passed through to the share logger.
    """
    for line in stdout:
        line = line.decode("UTF-8").strip()
        # "booooo" marks a rejected share, "yes!" an accepted one.
        if "booooo" in line or "yes!" in line:
            # FIX: use the condition as a context manager so the lock is
            # released even if notify_all raises (was manual acquire/release).
            with share_cond:
                share_cond.notify_all()
            speed = line.split(",")[-1].rpartition(" ")[0].strip()
            LOG.share(algo, "yes!" in line, Rate(speed))
        LOG.debug(line)
def _get_most_profitable_algo(self, algo_info, algo_to_benchmarks):
    """Return the algo name with the highest estimated mBTC/day profitability."""

    def _profitability(info):
        return algo_to_benchmarks[info.algo].get_mbtc_per_day(info.prof_str)

    # Only consider algos we actually have a benchmark for.
    candidates = [a for a in algo_info if a.algo in algo_to_benchmarks]
    candidates.sort(key=_profitability, reverse=True)
    for info in candidates:
        LOG.debug("  Profitability of %s = %s mBTC / day", info.algo,
                  _profitability(info))
    return candidates[0].algo
def start_proc(cmd, pipe_stdout=False, preexec_fn=None):
    """Launch *cmd* as a child process and register it for later cleanup.

    Args:
        cmd: Command line, split on single spaces into argv.
        pipe_stdout: If True, capture the child's stdout via a pipe.
        preexec_fn: Optional callable run in the child before exec.

    Returns:
        The subprocess.Popen instance (also appended to the global `procs`).
    """
    # FIX: was `global pid_to_procs`, a stale name — the registry this
    # function (and term_all_procs) actually uses is `procs`.
    global procs
    kwargs = {}
    if pipe_stdout:
        kwargs["stdout"] = PIPE
    if preexec_fn:
        kwargs["preexec_fn"] = preexec_fn
    p = Popen(cmd.split(" "), **kwargs)
    procs.append(p)
    LOG.debug("Started \"%s\" with pid of %i", cmd, p.pid)
    return p
def _create_miners_for_algo_info(self, algo_info):
    """Build a miner instance for every supported algo present in *algo_info*."""
    LOG.debug("Prepping miners...")
    info_by_algo = {info.algo: info for info in algo_info}
    # Intersect what the pool offers with what our miners support.
    usable = get_supported_algos() & set(info_by_algo.keys())
    miners = {}
    for algo in usable:
        info = info_by_algo[algo]
        miners[algo] = get_miner_for_algo(algo, info.url, info.port,
                                          self._wallet,
                                          self._generate_password(algo))
    return miners
def _wait(current_miner):
    """Wait out one check interval, returning early if the miner stops.

    Polls in small increments so a pending exit request (set elsewhere via
    `MiningMonitor._exit_status`) is honored promptly.
    """
    deadline = time() + MiningMonitor.CHECK_INTERVAL
    LOG.debug("Sleeping for %i seconds...", MiningMonitor.CHECK_INTERVAL)
    # Busy wait (with a short sleep) because of some complex exit scenarios.
    while time() < deadline and current_miner.is_mining():
        if MiningMonitor._exit_status is not None:
            LOG.debug("Exiting program with status %i...",
                      MiningMonitor._exit_status)
            sys.exit(MiningMonitor._exit_status)
        sleep(0.01)
def mine(mining_group):
    """Run the mining loop: repeatedly pick the best miner and switch when it changes."""
    LOG.info("Starting miner for \033[92m%s\033[0m!", mining_group)
    active = None
    while True:
        LOG.debug("Finding most profitable miner for \033[92m%s\033[0m...",
                  mining_group)
        candidate = mining_group.get_most_profitable_miner()
        LOG.debug("Found best miner: \033[92m%s\033[0m!", candidate)
        if candidate != active:
            LOG.info("Switching to \033[92m%s\033[0m...", candidate)
            MiningMonitor.switch_miner_and_return_when_started(candidate, active)
            active = candidate
            LOG.info("Switch complete! Shares incoming...")
        MiningMonitor._check_file_descriptors()
        MiningMonitor._wait(active)
def benchmark(self):
    """Benchmark this miner's algorithm, using a cached result when available.

    Runs the miner in benchmark mode and feeds every "Total:" hashrate line
    into a Benchmarker until it reports a stable value.

    Returns:
        The benchmarked Rate, or None if the benchmark process exited before
        a stable hashrate was produced.
    """
    cmd = self._get_run_cmd(self.path_to_exec, self.algo, "", "", "", "",
                            kwargs={
                                "--benchmark": "",
                                "--no-color": ""
                            })
    LOG.debug("Benchmarking \033[92m%s\033[0m...", self.algo)
    cache_key = "BENCHHR%s" % (cmd)
    cached_benchmark = MinerStore.get(cache_key)
    if cached_benchmark:
        b = Rate(cached_benchmark)
        LOG.debug("Benchmark found in cache: %s!", b)
        return b
    LOG.info("Benchmark not found for \033[92m%s\033[0m. Benchmarking...",
             self.algo)
    bench_proc = start_proc(cmd, pipe_stdout=True)
    bm = Benchmarker()
    # BUG FIX: final_hashrate was unbound (NameError) if stdout ended before
    # the Benchmarker produced a stable value; initialize it and guard below.
    final_hashrate = None
    for line in bench_proc.stdout:
        line = line.strip().decode("UTF-8")
        if "Total:" in line:
            r = Rate(line.split(":")[-1].strip())
            bm.add_rate(r)
            final_hashrate = bm.get_benchmark()
            if final_hashrate:
                break
    term_proc(bench_proc)
    if final_hashrate:
        # Only cache and announce real results — never a missing benchmark.
        MinerStore.set(cache_key, str(final_hashrate))
        LOG.info("Benchmark found: %s!", final_hashrate)
    return final_hashrate
def fetch_json_api(url, use_cache_on_failure=False):
    """Fetch JSON from *url*, retrying forever with capped exponential backoff.

    Args:
        url: The endpoint to fetch.
        use_cache_on_failure: If True and a previous successful response for
            this URL is cached, return the cached copy on a bad response
            instead of retrying.

    Returns:
        The decoded JSON response (truthy responses only).
    """
    LOG.debug("Fetching %s...", url)
    num_tries = 0
    while True:
        resp_json = Fetcher._try_fetching_resp(url)
        LOG.debug("Response from %s: %s", url, resp_json)
        if resp_json:
            Fetcher._cache[url] = resp_json
            return resp_json
        elif use_cache_on_failure and url in Fetcher._cache:
            # FIX: LOG.warn is a deprecated alias — use LOG.warning, matching
            # the call a few lines below.
            LOG.warning(
                "Bad response from '%s\'! Falling back to cache...\033[0m\n",
                url)
            return Fetcher._cache[url]
        num_tries += 1
        # Exponential backoff (1, 3, 7, ...) capped at _MAX_WAIT seconds.
        sleep_time = min(2**num_tries - 1, Fetcher._MAX_WAIT)
        LOG.warning(
            "Bad response from '%s\'! Backing off for %i second(s)...", url,
            sleep_time)
        sleep(sleep_time)
def term_all_procs(term_wait_time=1):
    """Terminate every registered child process that is still running."""
    global procs
    LOG.debug("Terminating all processes...")
    for proc in procs:
        # Skip processes that have already exited.
        if proc.poll() is not None:
            continue
        term_proc(proc, term_wait_time=term_wait_time)
def _get_benchmarks(self, algo_to_miners):
    """Collect a benchmark for each miner, keyed by its algo name."""
    LOG.debug("Loading benchmarks from cache...")
    benchmarks = {}
    for algo, miner in algo_to_miners.items():
        benchmarks[algo] = miner.benchmark()
    return benchmarks
def _signal_handler(signum, frame):
    """Clean up all child processes on a signal, then hard-exit.

    Args:
        signum: The signal number received.
        frame: The interrupted stack frame (unused).
    """
    LOG.debug('Signal handler called with signal %i', signum)
    term_all_procs()
    # 128 + signum is the conventional "killed by signal" exit status;
    # os._exit skips atexit/cleanup handlers and exits immediately.
    os._exit(128 + signum)
def _get_algo_info_from_algos(self):
    """Fetch zpool's status endpoint and convert it to algo-info records."""
    LOG.debug("Fetching algo info...")
    resp = Fetcher.fetch_json_api("http://www.zpool.ca/api/status")
    return self._status_to_algo_info(list(resp.values()))
def _get_algo_info_from_currencies(self):
    """Fetch zpool's currencies endpoint and convert it to algo-info records."""
    LOG.debug("Fetching currency info...")
    resp = Fetcher.fetch_json_api("http://www.zpool.ca/api/currencies")
    return self._currency_to_algo_info(list(resp.values()))