def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
    """Fetch this pool's stats for *miner*/*worker* and populate *results*.

    Builds the pool's miner URL from the miner's coin address, fetches the
    JSON payload over REST, and hands it to ``self.parse_json`` for parsing.

    Returns:
        bool: True when the API returned data and it was parsed successfully,
        False otherwise.
    """
    # ensure we have an ALGO IDX (i.e. it is known by the system)
    algo_idx = get_algo_index(algo)
    # BUGFIX: sibling implementations treat -1 as the "unknown algo" sentinel,
    # so guard against both None and -1 — previously an algo_idx of -1 would
    # slip through and be passed on to parse_json.
    if algo_idx is None or algo_idx == -1:
        # TODO - throw an exception
        return False
    coin_idx = get_coin_index(self._DEFAULT_COIN_)
    # get the cost of the coin
    # TODO - get the currency from the config, do not assume USD
    # TODO - cache coin_cost for a period... (or implement REST API result cache)
    coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
    success = False
    # build the miner URL by substituting the miner's coin address
    url = self._MINER_URL.replace("{MINER}", miner.coin_address)
    # create an API object
    api = RestAPI(url=url, port=80)
    # get the data
    json = api.get_json()
    if json:
        success = self.parse_json(json, results, miner, worker, pool_id,
                                  algo, algo_idx, coin_idx, coin_cost)
    return success
def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
    """Fetch this pool's stats for *miner*/*worker* and populate *results*.

    Translates ``ethash`` to this pool's internal algo name
    (``daggerhashimoto``), fetches the per-worker stats, and delegates
    parsing to ``self.parse_json``.

    Returns:
        bool: True when worker stats were retrieved and parsed, else False.
    """
    # this pool names ethash by its original algorithm name
    if algo == 'ethash':
        algo_idx = get_algo_index('daggerhashimoto')
    else:
        algo_idx = get_algo_index(algo)
    # BUGFIX: was "algo_idx is -1" — an identity comparison against an int
    # literal, which only matched via CPython's small-int caching and raises
    # a SyntaxWarning on Python 3.8+. Use value equality.
    if algo_idx == -1:
        return False
    coin_idx = get_coin_index(self._DEFAULT_COIN_)
    # get the cost of the coin
    # TODO - get the currency from the config, do not assume USD
    coin_cost = get_coin_cost(self._DEFAULT_COIN_, 'USD')
    success = False
    json = self.get_worker_stats(miner, worker)
    if json:
        success = self.parse_json(json, results, miner, worker, pool_id,
                                  algo, algo_idx, coin_idx, coin_cost)
    return success
def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
    """Fetch Mining Pool Hub stats for *miner*/*worker* and populate *results*.

    Walks the multi-algo pool list (``self._pool_info``), querying the user
    hashrate for each candidate pool until one reports a non-zero rate, then
    converts the pool's BTC/GH/day profitability into COIN/GH/day and stores
    everything on *results*.

    Returns:
        bool: True on success; False when pool info, credentials, algo or
        coin pricing are unavailable.
    """
    # initialize with a 0 hashrate
    hashrate = 0.0
    # profit on MPOS site is measured in BTC/GH/DAY
    profit_btc_gh_day = 0.0
    # BUGFIX: also reject an *empty* pool list — previously only None was
    # checked, and an empty list crashed later with an undefined 'coin'.
    if not self._pool_info:
        # we are SOL
        logger.error("Cannot get needed POOL DATA from (getminingandprofitsstatistics) API call.")
        return False
    # get the API KEY and USER ID
    api_key, user_id = self._get_api_key_and_user_id()
    if api_key is None or len(api_key) == 0 or user_id is None or user_id == 0:
        warn_msg = "MINING POOL HUB Needs API_KEY and USER_ID"
        warn_msg += " in order to retrieve Miner Data. Set using UI or .INI file"
        logger.warning(warn_msg)
        return False
    # Get the pool info, should be a list of potential multialgo pools based on port
    for pool in self._pool_info:
        algo = pool['algo']
        coin = pool['coin_name']
        # profit == BTC / GH / DAY
        profit_btc_gh_day = pool['profit']
        # try to build a URL:
        url = self._build_api_hashrate_url(api_key, user_id, coin)
        # create an API object
        api = RestAPI(url=url, port=80)
        # get the data
        json = api.get_json()
        if json:
            hashrate = json['getuserhashrate']['data']
            if hashrate > 0:
                # this must be the right pool
                break
    # get the algo
    algo_idx = get_algo_index(algo)
    if algo_idx == -1:
        return False
    # get the index and cost of the coin
    coin_idx = get_coin_index(coin)
    coin_cost = get_coin_cost_by_index(coin_idx, 'USD')
    coin_cost_btc = get_coin_cost('BTC', 'USD')
    # BUGFIX: a failed/zero price lookup previously raised ZeroDivisionError
    if not coin_cost:
        logger.error("Could not get a valid USD cost for coin '%s'", coin)
        return False
    coin_cost_ratio = coin_cost_btc / coin_cost
    # The API is returning a number 1000 times larger than the WEB Dashboard,
    # which is reporting in MH/S.
    # We are going to surmise that the hashrate is reported in KH/s from the API
    # BUT we want to convert to GH/s to be consistent with profitability here
    # so force suffix to be GH
    speed_suffix = "GH"
    # and divide by 1M to get hashrate expressed in GH
    hashrate_ghs = (hashrate / 1000000)
    # We need the profitability to be in: COIN / speed_suffix / day
    # multiply by ratio of 'COIN' to BTC
    profit_coin_gh_day = profit_btc_gh_day * coin_cost_ratio
    # finally set the API results into the main results object
    results.populate_pool_results(miner, worker, pool_id, algo, algo_idx,
                                  coin_idx, coin_cost, profit_coin_gh_day,
                                  hashrate_ghs, None, speed_suffix)
    return True
def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
    """Fetch F2Pool stats for *miner*/*worker* and populate *results*.

    Derives the coin symbol from *pool_url* (e.g. ``ltc-us.f2pool.com:8888``
    -> ``ltc``), resolves it to the coin name the F2Pool API expects, pulls
    the per-worker hashrate, and estimates per-day profitability.

    Returns:
        bool: True on success; False when the coin is unknown, the API call
        fails, or pricing/worker data is unusable.
    """
    # initialize with a 0 hashrate
    hashrate = 0.0
    # get the coin from the pool URL
    # should be the format of: "ltc-us.f2pool.com:8888"
    pool_coin = pool_url.split(".")[0]
    if "-" in pool_coin:
        # seems pool location may be inside, remove that
        pool_coin = pool_coin.split("-")[0]
    # The F2Pool uses the CURRENCY in the API call but requires currency name
    # instead of the actual symbol - so we need to translate.
    # For example:
    #   ltc == litecoin
    #   btc == bitcoin
    #   eth == ethereum
    # get the coin
    coin = get_coin_name_by_symbol(pool_coin)
    # BUGFIX: previously an unknown coin fell off the end returning None;
    # return False explicitly, consistent with the other failure paths.
    if coin is None:
        return False
    # try to build a URL:
    url = self._build_api_hashrate_url(miner.coin_address, coin)
    # create an API object
    api = RestAPI(url=url, port=80)
    # get the data
    json = api.get_json()
    # BUGFIX: a failed API call previously caused a TypeError on json['workers']
    if not json:
        return False
    # hashrate from history - don't need this right now
    # hashrate = self.get_last_complete_hashrate_from_history(json)
    workers = json['workers']
    for worker_record in workers:
        if worker_record[0] == worker:
            # current hashrate per worker
            hashrate = float(worker_record[1])
            break
    # get the algo
    algo_idx = get_algo_index(algo)
    if algo_idx == -1:
        return False
    # get the index and cost of the coin
    coin_idx = get_coin_index(pool_coin)
    coin_cost = get_coin_cost_by_index(coin_idx, 'USD')
    coin_cost_btc = get_coin_cost('BTC', 'USD')
    # BUGFIX: a failed/zero price lookup previously raised ZeroDivisionError
    if not coin_cost:
        return False
    coin_cost_ratio = coin_cost_btc / coin_cost
    # profit == BTC / GH / DAY
    # profit_btc_gh_day = pool['profit']
    # BUT we want to convert to GH/s to be consistent with profitability here
    # so force suffix to be GH
    speed_suffix = "GH"
    # must divide by 1G to get hashrate expressed in GH
    hashrate_ghs = (hashrate / 1000000000)
    # hack to get profit per day - need to verify with F2Pool support what this means and the units
    # would be better to get the value earned per worker rather than total value for all miners
    worker_count = int(json['worker_length'])
    # BUGFIX: guard against a zero worker count (ZeroDivisionError)
    if worker_count == 0:
        return False
    profit_btc_gh_day = float(json['value_last_day']) / worker_count
    # We need the profitability to be in: COIN / speed_suffix / day
    # multiply by ratio of 'COIN' to BTC
    profit_coin_gh_day = profit_btc_gh_day * coin_cost_ratio
    # finally set the API results into the main results object
    results.populate_pool_results(miner, worker, pool_id, algo, algo_idx,
                                  coin_idx, coin_cost, profit_coin_gh_day,
                                  hashrate_ghs, None, speed_suffix)
    return True
def poll(miner, results):
    """Poll a Claymore GPU miner and record its stats into *results*.

    Queries the miner's Claymore API, derives coin/algo/pool, per-GPU
    hashrates, temperatures, fan speeds, share counts and error rate, and
    calls ``results.populate_miner_results``. Unreachable miners are added
    to ``results.inactive_objects``.

    Returns:
        int: miner uptime in seconds, or -1 when the miner was unreachable.
    """
    logger.debug("poll_etherminer() - miner=" + str(miner.id))
    elapsed_secs = -1
    # get the miner stats
    miner_stats = cgminer.get_claymore_stats(miner.ip)
    # if miner not accessible... add to inactive
    if miner_stats.get('result') is None:
        results.inactive_objects.append(miner)
    else:
        result = miner_stats['result']
        # TODO - to support dual mining, will have to refactor this code
        # and probably call twice, once for each algo
        algo_idx = 0
        # version of claymore and COIN being mined
        version, coin = result[0].split(" - ")
        if coin is not None and len(coin) > 0 and coin != miner.hashrate[0]['coin']:
            # coin changed, need to update it
            coin_index = get_coin_index(coin)
            if coin_index >= 0:
                # set the COIN currently being mined
                miner.hashrate[algo_idx]['coin'] = coin
                # process the local config to get miner coin address and worker name
                GPU_CLAYMORE.process_miner_local_config(miner)
        # Get pool name
        pool = result[7]
        if coin == "ETH" or coin == "ETC" or "ethermine" in pool:
            algo = "ethash"
        else:
            # usually you can get the algo from the pool
            algo = get_algo(pool)
        if miner.hashrate[algo_idx]['algo'] != algo and algo is not None:
            miner.hashrate[algo_idx]['algo'] = algo
        # Get miner's GPU stats
        gpu_hashes_string = result[3]
        gpu_hashes = gpu_hashes_string.split(';')
        # count number of working GPUs
        Os = sum([int(x) > 0 for x in gpu_hashes])
        # count number of non-working GPUs (does not apply)
        Xs = 0
        # get number of in-active GPUs
        Gi = sum([int(x) == 0 for x in gpu_hashes])
        # Get total number of GPUs
        Ts = len(gpu_hashes)
        if Gi == Ts:
            logger.warning("process_claymore() - miner=" + str(miner.id) + " - Miner is IDLE.")
            # increase idle cycles
            miner.idle_cycles_count = miner.idle_cycles_count + 1
        # Get the temperatures of the miner, they are mixed with fan speeds
        temps_and_fans = result[6].split(';')
        # get the temps and convert to ints
        temps = temps_and_fans[::2]
        temps = [int(i) for i in temps]
        # get the fan speeds and convert to ints
        fan_pcts = temps_and_fans[1::2]
        fan_pcts = [int(i) for i in fan_pcts]
        # Get Total Hashrate for Miner (expressed in KH/s from the API)
        eth_stats = result[2].split(';')
        current_hashrate = int(eth_stats[0])
        # Get Gigahashes by converting the KH to GH
        ghs5s = float(int(current_hashrate) / 1000000)
        # TODO - revisit with dual mining
        algo_rate = miner.hashrate[algo_idx]['rate']
        if algo_rate is None or algo_rate == 0:
            # get the hashrate in the correct units
            normalized_rate, hashrate_units = get_normalized_hashrate_from_gigahash_per_sec(
                ghs5s, miner.hashrate[algo_idx]['units'])
            miner.hashrate[algo_idx]['rate'] = normalized_rate
        if miner.power_usage_watts == 0:
            if miner.hashrate[algo_idx]['power'] == 0:
                # TODO - if this is connected to a PDU, check whether there is Power Management on the PDU
                # and if it will tell you power usage. If so, use those estimates...
                # and set miner.power_usage_watts = XXX
                # If not...
                pass
            if miner.power_usage_watts == 0:
                # estimate power usage based on card count and default GPU card power usage setting
                # FALLBACK TO CONFIG AND DEFAULTS
                miner.power_usage_watts = GPU_CLAYMORE.get_power_usage_estimate_by_card_count(Ts)
        eth_shares_good = int(eth_stats[1])
        eth_shares_stale = int(eth_stats[2])
        eth_shares_invalid = int(result[8].split(';')[0])
        eth_shares_total = eth_shares_good + eth_shares_stale + eth_shares_invalid
        # BUGFIX: a freshly (re)started miner has submitted no shares yet, and
        # the unguarded division raised ZeroDivisionError, aborting the poll.
        if eth_shares_total > 0:
            hw_error_rate_raw = ((eth_shares_stale + eth_shares_invalid) / eth_shares_total) * 100
        else:
            hw_error_rate_raw = 0
        hw_error_rate = math_functions.get_formatted_float_rate(hw_error_rate_raw, 4)
        # Get uptime
        elapsed_secs = int(result[1]) * 60
        chip_stats = MinerChipStats(Os, Xs, Gi, Ts)
        results.populate_miner_results(miner, elapsed_secs, miner.worker_name, algo, pool,
                                       chip_stats, temps, fan_pcts, ghs5s, hw_error_rate)
        if (Gi > 0):
            # some special debug for GPU miner issues
            logger.debug("Missing {} GPUs in miner {}, stats={}".format(
                Gi, miner.ip, gpu_hashes_string))
    return elapsed_secs