def find_tags(self):
    """
    Find all RuuviTags.

    Listens for broadcasting tags for ``timeout`` seconds and records
    each unique MAC the first time it is seen.

    Returns:
        dict: MAC -> sensor state of found sensors. Tags reporting
        data_format >= 4 are excluded (polling them locks up the thread).
    """
    global BLUEZ_ERROR_DISPLAYED
    logger.debug("Try to find tags...")
    # This is the amount of time to listen for tags - TODO get from config file
    timeout = 10
    tags = {}        # tags we keep
    tags_skip = {}   # tags deliberately ignored (data_format >= 4)
    macs = []        # empty list == listen for all MACs
    start = current_milli_time()
    try:
        for data in RuuviTagSensor._get_ruuvitag_datas(macs, timeout):
            # enforce the listen window ourselves as well
            if current_milli_time() > (start + (timeout * 1000)):
                break
            # only record the first advertisement per MAC
            if (data[0] in tags) or (data[0] in tags_skip):
                continue
            logger.debug("Found TAG {}, DATA {}".format(data[0], data[1]))
            data_format = data[1]['data_format']
            if data_format < 4:
                tags[data[0]] = data[1]
            else:
                tags_skip[data[0]] = data[1]
                logger.debug("Skipping data_format 4 tag - polling locks up thread")
    except Exception:
        # was a bare 'except:' which also swallowed SystemExit /
        # KeyboardInterrupt and hid the traceback; log it so BLE
        # failures are diagnosable
        logger.exception("error while finding tags")
    if not tags:
        # show the BLUEZ installation hint only once per process
        if not BLUEZ_ERROR_DISPLAYED:
            BLUEZ_ERROR_DISPLAYED = True
            logger.warning("No RuuviTags Found. Verify this is running on Raspian and "
                           "that you have installed BLUEZ: 'sudo apt-get install bluez-hcidump'")
    return tags
def process_miner_local_config(miner):
    """
    Retrieve the miner's coin address and worker name from the Claymore
    local configuration file ('config.txt').

    This is only possible if remote management is enabled in config.txt
    on the server and we have full access. On success sets
    miner.coin_address and miner.worker_name; on any failure logs a
    warning explaining how to fix the configuration.
    """
    worker = ""
    # BUGFIX: initialize so the post-loop check cannot raise NameError
    # when no '-ewal' line is present (previously that NameError was
    # silently masked by a bare 'except:')
    coin_address = None
    if miner.coin_address is not None and len(miner.coin_address) == 0:
        # normalize empty string: first set it to None
        miner.coin_address = None
    # assume no issue with retrieving the config file
    had_issue = False
    # TODO - move this section to a new module
    try:
        # try to get it from config.txt file
        filename = 'config.txt'
        config_txt_response = cgminer.get_claymore_configfile(
            miner.ip, filename)
        if config_txt_response.get('result') is not None:
            if config_txt_response['result'][0] == filename:
                # the file content comes back hex-encoded
                config_txt = bytes.fromhex(
                    config_txt_response['result'][1]).decode('utf-8')
                lines = config_txt.split('\n')
                for line in lines:
                    if line.startswith('-ewal'):
                        worker = line.split(' ')[1]
                        # get the coin address and worker
                        coin_address, worker = parse_worker_string(
                            miner, worker)
                        miner.coin_address = coin_address
                        miner.worker_name = worker
                        break
                if coin_address is None:
                    # no usable '-ewal' wallet was found/parsed
                    had_issue = True
    except Exception:
        # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate; any API/decode error means we had an issue
        had_issue = True
    if had_issue:
        logger.warning(
            "MinerMedic could not automatically retrieve miner "
            "wallet address from config file ('config.txt'). "
            "To do this, ensure that the '-ewal' argument is "
            "not commented and set correctly. "
            "Otherwise, the coin address and worker name can "
            "be set manually in the object's configuration.")
def get_power_usage_estimate_by_card_count(card_count, overhead_watts=150):
    """
    Estimate total power usage (watts) of a GPU rig from its card count.

    Args:
        card_count: number of GPU cards in the rig.
        overhead_watts: fixed overhead for the system and PSUs themselves
            (default 150, the previously hard-coded value), considering
            this is probably a GPU rig.

    Returns:
        int: estimated watts == card_count * watts-per-card + overhead.
    """
    try:
        from phenome import flask_app
        card_watts = int(
            flask_app.config.get('POWER_GPU_CARD_WATTS_DEFAULT'))
    except Exception:
        # narrowed from bare 'except:'; covers missing app, missing
        # config key (int(None) -> TypeError), or bad value
        logger.warning(
            "Could not get a configured WATT Power Usage per GPU card. "
            "Using default of {} Watts per card".format(
                _POWER_GPU_CARD_WATTS_DEFAULT))
        card_watts = _POWER_GPU_CARD_WATTS_DEFAULT
    # estimate the total watts == the number of GPU cards * watts/per/card
    # + some overhead for the system and PSUs themselves
    return (card_count * card_watts) + overhead_watts
def get_hashrate_info(results, miner, algo):
    """
    Get Hashrate Information for a particular Miner and Algo.

    Returns:
        dict: the hashrate info, or None when the model/algo
        combination is unknown (in which case a warning is logged).
    """
    info = results.get_hashrate_info(miner, algo)
    if info is not None:
        return info
    # unknown model/algo combination - surface it in the log
    logger.warning("Model/Algo combination does not exist for "
                   "miner model '{}' and algo '{}'".format(
                       miner.model.model, algo))
    return info
def get_algo_by_index(algo_index):
    """
    Retrieves algo when passed an algo INDEX.

    Returns: the algo for the given index, or None when the
    index is not defined in the _ALGOS_ enum.
    """
    # reverse map: index -> algo
    reverse_map = global_enums.get_reverse_map_enum('_ALGOS_')
    try:
        return reverse_map[algo_index]
    except KeyError:
        logger.warning(
            "There is no Algo Index '{}' defined".format(algo_index))
        return None
def get_algo_index(algo):
    """
    Retrieves algo INDEX when passed an algo name.

    Returns: Integer index, or -1 when the algo is not defined.
    """
    enum_algos = global_enums.get_enum('_ALGOS_')
    # normalize the algo string: strip dashes, lowercase
    lookup_key = algo.replace("-", "").lower()
    try:
        return enum_algos[lookup_key].value
    except KeyError:
        logger.warning("There is no Algo '{}' defined. "
                       "Check the _ALGOS_ ENUM for the key {}.".format(
                           algo, lookup_key))
        return -1
def geturl(self, url='index.htm'):
    """
    Get a URL from the userid/password protected powerswitch page.

    Retries up to self.retries times on request errors.
    Return None on failure.
    """
    full_url = "http://%s/%s" % (self.hostname, url)
    result = None
    request = None
    attempts_left = self.retries
    while attempts_left > 0:
        attempts_left -= 1
        try:
            request = requests.get(full_url,
                                   auth=(self.userid, self.password,),
                                   timeout=self.timeout)
        except requests.exceptions.RequestException as e:
            logger.warning("Request to URL {} timed out - {} retries left.".format(full_url, attempts_left))
            logger.debug("Caught exception {}".format(e))
            continue
        # only a 200 is a success; anything else falls through and retries
        if request is not None and request.status_code == 200:
            result = request.content
            break
    if request is not None:
        logger.debug('Request to URL {} - response code: {}'.format(full_url, request.status_code))
    return result
def process_pool_apis(results, miner, worker, algo, pool):
    """
    Processes all pool APIs for the specific pool.

    Resolves the pool URL to a pool-stats class via the
    _POOL_STATS_CLASSNAMES_ enum (regex match on 'url_match'),
    instantiates it and calls get_pool_stats(...) to populate `results`.
    Errors are logged; nothing is raised to the caller.

    Returns: None
    """
    success = False
    pool_attrs = None
    pool_class = None
    pool_not_supported = False
    # pool_lookups is a module-level cache (pool URL -> attribute dict)
    # so the enum/regex scan runs only once per pool URL
    if pool not in pool_lookups:
        # get the pool classnames enum
        enum_pools = global_enums.get_enum_details('_POOL_STATS_CLASSNAMES_')
        for enum_pool in enum_pools:
            pool_attrs = enum_pool.value[0]
            pool_url_match = pool_attrs['url_match']
            m = re.search(pool_url_match, pool)
            if m is not None:
                # add the name to the attributes
                pool_attrs['name'] = enum_pool.name
                # add the attributes to the list.
                pool_lookups[pool] = pool_attrs
                break
            else:
                # no match yet - clear so an unmatched scan ends with None
                pool_attrs = None
    else:
        pool_attrs = pool_lookups[pool]
    if pool_attrs is not None:
        pool_id = pool_attrs["value"]
        pool_classname = pool_attrs["classname"]
        # get the model class to operate with
        # (locate() resolves the dotted classname at runtime)
        pool_class = locate(pool_classname)
        unit_tests_running = False
        if pool_class is None:
            logger.error("pool classname {} could not be initialized".format(
                pool_classname))
        else:
            # create the mining pool
            mining_pool = pool_class(pool, pool_attrs)
            try:
                # flag planted on the sys module by the test harness;
                # missing attribute simply means "not under test"
                unit_tests_running = sys._unit_tests_running
            except:
                pass
            if not unit_tests_running:
                try:
                    # add the relations (we do not need them for unit tests)
                    mining_pool.relate(miner)
                except Exception as ex:
                    logger.debug(ex)
            # execute the "get_pool_stats(...)" method
            success = mining_pool.get_pool_stats(results, miner, worker,
                                                 algo, int(pool_id), pool)
    else:
        pool_not_supported = True
        logger.warning("POOL {} not yet supported".format(pool))
        # TODO - issue notification? log an issue on GitHub?
    if success is False and pool_not_supported is False:
        # There is a legitimate error...
        if pool_class is None:
            logger.error(
                "No Pool support found for Pool/Model/Algo {}/{}/{}".format(
                    pool, miner.model.model, algo))
        else:
            logger.error(
                "Pool stats not returned for Pool/Model/Algo {}/{}/{}".format(
                    pool, miner.model.model, algo))
def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
    """
    Fetch Mining Pool Hub stats for this miner and populate `results`.

    Iterates self._pool_info (one entry per multi-algo sub-pool), queries
    the user hashrate API per coin until a non-zero hashrate is found,
    then records hashrate (GH/s) and profitability (COIN/GH/day).

    Returns: bool - True when results were populated, False on any
    missing precondition (no pool info, no API key/user id, unknown algo).
    """
    # initialize with a 0 hashrate
    hashrate = 0.0
    # profit on MPOS site is measured in BTC/GH/DAY
    profit_btc_gh_day = 0.0
    if self._pool_info is None:
        # we are SOL
        logger.error("Cannot get needed POOL DATA from (getminingandprofitsstatistics) API call.")
        return False
    # get the API KEY and USER ID
    api_key, user_id = self._get_api_key_and_user_id()
    if api_key is None or len(api_key)==0 or user_id is None or user_id == 0:
        warn_msg = "MINING POOL HUB Needs API_KEY and USER_ID"
        warn_msg += " in order to retrieve Miner Data. Set using UI or .INI file"
        logger.warning(warn_msg)
        return False
    # Get the pool info, should be a list of potential multialgo pools based on port
    # NOTE(review): if self._pool_info is an empty list, 'coin' below is
    # never bound and get_coin_index(coin) would raise NameError - confirm
    # _pool_info is always non-empty here
    for pool in self._pool_info:
        # 'algo' parameter is intentionally overwritten by the pool entry
        algo = pool['algo']
        coin = pool['coin_name']
        # profit == BTC / GH / DAY
        profit_btc_gh_day = pool['profit']
        # try to build a URL:
        url = self._build_api_hashrate_url(api_key, user_id, coin)
        # create an API object
        api = RestAPI(url=url, port=80)
        # get the data
        json = api.get_json()
        if json:
            hashrate = json['getuserhashrate']['data']
        if hashrate > 0:
            # this must be the right pool
            break
    # get the algo
    algo_idx = get_algo_index(algo)
    if algo_idx == -1:
        return False
    # get the index and cost of the coin
    coin_idx = get_coin_index(coin)
    coin_cost = get_coin_cost_by_index(coin_idx,'USD')
    coin_cost_btc = get_coin_cost('BTC', 'USD')
    # NOTE(review): coin_cost == 0 would raise ZeroDivisionError here - verify
    # upstream cost lookups can never return 0
    coin_cost_ratio = coin_cost_btc/coin_cost
    # The API is returning a number 1000 times larger than the WEB Dashboard, which is reporting in MH/S
    # We are going to surmise that the hashrate is reported in KH/s from the API
    # BUT we want to convert to GH/s to be consistent with profitability here
    # so force suffix to be GH
    speed_suffix = "GH"
    # and divide by 1M to get hashrate expressed in GH
    hashrate_ghs = (hashrate / 1000000)
    # We need the profitability to be in: COIN / speed_suffix / day
    # multiply by ratio of 'COIN' to BTC
    profit_coin_gh_day = profit_btc_gh_day * coin_cost_ratio
    # finally set the API results into the main results object
    results.populate_pool_results(miner, worker, pool_id, algo, algo_idx,
                                  coin_idx, coin_cost, profit_coin_gh_day,
                                  hashrate_ghs, None, speed_suffix)
    return True
def poll(miner, results):
    """
    Poll a Baikal miner (SG-MINER 5.6.6 API) and populate `results`.

    Collects per-board hashrate, temperatures and chip counts via the
    cgminer API; appends the miner to results.inactive_objects when it
    is unreachable.

    Returns: int - elapsed seconds reported by the device, 0 if idle,
    -1 if the miner was not accessible.
    """
    global errors
    # Uses SG-MINER 5.6.6
    logger.debug("poll_baikal() - miner=" + str(miner.id))
    # get the DEVICE / CARD stats
    miner_stats = cgminer.get_baikal_devs(miner.ip)
    elapsed_secs = -1
    # if miner STILL not accessible
    status = miner_stats['STATUS'][0]['STATUS']
    if status == 'error':
        errors = True
        results.inactive_objects.append(miner)
    else:
        # chip accounting: Os=working, Xs=dead, Ds=?, Ts=total
        worker = ""
        total_mh = 0
        Xs = 0
        Os = 0
        Ds = 0
        Ts = 0
        temps = []
        fan_speeds = []
        elapsed_secs = 0
        total_chips = 0
        pool_name = ""
        algo = ""
        worker = ""
        chips_list = 0
        # Get total number of chips according to miner's model
        # convert miner.model.chips to int list and sum
        # NOTE(review): code actually reads miner.chips, not
        # miner.model.chips as the comment says - confirm which is intended
        try:
            chips_list = [int(y) for y in miner.chips.split(',')]
            Ts = sum(chips_list)
        except:
            if Ts == 0:
                logger.debug("process_baikal() - miner=" + str(miner.id) +
                             " - chips are EMPTY - check object properties.")
        # NOTE(review): if the parse above failed, chips_list stays the int 0
        # and chips_list[0] in the board loop below would raise TypeError -
        # verify miner.chips is always populated
        # Get active pool
        miner_pools = cgminer.get_pools(miner.ip)
        for pool in miner_pools['POOLS']:
            if pool['Stratum Active'] == True:
                worker = pool['User']
                algo = pool['Algorithm']
                #pool_name = pool['Name']
                # get the PORT as well, different pools/algos at different ports
                pool_name = pool['URL'].split("//",1)[-1]
                break
        if algo == "":
            # no active stratum pool -> the miner is idle
            logger.warning("process_baikal() - miner=" + str(miner.id) + " - Miner is IDLE.")
            # increase idle cycles
            miner.idle_cycles_count = miner.idle_cycles_count + 1
        # get the coin address and worker
        coin_address, worker = parse_worker_string(miner, worker)
        # Get other miner stats (temps, chips, etc.)
        for board in miner_stats['DEVS']:
            if board['Enabled']=="Y":
                # get the total MH
                board_mh = float(board['MHS 5s'])
                total_mh = total_mh + board_mh
                # get the working chips
                if board['Status']!="Alive" or board_mh == 0:
                    # we have a dead board! count its chips as dead
                    # (assumes all boards have chips_list[0] chips)
                    Xs = Xs + chips_list[0]
                else:
                    # this board is running
                    elapsed_secs = board['Device Elapsed']
                    Os = Os + chips_list[0]
                    board_temp = board['Temperature']
                    temps.append(board_temp)
        # convert to GigaHashes, since hashrate algo requires that to start
        hashrate_ghs = float(total_mh / 1000)
        # get the chip stats
        chip_stats = MinerChipStats(Os, Xs, Ds, Ts)
        # Get HW Errors
        summary_stats = cgminer.get_summary(miner.ip)
        # process error rate
        hw_error_rate = math_functions.get_formatted_float_rate(summary_stats['SUMMARY'][0]['Device Hardware%'], 4)
        # populate results
        results.populate_miner_results(miner, elapsed_secs, worker, algo, pool_name,
                                       chip_stats, temps, fan_speeds, hashrate_ghs,
                                       hw_error_rate)
    return elapsed_secs
def poll(miner, results):
    """
    Poll a Claymore (ethminer-style) GPU miner and populate `results`.

    Parses the positional Claymore API 'result' array: [0]=version/coin,
    [1]=uptime minutes, [2]=hashrate;shares;stale, [3]=per-GPU hashrates,
    [6]=temp;fan pairs, [7]=pool, [8]=invalid shares. Appends the miner
    to results.inactive_objects when it is unreachable.

    Returns: int - uptime in seconds, or -1 if the miner was not accessible.
    """
    logger.debug("poll_etherminer() - miner=" + str(miner.id))
    elapsed_secs = -1
    # get the miner stats
    miner_stats = cgminer.get_claymore_stats(miner.ip)
    # if miner not accessible... add to inactive
    if miner_stats.get('result') is None:
        results.inactive_objects.append(miner)
    else:
        result = miner_stats['result']
        # TODO - to support dual mining, will have to refactor this code
        # and probably call twice, once for each algo
        algo_idx = 0
        # version of claymore and COIN being mined
        version, coin = result[0].split(" - ")
        if coin is not None and len(
                coin) > 0 and coin != miner.hashrate[0]['coin']:
            # coin changed, need to update it
            coin_index = get_coin_index(coin)
            if coin_index >= 0:
                # set the COIN currently being mined
                miner.hashrate[algo_idx]['coin'] = coin
                # process the local config to get miner coin address and worker name
                GPU_CLAYMORE.process_miner_local_config(miner)
        # Get pool name
        pool = result[7]
        if coin == "ETH" or coin == "ETC" or "ethermine" in pool:
            algo = "ethash"
        else:
            # usually you can get the algo from the pool
            algo = get_algo(pool)
        if miner.hashrate[algo_idx]['algo'] != algo and algo is not None:
            miner.hashrate[algo_idx]['algo'] = algo
        # Get miner's GPU stats
        gpu_hashes_string = result[3]
        gpu_hashes = gpu_hashes_string.split(';')
        # count number of working GPU
        Os = sum([int(x) > 0 for x in gpu_hashes])
        # count number of non-working GPUs (does not apply)
        Xs = 0
        # get number of in-active GPUs
        Gi = sum([int(x) == 0 for x in gpu_hashes])
        # Get total number of GPUs
        Ts = len(gpu_hashes)
        if Gi == Ts:
            # every GPU reports 0 hashrate -> miner is idle
            logger.warning("process_claymore() - miner=" + str(miner.id) + " - Miner is IDLE.")
            # increase idle cycles
            miner.idle_cycles_count = miner.idle_cycles_count + 1
        # Get the temperatures of the miner, they are mixed with fan speeds
        temps_and_fans = result[6].split(';')
        # get the temps (even positions) and convert to ints
        temps = temps_and_fans[::2]
        temps = [int(i) for i in temps]
        # get the fan speeds (odd positions) and convert to ints
        fan_pcts = temps_and_fans[1::2]
        fan_pcts = [int(i) for i in fan_pcts]
        # Get Total Hashrate for Miner (expressed in KH/s from the API)
        eth_stats = result[2].split(';')
        current_hashrate = int(eth_stats[0])
        # Get Gigahashes by converting the KH to GH
        ghs5s = float(int(current_hashrate) / 1000000)
        # TODO - revisit with dual mining
        algo_rate = miner.hashrate[algo_idx]['rate']
        if algo_rate is None or algo_rate == 0:
            # get the hashrate in the correct units
            normalized_rate, hashrate_units = get_normalized_hashrate_from_gigahash_per_sec(
                ghs5s, miner.hashrate[algo_idx]['units'])
            miner.hashrate[algo_idx]['rate'] = normalized_rate
        if miner.power_usage_watts == 0:
            if miner.hashrate[algo_idx]['power'] == 0:
                # TODO - if this is connected to a PDU, check whether there is Power Management on the PDU
                # and if it will tell you power usage. If so, use those estimates...
                # and set miner.power_usage_watts = XXX
                # If not...
                pass
            if miner.power_usage_watts == 0:
                # estimate power usage based on card count and default GPU card power usage setting
                # FALLBACK TO CONFIG AND DEFAULTS
                miner.power_usage_watts = GPU_CLAYMORE.get_power_usage_estimate_by_card_count(
                    Ts)
        eth_shares_good = int(eth_stats[1])
        eth_shares_stale = int(eth_stats[2])
        eth_shares_invalid = int(result[8].split(';')[0])
        eth_shares_total = eth_shares_good + eth_shares_stale + eth_shares_invalid
        # NOTE(review): eth_shares_total == 0 (fresh miner, no shares yet)
        # would raise ZeroDivisionError here - confirm this can't happen
        hw_error_rate_raw = ((eth_shares_stale + eth_shares_invalid) / eth_shares_total) * 100
        hw_error_rate = math_functions.get_formatted_float_rate(
            hw_error_rate_raw, 4)
        # Get uptime
        elapsed_secs = int(result[1]) * 60
        chip_stats = MinerChipStats(Os, Xs, Gi, Ts)
        results.populate_miner_results(miner, elapsed_secs, miner.worker_name, algo,
                                       pool, chip_stats, temps, fan_pcts, ghs5s,
                                       hw_error_rate)
        if (Gi > 0):
            # some special debug for GPU miner issues
            logger.debug("Missing {} GPUs in miner {}, stats={}".format(
                Gi, miner.ip, gpu_hashes_string))
    return elapsed_secs