def scan(self, collector):
    """
    Scans for RuuviTags using BlueTooth and registers any new ones
    with the collector.

    Returns:
        None
    """
    logger.debug("Start scan...")

    try:
        found = self.find_tags()

        # Register every discovered tag the collector does not already track.
        for mac, tag_data in found.items():

            if collector.has_object(mac):
                continue

            # persist the tag into the DB; returns the sensor on success
            sensor = self.add(mac, tag_data)

            if sensor:
                # successfully added - hand it to the collector
                collector.populate(sensor)

    except Exception as ex:
        logger.error("Problem during scan: {}".format(ex))
示例#2
0
    def populate_fan_results(self, miner, fan_speeds):
        """
        Populates the Fan Results for a Miner
        :param miner: Miner Object
        :param fan_speeds: (List) of Fan Speeds for that Miner
        :return: None
        """

        fan_avg = 0
        fan_status_pct = 100

        try:

            # get the avg fan speed (0 when the list is empty)
            fan_avg = math_functions.get_average_of_list(fan_speeds, 0)
            if fan_avg < 101:
                # present the average as a display string ("" when idle)
                if fan_avg == 0:
                    fan_avg = ""
                else:
                    fan_avg = str(fan_avg) + "%"

            # each stopped/dead fan reduces the status proportionally.
            # BUGFIX: was (1 / len(fan_speeds)), which could never drop the
            # status below 99% even with every fan dead - yet the code
            # clamped at 0, so a proportional (100 / len) share was intended.
            if fan_speeds:
                dead_fans = sum(1 for fs in fan_speeds if fs <= 0)
                fan_status_pct -= dead_fans * (100 / len(fan_speeds))

            if fan_status_pct < 0:
                fan_status_pct = 0

        except Exception as ex:
            logger.error(ex)

        self.set_result(miner, 'fan_status', fan_status_pct)
        self.set_result(miner, 'fan_speeds', fan_speeds)
        self.set_result(miner, 'fan_speeds_avg', fan_avg)
    def get_outlet_state(self, outlet=None):
        """
        Retrieves state of the outlet (not the WIFI plug)

        Returns:
            Integer - _STATE_TYPES_
        """
        state = self.CONST_STATE_UNKNOWN

        try:
            plug = self._get_plug()

            if plug:
                # 'relay_state' of 0 in sysinfo means the outlet is powered OFF
                sysinfo = plug.info()['system']['get_sysinfo']
                if sysinfo['relay_state'] == 0:
                    state = self.CONST_STATE_OFF
                else:
                    state = self.CONST_STATE_ON

        except Exception as ex:
            logger.error(ex)

        return state
示例#4
0
def add(ipaddress, mac, uniqueid):
    """
    Create (or fetch) the core agent object for the given IP address.

    :param ipaddress: IP address of the agent
    :param mac: MAC address of the agent
    :param uniqueid: unique ID for the agent
    :return: the existing or newly created object, or None if the
        core-agent model is not initialized
    """

    from phenome_core.core.database.model.api import create_object, get_objectmodel_by_name, get_object_by_ip_and_model_id
    model = get_objectmodel_by_name(_CORE_AGENT_MODEL_CLASSNAME_)

    if model is None:
        logger.error("Model not initialized. Please check JSON/config and DB.")
        return None

    obj = get_object_by_ip_and_model_id(ipaddress, model.id)

    if obj is None:

        from sqlalchemy.exc import IntegrityError

        try:
            # create a new core agent
            obj = create_object(model.id, ipaddress, mac, uniqueid)

            # commit
            db_session.add(obj)
            db_session.commit()

        except IntegrityError as e:
            # BUGFIX: the failure was silently swallowed; roll back and log
            # so duplicate/constraint problems are visible to the operator
            db_session.rollback()
            logger.error("Could not create core agent for {}: {}".format(ipaddress, e))

    return obj
示例#5
0
    def __process_miner_pool_apis(self, miner, worker, algo, pool):
        """Contact the pool APIs and run profitability processing for one miner."""
        try:
            process_pool_apis(self, miner, worker, algo, pool)
        except Exception as ex:
            msg = ("Problem processing pool APIs for "
                   "pool '{}' algo '{}', error {}")
            logger.error(msg.format(pool, algo, ex))
示例#6
0
    def _get_switch(self):
        """Return a PowerSwitch for this device, or None if it cannot be created."""
        try:
            return PowerSwitch(hostname=self.ip,
                               userid=self.username,
                               password=self.password,
                               cycletime=3)
        except Exception as ex:
            logger.error(ex)
            return None
    def find_tags(self):

        """
        Find all RuuviTags.

        Returns:
            dict: MAC and state of found sensors

        """

        global BLUEZ_ERROR_DISPLAYED

        logger.debug("Try to find tags...")

        # This is the amount of time to listen for tags - TODO get from config file
        timeout = 10

        tags = {}
        tags_skip = {}

        macs = []
        start = current_milli_time()

        try:

            for data in RuuviTagSensor._get_ruuvitag_datas(macs, timeout):

                # hard stop in case the underlying generator ignores its timeout
                if current_milli_time() > (start+(timeout*1000)):
                    break

                # skip tags we have already seen (accepted or skipped)
                if (data[0] in tags) or (data[0] in tags_skip):
                    continue

                logger.debug("Found TAG {}, DATA {}".format(data[0], data[1]))

                data_format = data[1]['data_format']

                if data_format < 4:
                    tags[data[0]] = data[1]
                else:
                    # polling data_format >= 4 tags locks up the thread
                    tags_skip[data[0]] = data[1]
                    logger.debug("Skipping data_format 4 tag - polling locks up thread")

        except Exception as ex:
            # BUGFIX: was a bare except that hid the actual failure reason
            logger.error("error while finding tags: {}".format(ex))

        # 'tags' is always a dict here, so an emptiness check is sufficient
        if not tags:
            if not BLUEZ_ERROR_DISPLAYED:
                # only warn once per process
                BLUEZ_ERROR_DISPLAYED = True
                logger.warning("No RuuviTags Found. Verify this is running on Raspian and "
                               "that you have installed BLUEZ: 'sudo apt-get install bluez-hcidump'")

        return tags
示例#8
0
    def populate_miner_results(self, miner, elapsed_secs, worker, algo, pool,
                               chips, temps, fan_speeds, hashrate_ghs5s,
                               hw_error_rate):
        """
        Populates all Miner Results for a particular poll cycle.

        :param miner: Miner Object
        :param elapsed_secs: Number of Seconds that miner has been running
        :param worker: (String) worker ID
        :param algo: (String) Algorithm running on Miner at that time
        :param pool: The Pool ID being mined
        :param chips: MinerChipStats object
        :param temps: (list) of temps on miner
        :param fan_speeds: (list) of fan speeds on miner
        :param hashrate_ghs5s: "current" Hashrate in GigaHash/sec (over the last 5 or more seconds)
        :param hw_error_rate: rate of hardware errors
        :return: None
        """

        # basic per-cycle results
        self.set_result(miner, 'uptimes', timedelta(seconds=elapsed_secs))
        self.set_result(miner, 'temperatures', temps)  # all temps
        self.set_result(miner, 'temperature',
                        math_functions.get_average_of_list(temps, 0))
        self.set_result(miner, 'hw_error_rates', hw_error_rate)

        # delegate chip and fan population
        self.populate_chip_results(miner, chips)
        self.populate_fan_results(miner, fan_speeds)

        # unit tests may switch off the pool / hashrate passes via sys flags
        run_pool_apis = True
        run_hashrate_calcs = True
        try:
            if sys._unit_tests_running:
                run_pool_apis = sys._unit_tests_MINERMEDIC_CALL_POOL_APIS
                run_hashrate_calcs = sys._unit_tests_MINERMEDIC_CALL_HASHRATE_CALCS
        except:
            pass

        if run_pool_apis:
            try:
                self.__process_miner_pool_apis(miner, worker, algo, pool)
            except Exception as ex:
                logger.error(
                    "Problem while processing POOL APIS, pool='{}', error='{}'"
                    .format(pool, ex))

        if run_hashrate_calcs:
            try:
                self.__process_hashrate_calculations(miner, hashrate_ghs5s,
                                                     algo)
            except Exception as ex:
                logger.error("Problem while processing Hashrate Calcs, "
                             "algo='{}', hashrate='{}', error='{}'".format(
                                 algo, hashrate_ghs5s, ex))
示例#9
0
    def __init__(self):
        """
        Initialize the CoreAgent and resolve its object model id.

        If the core-agent model is not registered in the DB, the error is
        logged and model_id is set to None.
        """

        super(CoreAgent, self).__init__()

        from phenome_core.core.database.model.api import get_objectmodel_by_name
        model = get_objectmodel_by_name(_CORE_AGENT_MODEL_CLASSNAME_)

        if model is None:
            logger.error(
                "Model not initialized. Please check JSON/config and DB.")
            # BUGFIX: previously returned without setting model_id, leaving a
            # half-initialized instance that raised AttributeError later on.
            self.model_id = None
            return

        self.model_id = model.id
示例#10
0
    def relate(self, miner):

        """
        Creates a relationship between the Miner and the Mining Pool.

        Ensures a 'MINING_ON' relation exists from the miner to this pool,
        then enables only the relation pointing at the current pool and
        disables relations to any other pool.

        Returns:
                None
        """

        try:

            from phenome_core.core.database.db import db_session

            # by default we will not update db
            updated_db = False

            # find out if there is a relationship from this miner to this pool
            # (NOTE: '== False' kept deliberately - has_relation may return a
            # non-bool falsy value; do not replace with 'not ...')
            if miner.has_relation(self._pool_object) == False:

                # get the "MINING_ON" relationship
                rtype_id = get_relation_type_id('MINING_ON')

                # add the relation
                miner.add_relation(self._pool_object, rtype_id, None)

                # commit so the model will fill in the relation objects, etc.
                db_session.commit()

            # are there any relations of classtype MINING POOL
            relations = miner.get_relations_by_classtype(get_model_classtype_id_by_name(self.CLASSTYPE))

            if relations:

                # reset the flag
                updated_db = False

                # iterate through relations to pools, and disable all relations except for the one to the current pool
                for r in relations:
                    if r.object_to is not None and r.object_to.id == self._pool_object.id:
                        if r.enabled == False:
                            r.enabled = True
                            updated_db = True
                    elif r.enabled == True:
                        r.enabled = False
                        updated_db = True

                if updated_db:
                    db_session.commit()

        except Exception as ex:
            # BUGFIX: was a bare except that discarded the root cause
            logger.error("ERROR creating relationship between miner '{}' to pool '{}': {}".format(miner.id, self._pool_object.unique_id, ex))
    def _get_plug(self):
        """Return a connected TPLinkSmartPlug for this device, or None on failure."""
        try:
            # no more than 2 sec timeout to get powered state just in case we are calling from UI/API
            return TPLinkSmartPlug(host=self.ip,
                                   connect=True,
                                   port=self.port,
                                   timeout=2)
        except Exception as ex:
            logger.error(ex)
            return None
示例#12
0
    def __determine_powered_state(self):
        """
        Determine this object's power state from its connected PDU outlet
        (or assume ON for a healthy PDU), updating self.power_state.

        :return: the resulting power state (a CONST_STATE_* value)
        """

        # BUGFIX: bind 'pdu' before the try block - the except handler used
        # to reference it, which raised UnboundLocalError when the PDU
        # lookup itself failed.
        pdu = None

        try:
            pdu = self.__get_connected_pdu()
            if pdu is not None and self.connected_outlet:
                self.power_state = pdu.get_outlet_state(self.connected_outlet)
            elif self.is_pdu and self.health == 0:
                # again, assume it is Powered ON - but this should be set by the Poller
                self.power_state = self.CONST_STATE_ON

        except Exception as ex:
            pdu_id = pdu.unique_id if pdu is not None else "unknown"
            logger.error("Could not get outlet state {} on PDU {} - "
                         "ERROR {}".format(self.connected_outlet,
                                           pdu_id, ex))
            self.power_state = self.CONST_STATE_UNKNOWN

        return self.power_state
示例#13
0
    def _get_api_key_and_user_id(self):
        """
        Resolve the Mining Pool Hub API key and user id.

        Prefers the Flask app config; falls back to the pool object. Any
        config-supplied key is synced back onto the pool object.

        :return: tuple of (api_key, user_id)
        """

        from phenome import flask_app
        api_key = flask_app.config.get("MINING_POOL_HUB_API_KEY")
        user_id = flask_app.config.get("MINING_POOL_HUB_USER_ID")

        if not api_key:
            # not configured at the app level - try to get from object
            api_key = self._pool_object.api_key
            user_id = self._pool_object.user_id

        if api_key:
            # we have an API key; keep the pool object in sync with it.
            # (simplified from the tautological original:
            #  "x is None or (x is not None and x != api_key)" == "x != api_key")
            try:
                if self._pool_object.api_key != api_key:
                    from phenome_core.core.database.db import db_session
                    self._pool_object.api_key = api_key
                    self._pool_object.user_id = user_id
                    db_session.commit()
            except Exception as ex:
                # BUGFIX: bare except dropped the failure detail
                logger.error("Cannot commit API parameters for Mining Pool Hub Pool object: {}".format(ex))

        return api_key, user_id
示例#14
0
    def __process_hashrate_calculations(self, miner, hashrate_ghs5s, algo):
        """
        Run the hashrate calculations for a miner, first reconciling the
        algo against the one reported by the pool.

        :param miner: Miner Object
        :param hashrate_ghs5s: current hashrate in GH/s
        :param algo: (String) algorithm the caller believes is running
        :return: None
        """

        # it is possible with multi-algo miners and pools to switch the ALGO
        # let us double check the current algo vs the algo stored in the results
        # since the hashrate calcs depend on the ALGO
        try:
            algo_idx = get_algo_index(algo)
            algo_idx_from_pool = self.get_result(miner.id, 'algo')
            if algo_idx == -1 or (algo_idx_from_pool
                                  and algo_idx != algo_idx_from_pool):
                # get the changed algo
                algo = get_algo_by_index(algo_idx_from_pool)
        except Exception:
            # best effort - fall back to the caller-supplied algo
            pass

        try:
            # do all hashrate stuff
            calculate_hashrates(self, miner, hashrate_ghs5s, algo)
        except Exception as ex:
            # BUGFIX: bare except logged nothing about the actual failure
            logger.error("Problem processing hashrate for "
                         "miner '{}' hashrate '{}': {}".format(
                             miner.id, hashrate_ghs5s, ex))
示例#15
0
    def execute(self):
        """
        Check the miner's current hashrate (and the pool-side view of it)
        against the configured warning/error thresholds.

        Reads 'hashrates' and 'hashrates_by_algo' from self.results, sets
        self.has_error / self.has_warning / self.error_message, and feeds
        the health-score computation when both thresholds are configured.

        Returns:
            bool: True if the check ran (or exited early with nothing to
            do); False when error/warning levels are not both set, which
            skips health-score processing.
        """

        # ok get the hashrate info from the miner
        hashrate_info_miner = self.results.get_result(self.object.id,
                                                      'hashrates')

        if hashrate_info_miner is None:
            # first time through here, exit
            return True

        # get some basic info from the miner hashrate stats
        current_hashrate = hashrate_info_miner.get('current')
        hashrate_units = hashrate_info_miner.get('units')

        if current_hashrate == 0:
            # the miner is just not hashing right now
            # should we add to the health score or just pass?
            # this will probably get taken care of by IDLE check
            return True

        # init some vars for the pool hashrate stats
        reported_hashrate_pool = None
        accepted_hashrate_pool = None
        current_hashrate_pool = 0
        speed_suffix_pool = None

        #  ok get the hashrate info from the pool
        hashrate_info_pool = self.results.get_result(self.object.id,
                                                     'hashrates_by_algo')

        pool_name = None

        # only handle the single-algo case; the loop below runs at most once
        if hashrate_info_pool is not None and len(hashrate_info_pool) == 1:

            # TODO - if it is larger than 1, check the current algo
            # that the miner is processing and match the hashrate by algo

            for key, value in hashrate_info_pool.items():

                pool_name = key

                # the accepted hashrate from the pool (this is really what you are getting paid for)
                accepted_hashrate_pool = value.get('accepted')

                # some pools include "reported" hashrate, should represent how much your miner is hashing,
                # and it should be very close if not the same as the "current_hashrate" reported directly
                # from your miner.
                reported_hashrate_pool = value.get('reported')

                # get the speed of the hashing rate from the pool for conversions
                speed_suffix_pool = value.get('speed_suffix')

                break

        # first check if there could be a problem from the pool's perspective
        # (convert pool-side rates into the miner's units for comparison)
        if accepted_hashrate_pool is not None and speed_suffix_pool is not None:
            current_hashrate_pool = get_converted_hashrate(
                accepted_hashrate_pool, speed_suffix_pool, hashrate_units)
            reported_hashrate_pool = get_converted_hashrate(
                reported_hashrate_pool, speed_suffix_pool, hashrate_units)

        # TODO - possibly use moving average in the future
        # https://stackoverflow.com/questions/13728392/moving-average-or-running-mean

        # for now, use the max hashrate achieved thus far to check expected hashrate for the miner

        max_hashrate = hashrate_info_miner['max']
        units = hashrate_info_miner['units']
        error_level = self.args['error_level']
        warning_level = self.args['warning_level']

        has_error_miner = False
        has_error_pool = False
        has_warn_pool = False
        has_warn_miner = False

        # Handle Error Levels
        # (levels are percentage strings; an error/warning fires when the
        # observed hashrate falls below that fraction of the max)

        if error_level is None:
            error_pct = 0
        else:
            error_pct = percent_to_float(error_level)
            has_error_pool = (current_hashrate_pool <
                              (error_pct * max_hashrate))
            has_error_miner = (current_hashrate < (error_pct * max_hashrate))
            # do not take into account the pool hashrate to trigger an "ERROR"...
            self.has_error = (has_error_miner)

        # Handle Warning Levels

        if warning_level is None:
            warn_pct = 0
        else:
            warn_pct = percent_to_float(warning_level)
            has_warn_pool = (current_hashrate_pool < (warn_pct * max_hashrate))
            has_warn_miner = (current_hashrate < (warn_pct * max_hashrate))
            self.has_warning = (has_warn_miner or has_warn_pool)

        if self.has_error:
            self.error_message = self._build_error_message(
                "Miner '{}'".format(self.object.unique_id), current_hashrate,
                units, "error", error_level, max_hashrate)

        elif self.has_warning:
            # NOTE(review): both warning branches pass error_level (not
            # warning_level) to _build_error_message - confirm intended.
            if has_warn_pool:
                self.error_message = self._build_error_message(
                    "Pool '{}'".format(pool_name), current_hashrate_pool,
                    units, "warning", error_level, max_hashrate)
            else:
                self.error_message = self._build_error_message(
                    "Miner '{}'".format(self.object.unique_id),
                    current_hashrate, units, "warning", error_level,
                    max_hashrate)

        # do not process health scores if both levels are not set
        if error_pct == 0 or warn_pct == 0:
            return False

        if self.has_error or self.has_warning:

            # now, determine amount of hashrate "missing" from the total potential hashrate
            if has_error_pool or has_warn_pool:
                missing_hashrate = round(max_hashrate - current_hashrate_pool)
            else:
                missing_hashrate = round(max_hashrate - current_hashrate)

            warning_missing_hashrate = round(max_hashrate -
                                             (max_hashrate * warn_pct))
            error_missing_hashrate = round(max_hashrate -
                                           (max_hashrate * error_pct))
            compute_health_score(self.object, self.results, missing_hashrate,
                                 warning_missing_hashrate,
                                 error_missing_hashrate)

            try:
                if self.has_error:
                    if self.results.object_states.get(
                            self.object.id) is not None:
                        # set the object state flag in the case object states are used
                        self.results.object_states[self.object.id].__setattr__(
                            self._ERROR_STATE_KEY_, 1)
            except:
                logger.error("Problem setting error_state_key '{}' "
                             "for OBJECT ID={}".format(self._ERROR_STATE_KEY_,
                                                       self.object.id))

        else:

            # some special cases?

            # FOR DEBUG
            if current_hashrate > 0 and current_hashrate_pool > 0 and \
                    reported_hashrate_pool is not None and reported_hashrate_pool == 0:

                # there is a problem. the reported rate should be around one of these rates.
                logger.debug(
                    "Reported Hashrate from the pool is 0 for miner '{}'. "
                    "Something could be wrong with miner or the miner "
                    "is not reporting hashrate to the pool".format(
                        self.object.unique_id))

        return True
示例#16
0
def process_pool_apis(results, miner, worker, algo, pool):
    """
    Processes all pool APIs for the specific pool.

    Resolves the pool URL to a pool-stats class (caching the match in the
    module-level pool_lookups dict), instantiates it, optionally relates
    the miner to the pool, and calls get_pool_stats(...).

    :param results: results object handed through to the pool class
    :param miner: Miner Object
    :param worker: (String) worker ID
    :param algo: (String) algorithm currently running
    :param pool: (String) pool URL, used for matching and as cache key

    Returns:
        None

    """

    success = False
    pool_attrs = None
    pool_class = None
    pool_not_supported = False

    # first lookup in the module-level cache, keyed by pool URL
    if pool not in pool_lookups:

        # get the pool classnames enum
        enum_pools = global_enums.get_enum_details('_POOL_STATS_CLASSNAMES_')

        for enum_pool in enum_pools:

            pool_attrs = enum_pool.value[0]
            pool_url_match = pool_attrs['url_match']
            m = re.search(pool_url_match, pool)

            if m is not None:

                # add the name to the attributes
                # NOTE(review): this mutates the enum member's value dict
                # in place - shared across all lookups; confirm intended
                pool_attrs['name'] = enum_pool.name

                # add the attributes to the list.
                pool_lookups[pool] = pool_attrs

                break

            else:

                # no match - clear so a failed scan ends with None
                pool_attrs = None

    else:
        pool_attrs = pool_lookups[pool]

    if pool_attrs is not None:

        pool_id = pool_attrs["value"]
        pool_classname = pool_attrs["classname"]

        # get the model class to operate with (pydoc.locate on dotted path)
        pool_class = locate(pool_classname)

        unit_tests_running = False

        if pool_class is None:
            logger.error("pool classname {} could not be initialized".format(
                pool_classname))
        else:

            # create the mining pool
            mining_pool = pool_class(pool, pool_attrs)

            # sys._unit_tests_running only exists under the test harness
            try:
                unit_tests_running = sys._unit_tests_running
            except:
                pass

            if not unit_tests_running:

                try:
                    # add the relations (we do not need them for unit tests)
                    mining_pool.relate(miner)
                except Exception as ex:
                    logger.debug(ex)

            # execute the "get_pool_stats(...)" method
            success = mining_pool.get_pool_stats(results, miner, worker, algo,
                                                 int(pool_id), pool)

    else:
        pool_not_supported = True
        logger.warning("POOL {} not yet supported".format(pool))
        # TODO - issue notification? log an issue on GitHub?

    if success is False and pool_not_supported is False:
        # There is a legitimate error...
        if pool_class is None:
            logger.error(
                "No Pool support found for Pool/Model/Algo {}/{}/{}".format(
                    pool, miner.model.model, algo))
        else:
            logger.error(
                "Pool stats not returned for Pool/Model/Algo {}/{}/{}".format(
                    pool, miner.model.model, algo))
示例#17
0
    def get_pool_stats(self, results, miner, worker, algo, pool_id, pool_url):
        """
        Fetch hashrate and profitability for this miner from Mining Pool Hub
        and populate the results object.

        :param results: results object that receives populate_pool_results()
        :param miner: Miner Object
        :param worker: (String) worker ID
        :param algo: (String) algorithm hint (may be overridden by pool info)
        :param pool_id: numeric pool ID
        :param pool_url: pool URL (unused here beyond the signature)
        :return: True on success, False if pool data / credentials missing
        """

        # initialize with a 0 hashrate
        hashrate = 0.0

        # profit on MPOS site is measured in BTC/GH/DAY
        profit_btc_gh_day = 0.0

        if self._pool_info is None:
            # we are SOL
            logger.error("Cannot get needed POOL DATA from (getminingandprofitsstatistics) API call.")
            return False

        # get the API KEY and USER ID
        api_key, user_id = self._get_api_key_and_user_id()

        if api_key is None or len(api_key)==0 or user_id is None or user_id == 0:
            warn_msg = "MINING POOL HUB Needs API_KEY and USER_ID"
            warn_msg += " in order to retrieve Miner Data. Set using UI or .INI file"
            logger.warning(warn_msg)
            return False

        # Get the pool info, should be a list of potential multialgo pools based on port
        # NOTE(review): if _pool_info is an empty list, 'algo'/'coin' below
        # are never bound and get_algo_index/get_coin_index would fail - the
        # API presumably always returns at least one entry; confirm.
        for pool in self._pool_info:
            algo = pool['algo']
            coin = pool['coin_name']

            # profit == BTC / GH / DAY
            profit_btc_gh_day = pool['profit']

            # try to build a URL:
            url = self._build_api_hashrate_url(api_key, user_id, coin)

            # create an API object
            api = RestAPI(url=url, port=80)

            # get the data
            json = api.get_json()
            if json:
                hashrate = json['getuserhashrate']['data']
                if hashrate > 0:
                    # this must be the right pool
                    break

        # get the algo
        algo_idx = get_algo_index(algo)
        if algo_idx == -1:
            return False

        # get the index and cost of the coin
        coin_idx = get_coin_index(coin)
        coin_cost = get_coin_cost_by_index(coin_idx,'USD')
        coin_cost_btc = get_coin_cost('BTC', 'USD')

        # NOTE(review): assumes coin_cost is non-zero - a missing/zero price
        # would raise ZeroDivisionError here; confirm upstream guarantees
        coin_cost_ratio = coin_cost_btc/coin_cost

        # The API is returning a number 1000 times larger than the WEB Dashboard, which is reporting in MH/S
        # We are going to surmise that the hashrate is reported in KH/s from the API

        # BUT we want to convert to GH/s to be consistent with profitability here
        # so force suffix to be GH
        speed_suffix = "GH"

        # and divide by 1M to get hashrate expressed in GH
        hashrate_ghs = (hashrate / 1000000)

        # We need the profitability to be in: COIN / speed_suffix / day
        # multiply by ratio of 'COIN' to BTC
        profit_coin_gh_day = profit_btc_gh_day * coin_cost_ratio

        # finally set the API results into the main results object
        results.populate_pool_results(miner, worker, pool_id, algo, algo_idx, coin_idx, coin_cost,
                                      profit_coin_gh_day, hashrate_ghs, None, speed_suffix)

        return True
    def _test_miner(self, model_id, miner_class, sim_urls, sim_file, alt_miner_unique_id = None):
        """
        Poll a simulated miner and return its key poll results.

        Creates (or reuses) a temporary test miner object, points the REST
        API layer at the local simulator, runs one poll cycle, and reads
        back the results.

        :param model_id: model ID for the temporary test miner
        :param miner_class: miner class under test
            (NOTE(review): not referenced in this body - confirm needed)
        :param sim_urls: URLs the REST API layer should simulate
        :param sim_file: simulator data file under the miners resource dir
        :param alt_miner_unique_id: optional unique ID of another miner to
            read results for instead of the test miner
        :return: tuple of (fan_speeds, temperature, hw_error_rates,
            miner_chips, uptime); entries are None if polling failed
        """

        # clear any funkiness that could be going on with the DB session, since we are using temp objects, etc.
        db_session.rollback()

        # create a results object
        results = MinerResults()

        test_miner_unique_id = "UNIT_TEST_" + model_id

        # if for some reason it is in there...
        miner = get_object_by_unique_id(test_miner_unique_id)
        if miner is None:
            # create the miner
            miner = create_temporary_object(model_id, "127.0.0.1", "FF:FF:FF:FF:FF:FF", test_miner_unique_id)

        # store the ID for deletion later
        test_miner_id = miner.id
        results_miner_id = test_miner_id

        # defaults returned when polling fails
        fan_speeds = None
        temperature = None
        hw_error_rates = None
        miner_chips = None
        uptime = None

        # tell the REST API which URLS to simulate, which port to target
        sys._unit_tests_API_SIMULATE_URLS = sim_urls
        sys._unit_tests_API_TARGET_LOC = self.CONST_SIMULATOR_API_TARGET_LOC
        sys._unit_tests_API_TARGET_PORT = self.api_port

        # skip the POOL API and HASHRATE calls
        sys._unit_tests_MINERMEDIC_CALL_POOL_APIS = False
        sys._unit_tests_MINERMEDIC_CALL_HASHRATE_CALCS = False

        # start simulator

        # get path to data file
        simulator_data_path = self.absolute_path_of_test_directory + "/apps/minermedic/resources/miners/" + sim_file

        # start the simulator
        simulator = self.startSimulator(simulator_data_path, 'JSON_RPC', self.api_port)

        try:

            # run one poll cycle against the simulator
            miner.poll(results)

            if alt_miner_unique_id is not None:
                # specify another Miner ID to get the results for
                alt_miner = get_object_by_unique_id(alt_miner_unique_id)
                alt_miner_id = alt_miner.id
                results_miner_id = alt_miner_id

            fan_speeds = results.get_result(results_miner_id, 'fan_speeds_avg')
            temperature = results.get_result(results_miner_id, 'temperature')
            hw_error_rates = results.get_result(results_miner_id, 'hw_error_rates')
            miner_chips = results.get_result(results_miner_id, 'miner_chips')
            uptime = results.get_result(results_miner_id, 'uptimes')

        except Exception as ex:
            logger.error(ex)

        finally:

            # always stop the simulator, even when polling failed
            try:
                simulator.stop()
                time.sleep(2)
            except:
                pass

        # return the results
        return fan_speeds, temperature, hw_error_rates, miner_chips, uptime
示例#19
0
    def execute(self):
        """
        Executes a Generic Check for Actions.

        Reads the check's input (either an object attribute or a stored
        result), evaluates the configured error/warning expressions via
        BaseEvaluator, sets has_error / has_warning / error_message, and
        updates the health score.

        Returns:
            True if runs successfully.
        """

        object_results = {}
        has_error = False
        has_warning = False
        tested_alt_error = False
        healthscore = 0

        # some computed locals
        # IMPORTANT: locals() is passed to BaseEvaluator below, so these
        # local variable NAMES are part of the evaluation namespace used by
        # configured expressions - do not rename them.
        _result = None
        _result_avg = None
        _result_min = None
        _result_max = None

        # create the eval and parsers
        parser = GenericArgumentParser(self.args)

        # create local copies of variables that may be needed for parameter evaluation
        # (deliberately shadows the 'object'/'input' builtins for that purpose)
        object = self.object
        results = self.results
        input = parser.get('input')
        check_id = parser.get('id')
        use_avg = parser.get('use_avg')
        error_timestamp = parser.get('error_timestamp')
        error_state_key = parser.get('error_state_key')
        alternate_error_check = parser.get('has_error_alt')

        if input is None:
            logger.error(
                "No INPUT specified in args, check ID='{}'".format(check_id))
            return None
        try:
            if input.startswith("object."):
                # get the result directly from the object
                object_results = object.__getattribute__(input[7:])
            else:
                # get the result from the results
                object_results = self.results.get_result(self.object.id, input)
        except:
            pass

        if object_results is None:
            logger.warn(
                "No RESULTS found for input '{}', check ID='{}'".format(
                    input, check_id))
            return False

        # First, error detection
        # is it a single value or is it a list?

        # TODO - add value_type to determine how to treat here. Right now, we assume float/numeric

        if isinstance(object_results, float) or isinstance(
                object_results, int):
            _result = object_results
        else:
            if isinstance(object_results, str):
                _result = float(object_results)
            else:
                # must be a list
                if len(object_results) > 0:
                    _result_avg = math_functions.get_average_of_list(
                        object_results, 0)
                    _result_min = min(object_results)
                    _result_max = max(object_results)
                    if use_avg is not None and str_to_bool(use_avg):
                        _result = _result_avg
                    else:
                        # use the entire list as the result
                        _result = object_results

        # backfill min/avg/max for the scalar case
        if _result_min is None:
            _result_min = _result

        if _result_max is None:
            _result_max = _result

        if _result_avg is None:
            _result_avg = _result

        # create the Param Evaluator
        # (captures all of the locals above as the evaluation namespace)
        evaluator = BaseEvaluator(check_id, locals())
        evaluator.set_parser(parser)

        # get error and warning levels
        error_level = evaluator.parse_and_evaluate('error_level')
        warning_level = evaluator.parse_and_evaluate('warning_level')

        # get error and warning healthscores
        error_healthscore = evaluator.parse_and_evaluate('error_healthscore')
        warning_healthscore = evaluator.parse_and_evaluate(
            'warning_healthscore')

        # determine if there is an error
        has_error = evaluator.evaluate_as_boolean(parser.get('has_error'))

        if has_error is False and alternate_error_check is not None:
            # fall back to the alternate error expression, if configured
            has_error = evaluator.evaluate_as_boolean(alternate_error_check)
            tested_alt_error = True

        if has_error is False:

            # determine if there is a warning
            has_warning = evaluator.evaluate_as_boolean(
                parser.get('has_warning'))
            if has_warning:
                self.has_warning = True
                self.error_message = evaluator.parse_and_evaluate(
                    'warning_message')
                healthscore = warning_healthscore
        else:

            self.has_error = True

            # prefer the alternate error message when the alternate check fired
            if tested_alt_error and parser.get(
                    'error_message_alt') is not None:
                self.error_message = evaluator.parse_and_evaluate(
                    'error_message_alt')
            else:
                self.error_message = evaluator.parse_and_evaluate(
                    'error_message')

            healthscore = error_healthscore

            try:
                if error_state_key is not None and self.object_states is not None:
                    # set the object state flag in the case object states are used
                    self.object_states.__setattr__(error_state_key, 1)
            except:
                logger.error(
                    "Problem setting error_state_key '{}' for OBJECT ID={}".
                    format(error_state_key, self.object.id))

        if self.results:

            if healthscore is None or healthscore == 0:
                # no fixed healthscore configured - compute from levels
                if error_level and warning_level:
                    compute_health_score(self.object, self.results,
                                         _result_avg, warning_level,
                                         error_level)
            else:
                # add the pre-determined health score
                add_health_score(self.object, self.results, healthscore)

        # we are here, so it ran, and was a success
        return True
# ===== Example #20 (scrape-artifact separator, originally "示例#20" / "0") =====
    def _test_mining_pool(self, watts, coin_address, worker, algo,
                          hashrate_by_algo, pool, sim_urls, sim_file,
                          miner_model_id):
        """
        Exercises the mining-pool API processing against a simulated
        pool REST endpoint and returns the computed results.

        Args:
            watts: arbitrary power usage (watts) so profitability can be computed
            coin_address: coin address used to query pool stats
            worker: worker name passed to the pool API
            algo: mining algorithm name passed to the pool API
            hashrate_by_algo: key into the 'hashrates_by_algo' result dict
            pool: pool identifier passed to process_pool_apis()
            sim_urls: URLs the REST API simulator should answer for
            sim_file: simulator data file name (under resources/mining_pools)
            miner_model_id: optional object-model name for the mock miner

        Returns:
            Tuple (algo_id, hashrate, profitability); hashrate and
            profitability remain None if pool processing raised an error.
        """

        algo_id = 0
        hashrate = None
        profitability = None

        results = MinerResults()
        miner = MockObject()

        if miner_model_id is not None:
            miner.model = get_objectmodel_by_name(miner_model_id)

        # use an arbitrary number for our power so we can get profitability numbers
        miner.power_usage_watts = watts

        # coin address is needed to query for ethermine stats
        miner.coin_address = coin_address

        # NOTE: the original code contained no-op self-assignments here
        # ("worker = worker", etc.) which have been removed.

        # tell the REST API which URLS to simulate, which port to target
        sys._unit_tests_API_SIMULATE_URLS = sim_urls
        sys._unit_tests_API_TARGET_LOC = self.CONST_SIMULATOR_API_TARGET_LOC
        sys._unit_tests_API_TARGET_PORT = self.CONST_SIMULATOR_API_TARGET_PORT

        # do the POOL API and HASHRATE calls
        sys._unit_tests_MINERMEDIC_CALL_POOL_APIS = True
        sys._unit_tests_MINERMEDIC_CALL_HASHRATE_CALCS = True

        # get path to the simulator data file
        simulator_data_path = (
            self.absolute_path_of_test_directory +
            "/apps/minermedic/resources/mining_pools/" + sim_file)

        # start the simulator
        simulator = self.startSimulator(
            simulator_data_path, 'HTTP',
            str(self.CONST_SIMULATOR_API_TARGET_PORT))

        try:

            # contact pools, do profitability stuff
            process_pool_apis(results, miner, worker, algo, pool)

            # get the resulting "algo_idx"
            algo_id = results.get_result(miner.id, 'algo')

            # get the accepted hashrate from the POOL
            hashrate = results.get_result(
                miner.id, 'hashrates_by_algo')[hashrate_by_algo]['accepted']

            profitability = results.get_result(miner.id, 'profitability')

        except Exception as ex:
            logger.error(ex)

        finally:
            # always stop the simulator, even when pool processing failed
            simulator.stop()

        # return the results
        return algo_id, hashrate, profitability
# ===== Example #21 (scrape-artifact separator, originally "示例#21" / "0") =====
    def process_object_states(self, object, results):
        """
        Simple state machine that handles default Miner Actions
        when specific Actions are not defined in the ActionModel.

        Args:
            object: the monitored miner object (expected to support
                powercycle(), restart_miner(), wake_on_lan(),
                is_powered_off() — TODO confirm full contract)
            results: results collector used for state and profitability lookups

        Returns:
            Boolean - result of the action taken, or False if no action ran
        """

        if object is None:
            # should not happen
            logger.error("OBJECT is None")
            return False

        if object.admin_disabled:
            # administratively disabled objects are never acted upon
            return False

        if not self.has_object_init_time_passed(object, results):
            # do not execute the state machine for items that are still initializing...
            return False

        logger.debug("process states for object {}".format(object.unique_id))

        states = self.get_object_state(object, results)

        # STEP 1 - REACHABILITY
        if states.poll_error == 1:

            if states.ping_error == 1:

                if hasattr(object, "power_state") and hasattr(
                        object, "connected_outlet"):
                    # TODO - add error message to results/object?
                    return object.powercycle()
                else:
                    if object.wake_on_lan():
                        return True
                    else:
                        # there may be an object/subclass specific PowerCycle
                        return object.powercycle()

            else:
                # object does not poll, but can ping, try to restart
                return object.restart_miner()

        # STEP 2 - CHECK TEMPS
        if states.temp_error == 1:
            # BUGFIX: these step messages were previously built into a local
            # variable that was never used; log them before acting
            logger.warning("TEMPS too high on {}".format(object.ip))
            return object.powercycle()

        # STEP 3 - CHIPS
        if states.chip_error == 1:
            logger.warning("CHIP errors on {}".format(object.ip))
            return object.restart_miner()

        # STEP 4 - HASHRATE
        if states.hashrate_error == 1:
            logger.warning("HASHRATE problems on {}".format(object.ip))
            # Actions should be handled now by the action_model

        # STEP 5 - UI
        if states.ui_error == 1:
            logger.warning("UI down on {}".format(object.ip))
            return object.restart_miner()

        # STEP 6 - PROFITABILITY
        profitability = results.get_result(object.id, 'profitability')
        if profitability is None and object.is_powered_off():
            # this means we did not poll and we still need to run the profitability check
            return execute_single_action(object, results, 'check_profit',
                                         'CRYPTO_MINER')

        return False