예제 #1
0
    def scan(self, collector):
        """
        Scans for RuuviTags using BlueTooth

        Returns:
            None
        """

        logger.debug("Start scan...")

        try:
            found = self.find_tags()

            # Walk every discovered tag; feed the new ones to the collector.
            for mac, data in found.items():

                if collector.has_object(mac):
                    # already tracked - nothing to do for this tag
                    continue

                # persist the tag into the DB
                sensor = self.add(mac, data)

                if sensor:
                    # DB insert succeeded - register with the collector
                    collector.populate(sensor)

        except Exception as ex:
            logger.error("Problem during scan: {}".format(ex))
예제 #2
0
    def poll_pool_stats(miner):
        """
        Pool Stats are retrieved from the Miner. Generic, shared call.

        Pulls worker, pool URL, algo and last-share time from the first
        pool the miner reports as alive/active, then derives the idle
        state from the last share time.
        """

        last_share_time = 0
        miner.algo = None
        # BUG FIX: 'worker' was unbound when no pool was Alive or
        # stratum-active, raising NameError at parse_worker_string() below.
        worker = None

        # WORKER, POOL, ALGO, LAST SHARE TIME
        miner_pools = cgminer.get_pools(miner.ip)

        for miner_pool in miner_pools['POOLS']:
            # A pool counts as active when its Status is "Alive" or the
            # stratum connection is flagged active ('== True' deliberately
            # also matches an integer 1 from the API; None never matches,
            # so the explicit None guards were redundant).
            if (miner_pool.get('Status') == "Alive"
                    or miner_pool.get('Stratum_Active') == True):
                # pull pertinent information
                worker = miner_pool['User']
                # get the PORT as well, different pools/algos at different ports
                miner.pool = miner_pool['URL'].split("//", 1)[-1]
                miner.algo = get_algo(miner.pool)
                last_share_time = miner_pool.get('Last Share Time')
                break

        if miner.algo is None:
            # fall back to the algo configured on the miner itself
            miner.algo = miner.hashrate[0]['algo']

        # IDLE STATE
        if last_share_time is not None:

            # convert last share time to minutes (i.e. share cycles) and then compare and set if needed
            if isinstance(last_share_time, int):
                last_share_minutes, last_share_seconds = get_total_minutes_seconds_from_timestamp(
                    last_share_time)
            else:
                last_share_minutes, last_share_seconds = get_total_minutes_seconds_from_clock_time(
                    last_share_time)

            if last_share_minutes >= 1:

                if miner.last_share_time is not None and miner.last_share_time > 0:
                    last_share_delta = last_share_minutes - miner.last_share_time
                else:
                    last_share_delta = 0

                # set the last share time
                miner.last_share_time = last_share_minutes

                if last_share_delta >= 2:
                    logger.debug("process_miner() - miner=" + str(miner.id) +
                                 " - Miner is IDLE.")
                    miner.idle_cycles_count = last_share_delta
                elif miner.idle_cycles_count > 1:
                    # reset it
                    miner.idle_cycles_count = 0

        # get the coin address and worker
        miner.coin_address, miner.worker = parse_worker_string(miner, worker)
예제 #3
0
def miner_hashrate():
    """
    Generates the MinerMedic HashRate Screen
    :return: HTML Template with Hashrate Information
    """

    # trace who is requesting the page
    caller = request.remote_addr
    logger.debug("hashrate() - remote addr:" + caller)

    # hand back the rendered hashrate page
    return render_template('hashrate.html', version='0.0')
예제 #4
0
def miner_profitability():
    """
    Returns the MinerMedic Profitability Screen
    :return: HTML Template for Profitability
    """

    # trace who is requesting the page
    caller = request.remote_addr
    logger.debug("/profitability - remote addr:" + caller)

    # hand back the rendered profitability page
    return render_template('profitability.html', version='0.0')
예제 #5
0
def miner_predictions():
    """
    Returns a Model Template Prediction Status screen
    :return: HTML Template for Model Prediction Status
    """

    # trace who is requesting the page
    caller = request.remote_addr
    logger.debug("/predictions - remote addr:" + caller)

    # hand back the rendered predictions page
    return render_template('predictions.html', version='0.0')
예제 #6
0
    def call(self, command, arg=None):

        """ Initialize a socket connection,
        send a command (a json encoded dict) and
        receive the response (and decode it).

        Returns the decoded JSON response, or an ANTMINER-style error
        dict when the socket operation fails.
        """

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)

        try:
            sock.connect((self.host, self.port))
            payload = {self.payload_command: command}

            # first deal with arguments
            if arg is not None:

                # Parameter must be converted to basestring (no int)
                if sys.version_info.major == 2:
                    arg = str(arg)

                if self.encapsulate_args:
                    payload.update({self.parameter_argument: [arg]})
                else:
                    payload.update({self.parameter_argument: arg})

            send_json(sock, payload)

            response = self._receive(sock, 65536)

        except Exception as e:
            # This is ANTMINER SPECIFIC... Horrible.
            if sys.version_info.major == 2:
                return {'STATUS': [{'STATUS': 'error', 'description': str(e)}]}
            if sys.version_info.major == 3:
                return {'STATUS': [{'STATUS': 'error', 'description': e}]}
        else:
            # ALSO ANTMINER SPECIFIC
            # add a comma on the output of the `stats` command by replacing '}{' with '},{'
            # BUG FIX: str.find() returns -1 (truthy) when the pattern is
            # absent and 0 (falsy) when it is found at index 0, so the old
            # bare truthiness test was inverted - compare to -1 explicitly.
            if response.find("\"}{\"STATS\":") != -1:
                response = response.replace("\"}{\"STATS\":", "\"},{\"STATS\":", 1)

            # Another bug in Antminer JSON
            # the null byte makes json decoding unhappy
            # TODO - test for NULL byte at end
            if not response.endswith("}"):
                response = response[:-1]

            if logger.isEnabledFor(logging.DEBUG):
                logger.debug("CALL: {}\nRESPONSE: {}".format(payload, response))

            return json.loads(response)

        finally:
            # sock.shutdown(socket.SHUT_RDWR)
            sock.close()
예제 #7
0
    def restart_miner(self):
        """
        Attempts to restart the miner. This is a wrapper for "miner.restart()".
        The actual "restart" method must be implemented individually for each subclass of Miner.

        Returns:
            boolean

        """

        # A miner we can no longer ping cannot be restarted via its API.
        if self.object_states.ping_error != 0:
            return False

        # we can still ping it, so attempt to restart miner
        logger.debug("Attempt to restart object {}".format(self.id))

        success = False
        self.__init_restart_delay(self.object)

        # only attempt a restart once the configured delay has expired
        restart = time_functions.get_timeout_expired(
            self.last_restart_time, self.restart_delay)

        if restart:

            # do the actual restart
            success = self.restart()

            # set the last restart attempt time
            self.last_restart_time = time_functions.current_milli_time()

            if success:
                # reset the init state so state machine does not try to auto-powercycle, etc.
                self._reset_init_state()

                logger.info("Restarted Miner {}(ID={})".format(
                    self.object.unique_id, self.object.id))
            else:
                # according to state machine diagram, if restart fails, powercycle (OR OFF)
                success = self.powercycle()

        return success
예제 #8
0
    def __init__(self, host='localhost', port=4028, payload_command='command', parameter_argument='parameter', encapsulate_args=False):
        """
        Initialize the cgminer-style API client.

        :param host: miner hostname/IP to connect to
        :param port: API port; falls back to CgminerAPI.DEFAULT_PORT when None
        :param payload_command: JSON key used for the command name in the payload
        :param parameter_argument: JSON key used for the command argument
        :param encapsulate_args: when True, the argument is wrapped in a list
        """

        self.data = {}
        self.host = host

        # just in case the configuration is empty and sends None, use the default port
        if port is None:
            port = CgminerAPI.DEFAULT_PORT

        self.port = port
        self.payload_command = payload_command
        self.parameter_argument = parameter_argument
        self.encapsulate_args = encapsulate_args
        self.socket_timeout = 3

        # When unit tests are running, redirect the client at the test target.
        # The sys._unit_tests_* attributes exist only under the test harness,
        # so any failure here is deliberately ignored (best-effort override).
        try:
            if sys._unit_tests_running:
                self.host = sys._unit_tests_API_TARGET_LOC
                self.port = sys._unit_tests_API_TARGET_PORT
                # nice long timeout to allow for stepping through debugger
                logger.debug("Unit Tests running - setting socket_timeout to 20")
                self.socket_timeout = 20
        except:
            pass
예제 #9
0
    def geturl(self, url='index.htm'):
        """ Get a URL from the userid/password protected powerswitch page
            Return None on failure
        """
        full_url = "http://%s/%s" % (self.hostname, url)
        result = None
        response = None

        # retry the request up to self.retries times
        for attempt in range(self.retries):

            try:
                response = requests.get(full_url, auth=(self.userid, self.password,),  timeout=self.timeout)
            except requests.exceptions.RequestException as e:
                logger.warning("Request to URL {} timed out - {} retries left.".format(full_url, (self.retries - attempt - 1)))
                logger.debug("Caught exception {}".format(e))
                continue

            # a 200 means we got a good answer - keep the body and stop retrying
            if response is not None and response.status_code == 200:
                result = response.content
                break

        if response is not None:
            logger.debug('Request to URL {} - response code: {}'.format(full_url, response.status_code))

        return result
예제 #10
0
    def find_tags(self):

        """
        Find all RuuviTags.

        Returns:
            dict: MAC and state of found sensors

        """

        global BLUEZ_ERROR_DISPLAYED

        logger.debug("Try to find tags...")

        # This is the amount of time to listen for tags - TODO get from config file
        timeout = 10

        tags = {}
        tags_skip = {}

        macs = []
        start = current_milli_time()

        try:

            for data in RuuviTagSensor._get_ruuvitag_datas(macs, timeout):

                # enforce the listen timeout ourselves
                if current_milli_time() > (start+(timeout*1000)):
                    break

                # already seen this MAC (accepted or skipped)?
                if (data[0] in tags) or (data[0] in tags_skip):
                    continue

                logger.debug("Found TAG {}, DATA {}".format(data[0],data[1]))

                data_format = data[1]['data_format']

                if data_format < 4:
                    tags[data[0]] = data[1]
                else:
                    tags_skip[data[0]] = data[1]
                    logger.debug("Skipping data_format 4 tag - polling locks up thread")

        except Exception:
            # BUG FIX: the bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; catch Exception and log the traceback
            # so the underlying failure can actually be diagnosed.
            logger.exception("error while finding tags")

        if not tags:
            # warn only once per process about a likely missing BLUEZ install
            if not BLUEZ_ERROR_DISPLAYED:
                BLUEZ_ERROR_DISPLAYED = True
                logger.warning("No RuuviTags Found. Verify this is running on Raspian and "
                               "that you have installed BLUEZ: 'sudo apt-get install bluez-hcidump'")

        return tags
예제 #11
0
    def process_object_states(self, object, results):
        """
        Simple state machine that handles default Miner Actions
        when specific Actions are not defined in the ActionModel.

        Steps through reachability, temperature, chip, hashrate, UI and
        profitability checks and triggers the default corrective action
        for the first failing state.

        Returns:
            Boolean

        """

        if object is None:
            # should not happen
            logger.error("OBJECT is None")
            return False

        if object.admin_disabled:
            # administratively disabled objects are never acted upon
            return False

        if not self.has_object_init_time_passed(object, results):
            # do not execute the state machine for items that are still initializing...
            return False

        logger.debug("process states for object {}".format(object.unique_id))

        states = self.get_object_state(object, results)

        # STEP 1 - REACHABILITY
        if states.poll_error == 1:

            if states.ping_error == 1:

                if hasattr(object, "power_state") and hasattr(
                        object, "connected_outlet"):
                    # TODO - add error message to results/object?
                    return object.powercycle()
                else:
                    if object.wake_on_lan():
                        return True
                    else:
                        # there may be an object/subclass specific PowerCycle
                        return object.powercycle()

            else:
                # object does not poll, but can ping, try to restart
                return object.restart_miner()

        # BUG FIX: the 'message' strings below were assigned but never used
        # (dead code); log them so the reason for the action is visible.

        # STEP 2 - CHECK TEMPS
        if states.temp_error == 1:
            logger.warning("TEMPS too high on {}".format(object.ip))
            return object.powercycle()

        # STEP 3 - CHIPS
        if states.chip_error == 1:
            logger.warning("CHIP errors on {}".format(object.ip))
            return object.restart_miner()

        # STEP 4 - HASHRATE
        if states.hashrate_error == 1:
            logger.debug("HASHRATE problems on {}".format(object.ip))
            # Actions should be handled now by the action_model

        # STEP 5 - UI
        if states.ui_error == 1:
            logger.warning("UI down on {}".format(object.ip))
            return object.restart_miner()

        # STEP 6 - PROFITABILITY
        profitability = results.get_result(object.id, 'profitability')
        if profitability is None and object.is_powered_off():
            # this means we did not poll and we still need to run the profitability check
            return execute_single_action(object, results, 'check_profit',
                                         'CRYPTO_MINER')

        return False
예제 #12
0
    def poll(miner, results):
        """
        Polls a Claymore GPU miner via its API and populates the results
        object with chip, temp, fan, hashrate and error-rate information.

        Returns:
            int: miner uptime in seconds, or -1 if the miner was unreachable
        """

        logger.debug("poll_etherminer() - miner=" + str(miner.id))

        elapsed_secs = -1

        # get the miner stats
        miner_stats = cgminer.get_claymore_stats(miner.ip)

        # if miner not accessible... add to inactive
        if miner_stats.get('result') is None:
            results.inactive_objects.append(miner)

        else:

            result = miner_stats['result']

            # TODO - to support dual mining, will have to refactor this code
            # and probably call twice, once for each algo

            algo_idx = 0

            # version of claymore and COIN being mined
            version, coin = result[0].split(" - ")

            if coin is not None and len(
                    coin) > 0 and coin != miner.hashrate[0]['coin']:
                # coin changed, need to update it
                coin_index = get_coin_index(coin)
                if coin_index >= 0:
                    # set the COIN currently being mined
                    miner.hashrate[algo_idx]['coin'] = coin

            # process the local config to get miner coin address and worker name
            GPU_CLAYMORE.process_miner_local_config(miner)

            # Get pool name
            pool = result[7]

            if coin == "ETH" or coin == "ETC" or "ethermine" in pool:
                algo = "ethash"
            else:
                # usually you can get the algo from the pool
                algo = get_algo(pool)

            if miner.hashrate[algo_idx]['algo'] != algo and algo is not None:
                miner.hashrate[algo_idx]['algo'] = algo

            # Get miner's GPU stats
            gpu_hashes_string = result[3]
            gpu_hashes = gpu_hashes_string.split(';')

            # count number of working GPU
            Os = sum([int(x) > 0 for x in gpu_hashes])

            # count number of non-working GPUs (does not apply)
            Xs = 0

            # get number of in-active GPUs
            Gi = sum([int(x) == 0 for x in gpu_hashes])

            # Get total number of GPUs
            Ts = len(gpu_hashes)

            if Gi == Ts:
                logger.warning("process_claymore() - miner=" + str(miner.id) +
                               " - Miner is IDLE.")
                # increase idle cycles
                miner.idle_cycles_count = miner.idle_cycles_count + 1

            # Get the temperatures of the miner, they are mixed with fan speeds
            temps_and_fans = result[6].split(';')

            # get the temps and convert to ints
            temps = temps_and_fans[::2]
            temps = [int(i) for i in temps]

            # get the fan speeds and convert to ints
            fan_pcts = temps_and_fans[1::2]
            fan_pcts = [int(i) for i in fan_pcts]

            # Get Total Hashrate for Miner (expressed in KH/s from the API)
            eth_stats = result[2].split(';')
            current_hashrate = int(eth_stats[0])

            # Get Gigahashes by converting the KH to GH
            ghs5s = float(int(current_hashrate) / 1000000)

            # TODO - revisit with dual mining
            algo_rate = miner.hashrate[algo_idx]['rate']

            if algo_rate is None or algo_rate == 0:
                # get the hashrate in the correct units
                normalized_rate, hashrate_units = get_normalized_hashrate_from_gigahash_per_sec(
                    ghs5s, miner.hashrate[algo_idx]['units'])
                miner.hashrate[algo_idx]['rate'] = normalized_rate

            if miner.power_usage_watts == 0:

                if miner.hashrate[algo_idx]['power'] == 0:
                    # TODO - if this is connected to a PDU, check whether there is Power Management on the PDU
                    # and if it will tell you power usage. If so, use those estimates...
                    # and set miner.power_usage_watts = XXX
                    # If not...
                    pass

                if miner.power_usage_watts == 0:
                    # estimate power usage based on card count and default GPU card power usage setting
                    # FALLBACK TO CONFIG AND DEFAULTS
                    miner.power_usage_watts = GPU_CLAYMORE.get_power_usage_estimate_by_card_count(
                        Ts)

            eth_shares_good = int(eth_stats[1])
            eth_shares_stale = int(eth_stats[2])
            eth_shares_invalid = int(result[8].split(';')[0])
            eth_shares_total = eth_shares_good + eth_shares_stale + eth_shares_invalid

            # BUG FIX: a miner that has not submitted any shares yet reports
            # all-zero counters, which made this division raise
            # ZeroDivisionError; treat "no shares yet" as a 0% error rate.
            if eth_shares_total > 0:
                hw_error_rate_raw = ((eth_shares_stale + eth_shares_invalid) /
                                     eth_shares_total) * 100
            else:
                hw_error_rate_raw = 0
            hw_error_rate = math_functions.get_formatted_float_rate(
                hw_error_rate_raw, 4)

            # Get uptime
            elapsed_secs = int(result[1]) * 60

            chip_stats = MinerChipStats(Os, Xs, Gi, Ts)

            results.populate_miner_results(miner, elapsed_secs,
                                           miner.worker_name, algo, pool,
                                           chip_stats, temps, fan_pcts, ghs5s,
                                           hw_error_rate)

            if (Gi > 0):
                # some special debug for GPU miner issues
                logger.debug("Missing {} GPUs in miner {}, stats={}".format(
                    Gi, miner.ip, gpu_hashes_string))

        return elapsed_secs
예제 #13
0
    def powercycle(self):
        """
        Attempts to PowerCycle the object.

        Returns:
            True - if power cycled
        """

        # If we are simply powered OFF, then just power ON instead of going
        # through the entire set of checks and then turning OFF / ON

        if self.get_powered_state() == self.CONST_STATE_OFF:
            # just turn it ON
            return self.power_on()

        # BY DEFAULT, CHECK STATE MACHINE - and other PARAMETERS

        if not self.force_action:

            if hasattr(self, 'last_wol_time'):

                # Was this device recently WOL'd??
                # If so, we should wait a little time for the WOL packet to hit and for the system to come back up
                # if it has been a period of time after the WOL packet and the device is still not PINGABLE,
                # then we can try a powercycle

                try_powercycle = self.last_wol_time == 0 or \
                                 (self.last_wol_time > 0 and
                                  (self.last_wol_time + (DEVICE_WAKE_ON_LAN_TIMEOUT_MS / 2)) > time_functions.current_milli_time())

                if not try_powercycle:
                    logger.debug(
                        "{}(ID={}) was very recently sent WOL packet. "
                        "Waiting longer before POWERCYCLE attempt".format(
                            self.unique_id, self.id))
                    return False

            # have we waited enough time to clear the powercycle count?
            if self.powercycle_count > 0:

                max_time_expired = time_functions.get_timeout_expired(
                    self.last_powercycle_time,
                    self.powercycle_max_wait_time * 1000)

                if max_time_expired:
                    # we can clear them
                    self.powercycle_count = 0

            # have we power-cycled too many times?
            if self.powercycle_count >= self.powercycle_max_attempts:

                # BUG FIX: 'powered_off' was only bound inside the
                # 'if disable:' branch, so 'return powered_off' raised
                # NameError whenever the disable option was turned off.
                powered_off = False

                # should we disable due to too many power cycle attemps?
                disable = config_helpers.get_config_value(
                    "MISC", "device_disable_after_failed_powercycle", True)

                if disable:

                    powered_off = self.__poweroff("powercycled too many times")

                    if powered_off:

                        # disable the device
                        self.set_enabled(False)

                return powered_off

        # try to actually powercycle
        return self.__powercycle()
예제 #14
0
    def execute(self):
        """
        Checks the miner's current hashrate against its historical max and
        against the pool-side accepted/reported hashrates, setting
        has_error / has_warning, an error message, and the health score.

        Returns:
            bool: False when health scoring is skipped (a level is unset),
                  True otherwise
        """

        # ok get the hashrate info from the miner
        hashrate_info_miner = self.results.get_result(self.object.id,
                                                      'hashrates')

        if hashrate_info_miner is None:
            # first time through here, exit
            return True

        # get some basic info from the miner hashrate stats
        current_hashrate = hashrate_info_miner.get('current')
        hashrate_units = hashrate_info_miner.get('units')

        if current_hashrate == 0:
            # the miner is just not hashing right now
            # should we add to the health score or just pass?
            # this will probably get taken care of by IDLE check
            return True

        # init some vars for the pool hashrate stats
        reported_hashrate_pool = None
        accepted_hashrate_pool = None
        current_hashrate_pool = 0
        speed_suffix_pool = None

        #  ok get the hashrate info from the pool
        hashrate_info_pool = self.results.get_result(self.object.id,
                                                     'hashrates_by_algo')

        pool_name = None

        if hashrate_info_pool is not None and len(hashrate_info_pool) == 1:

            # TODO - if it is larger than 1, check the current algo
            # that the miner is processing and match the hashrate by algo

            for key, value in hashrate_info_pool.items():

                pool_name = key

                # the accepted hashrate from the pool (this is really what you are getting paid for)
                accepted_hashrate_pool = value.get('accepted')

                # some pools include "reported" hashrate, should represent how much your miner is hashing,
                # and it should be very close if not the same as the "current_hashrate" reported directly
                # from your miner.
                reported_hashrate_pool = value.get('reported')

                # get the speed of the hashing rate from the pool for conversions
                speed_suffix_pool = value.get('speed_suffix')

                break

        # first check if there could be a problem from the pool's perspective
        if accepted_hashrate_pool is not None and speed_suffix_pool is not None:
            current_hashrate_pool = get_converted_hashrate(
                accepted_hashrate_pool, speed_suffix_pool, hashrate_units)
            reported_hashrate_pool = get_converted_hashrate(
                reported_hashrate_pool, speed_suffix_pool, hashrate_units)

        # TODO - possibly use moving average in the future
        # https://stackoverflow.com/questions/13728392/moving-average-or-running-mean

        # for now, use the max hashrate achieved thus far to check expected hashrate for the miner

        max_hashrate = hashrate_info_miner['max']
        units = hashrate_info_miner['units']
        error_level = self.args['error_level']
        warning_level = self.args['warning_level']

        has_error_miner = False
        has_error_pool = False
        has_warn_pool = False
        has_warn_miner = False

        # Handle Error Levels

        if error_level is None:
            error_pct = 0
        else:
            error_pct = percent_to_float(error_level)
            has_error_pool = (current_hashrate_pool <
                              (error_pct * max_hashrate))
            has_error_miner = (current_hashrate < (error_pct * max_hashrate))
            # do not take into account the pool hashrate to trigger an "ERROR"...
            self.has_error = (has_error_miner)

        # Handle Warning Levels

        if warning_level is None:
            warn_pct = 0
        else:
            warn_pct = percent_to_float(warning_level)
            has_warn_pool = (current_hashrate_pool < (warn_pct * max_hashrate))
            has_warn_miner = (current_hashrate < (warn_pct * max_hashrate))
            self.has_warning = (has_warn_miner or has_warn_pool)

        if self.has_error:
            self.error_message = self._build_error_message(
                "Miner '{}'".format(self.object.unique_id), current_hashrate,
                units, "error", error_level, max_hashrate)

        elif self.has_warning:
            # BUG FIX: both warning messages previously passed error_level,
            # so warnings were reported against the wrong threshold; they
            # now pass warning_level.
            if has_warn_pool:
                self.error_message = self._build_error_message(
                    "Pool '{}'".format(pool_name), current_hashrate_pool,
                    units, "warning", warning_level, max_hashrate)
            else:
                self.error_message = self._build_error_message(
                    "Miner '{}'".format(self.object.unique_id),
                    current_hashrate, units, "warning", warning_level,
                    max_hashrate)

        # do not process health scores if both levels are not set
        if error_pct == 0 or warn_pct == 0:
            return False

        if self.has_error or self.has_warning:

            # now, determine amount of hashrate "missing" from the total potential hashrate
            if has_error_pool or has_warn_pool:
                missing_hashrate = round(max_hashrate - current_hashrate_pool)
            else:
                missing_hashrate = round(max_hashrate - current_hashrate)

            warning_missing_hashrate = round(max_hashrate -
                                             (max_hashrate * warn_pct))
            error_missing_hashrate = round(max_hashrate -
                                           (max_hashrate * error_pct))
            compute_health_score(self.object, self.results, missing_hashrate,
                                 warning_missing_hashrate,
                                 error_missing_hashrate)

            try:
                if self.has_error:
                    if self.results.object_states.get(
                            self.object.id) is not None:
                        # set the object state flag in the case object states are used
                        self.results.object_states[self.object.id].__setattr__(
                            self._ERROR_STATE_KEY_, 1)
            except Exception:
                # narrowed from a bare except so SystemExit/KeyboardInterrupt propagate
                logger.error("Problem setting error_state_key '{}' "
                             "for OBJECT ID={}".format(self._ERROR_STATE_KEY_,
                                                       self.object.id))

        else:

            # some special cases?

            # FOR DEBUG
            if current_hashrate > 0 and current_hashrate_pool > 0 and \
                    reported_hashrate_pool is not None and reported_hashrate_pool == 0:

                # there is a problem. the reported rate should be around one of these rates.
                logger.debug(
                    "Reported Hashrate from the pool is 0 for miner '{}'. "
                    "Something could be wrong with miner or the miner "
                    "is not reporting hashrate to the pool".format(
                        self.object.unique_id))

        return True
예제 #15
0
def hello_world():
    """Serve the /helloworld page from the ./templates folder."""

    # trace who is requesting the page
    caller = request.remote_addr
    logger.debug("/helloworld:" + caller)

    return render_template('helloworld.html', version='0.0')
예제 #16
0
    def poll(miner, results):
        """
        Polls an Innosilicon miner via the cgminer API and populates the
        results object with chip, temp, hashrate and error-rate information.

        Returns:
            int: miner uptime in seconds, or -1 if the miner was unreachable
        """

        logger.debug("poll_innosilicon() - miner=" + str(miner.id))

        elapsed_secs = -1

        # get the miner stats
        miner_stats = cgminer.get_cgminer_stats(miner.ip)

        # if miner not accessible
        if miner_stats['STATUS'][0]['STATUS'] == 'error':
            results.inactive_objects.append(miner)

        else:

            # retrieve and process the pool stats on the Miner
            miner.poll_pool_stats()

            # CHIPS, FANS, TEMPS

            Os = 0
            Xs = 0
            Ds = 0
            temps_chips = []
            fan_speeds = []

            # get total number of chips per board
            chips_per_board = int(miner.chips.split(',')[0])
            boards = 0

            for board in miner_stats['STATS']:

                # Get ASIC chip, temp, fan, counts, status, etc. all with a single loop
                for key in board.keys():

                    # BOARD COUNT
                    if key == "ID":
                        value = board[key]
                        if 'BA' in value:
                            boards = boards + 1

                    # TEMPS
                    if "TEMP(AVG)" in key:
                        value = board[key]
                        if (value > 0):
                            temps_chips.append(value)

                    # CHIPS

                    if "ASIC" in key:
                        value = board[key]
                        # good chips
                        Os = Os + value
                        # bad chips
                        # NOTE(review): this subtraction makes Xs go negative
                        # for missing chips - confirm downstream expects a
                        # negative "bad chip" count here.
                        Xs = Xs - (chips_per_board - value)

            Ts = boards * chips_per_board

            # summarize chip stats with an object
            chip_stats = MinerChipStats(Os, Xs, Ds, Ts)

            # HASHRATE

            # what are the hashrate units of this miner?
            hashrate_units = miner.hashrate[0]['units'].upper()
            hashrate_pool = None

            summary = cgminer.get_summary(miner.ip)['SUMMARY'][0]

            try:
                # Get the current hashrate
                hashrate_pool = float(str(summary['GHS 5s']))
            except:
                pass

            if hashrate_pool is None:
                # try for older MHS
                try:
                    hashrate_pool = float(str(summary['MHS 5s']))
                except:
                    pass

            # now, convert to GH/s which is what the normalized result handling requires
            hashrate_pool_ghs = get_normalized_gigahash_per_sec_from_hashrate(
                hashrate_pool, hashrate_units)

            # get the total possible hashrate by multiplying base board hashrate
            # by number of boards being reported by the miner controller
            hashrate_miner = int(miner.hashrate[0]['rate'] * boards)

            # set the override total hashrate for this miner
            results.set_miner_hashrate_override(miner, hashrate_miner)

            # BUG FIX: hw_error_rate was unbound (NameError at the
            # populate_miner_results call) whenever the summary had no
            # 'Device Hardware%' field; default it to 0 first.
            hw_error_rate = 0

            try:
                hw_error_rate = math_functions.get_formatted_float_rate(
                    summary['Device Hardware%'], 4)
            except:
                pass

            # UPTIME
            elapsed_secs = miner_stats['STATS'][0]['Elapsed']

            # Populate results
            results.populate_miner_results(miner, elapsed_secs, miner.worker,
                                           miner.algo, miner.pool, chip_stats,
                                           temps_chips, fan_speeds,
                                           hashrate_pool_ghs, hw_error_rate)

        return elapsed_secs
예제 #17
0
def process_pool_apis(results, miner, worker, algo, pool):
    """
    Processes all pool APIs for the specific pool.

    Looks up (and caches in ``pool_lookups``) the pool attributes whose
    ``url_match`` regex matches *pool*, instantiates the configured pool
    stats class, and invokes its ``get_pool_stats(...)`` method.

    Args:
        results: results collector passed through to the pool class.
        miner: the miner object the stats belong to.
        worker: worker name reported by the miner.
        algo: mining algorithm name.
        pool: pool URL (host:port) used for the lookup.

    Returns:
        None
    """

    success = False
    pool_attrs = None
    pool_class = None
    pool_not_supported = False

    if pool not in pool_lookups:

        # get the pool classnames enum
        enum_pools = global_enums.get_enum_details('_POOL_STATS_CLASSNAMES_')

        for enum_pool in enum_pools:

            pool_attrs = enum_pool.value[0]
            pool_url_match = pool_attrs['url_match']
            m = re.search(pool_url_match, pool)

            if m is not None:

                # copy before mutating so we do not modify the shared
                # enum value dict in place
                pool_attrs = dict(pool_attrs)

                # add the name to the attributes
                pool_attrs['name'] = enum_pool.name

                # cache the attributes for subsequent calls
                pool_lookups[pool] = pool_attrs

                break

            else:

                pool_attrs = None

    else:
        pool_attrs = pool_lookups[pool]

    if pool_attrs is not None:

        pool_id = pool_attrs["value"]
        pool_classname = pool_attrs["classname"]

        # get the model class to operate with
        pool_class = locate(pool_classname)

        if pool_class is None:
            logger.error("pool classname {} could not be initialized".format(
                pool_classname))
        else:

            # create the mining pool
            mining_pool = pool_class(pool, pool_attrs)

            # flag set by the test harness; absent (False) in production.
            # Using getattr avoids the previous bare "except:" which
            # silently swallowed every exception type.
            unit_tests_running = getattr(sys, '_unit_tests_running', False)

            if not unit_tests_running:

                try:
                    # add the relations (we do not need them for unit tests)
                    mining_pool.relate(miner)
                except Exception as ex:
                    logger.debug(ex)

            # execute the "get_pool_stats(...)" method
            success = mining_pool.get_pool_stats(results, miner, worker, algo,
                                                 int(pool_id), pool)

    else:
        pool_not_supported = True
        logger.warning("POOL {} not yet supported".format(pool))
        # TODO - issue notification? log an issue on GitHub?

    if success is False and pool_not_supported is False:
        # There is a legitimate error...
        if pool_class is None:
            logger.error(
                "No Pool support found for Pool/Model/Algo {}/{}/{}".format(
                    pool, miner.model.model, algo))
        else:
            logger.error(
                "Pool stats not returned for Pool/Model/Algo {}/{}/{}".format(
                    pool, miner.model.model, algo))
예제 #18
0
    def poll(miner, results):
        """
        Polls an Avalon controller (AUC) and every miner attached to it.

        One controller can front up to 20 miners (5 per AUC). Each attached
        device is parsed out of the controller stats, created in the DB if
        it is not known yet, and reported individually through
        ``results.populate_miner_results``.

        Args:
            miner: the controller object to poll.
            results: results collector to populate.

        Returns:
            Elapsed seconds of the last processed device (or -1 if the
            controller was not accessible); True if the controller was
            already polled within the last minute.
        """

        logger.debug("poll_avalonminer() - miner=" + str(miner.id))

        elapsed_secs = -1
        last_valid_work = 0

        if hasattr(miner, 'last_poll_time'):
            last_poll_time = miner.last_poll_time
            if ((current_milli_time() - last_poll_time) < 60000):
                # Do not poll it again, we handle all miners on the controller during the poll phase
                return True

        miners = []
        miners_info = []

        # defaults in case no Alive/active pool entry is found below
        # (previously these stayed unbound and raised NameError)
        worker = ""
        pool = ""
        algo = None

        # get the miner stats
        miner_stats = cgminer.get_avalon_stats(miner.ip)

        # if miner not accessible ('S' is the cgminer success status).
        # NOTE: this was "is not 'S'" - an identity comparison against a
        # string literal, which is unreliable; use equality instead.
        if miner_stats['STATUS'][0]['STATUS'] != 'S':
            results.inactive_objects.append(miner)
            return elapsed_secs

        # controller elapsed seconds
        elapsed_secs = miner_stats['STATS'][0]['Elapsed']

        # assuming all is good, get the devs and pools
        miner_stats_devs = cgminer.get_avalon_devs(miner.ip)
        miner_pools = cgminer.get_pools(miner.ip)

        # basic pool processing
        for miner_pool in miner_pools['POOLS']:
            miner_pool_status = miner_pool.get('Status')
            miner_pool_stratum_active = miner_pool.get('Stratum Active')

            if (miner_pool_status is not None and miner_pool_status
                    == "Alive") or (miner_pool_stratum_active is not None
                                    and miner_pool_stratum_active == True):
                # pull pertinent information
                worker = miner_pool['User']
                # get the PORT as well, different pools/algos at different ports
                pool = miner_pool['URL'].split("//", 1)[-1]
                algo = get_algo(pool)
                break

        if (algo is None):
            algo = miner.hashrate[0]['algo']

        # get the coin address and worker
        coin_address, worker = parse_worker_string(miner, worker)

        # get all miner info for each miner
        # it is possible to have 20 miners, 5 miners per AUC per controller
        for i in range(20):
            try:
                miner_info = miner_stats['STATS'][0]['MM ID' + str(i + 1)]
                # this returns a chunky string for each device like:
                #   "Ver[...] DNA[...] Elapsed[183] ... Temp[38] TMax[93]
                #    Fan[4110] GHSmm[7078.84] TA[88] ... PVT_T[...]"
                # check out page 13 for a detailed explanation:
                # https://canaan.io/wp-content/uploads/2018/05/Troubleshooting-and-repair-guide-for-AvalonMiner-models-721-741-761-821-and-841-release-v1.4-14.05.2018.pdf

                if miner_info is not None:
                    miner_info = miner_info + " _NULL_[1"  # add _NULL_ in order to split correctly
                    miners_info.append(
                        dict(x.split('[') for x in miner_info.split('] ')))
            except (KeyError, ValueError):
                # fewer than 20 devices attached, or an unparseable info
                # string - skip this slot
                pass

        # Now we have info on all the miners attached to this Avalon/AUC3 controller
        # Iterate through and process

        miner_int_id = 0
        miner_is_controller = False
        controller_ver = ''

        for info in miners_info:

            temps_chips = []
            fan_speeds = []

            # get the miner ID
            controller_ver = info.get('Ver')[:3]
            dna = info.get('DNA')
            miner_unique_id = "Avalon_" + controller_ver + " " + dna

            # does this miner exist?
            mi = get_object_by_unique_id(miner_unique_id)
            if mi is None:
                if miner.unique_id == miner.ip:
                    # later we will delete the CONTROLLER
                    miner_is_controller = True

                mi = create_object(miner.model_id, miner.ip, dna,
                                   miner_unique_id)

            if mi is None:
                # object creation failed - skip this device rather than
                # crashing on attribute access below
                continue

            miners.append(mi)
            # set the last poll time
            mi.last_poll_time = current_milli_time()

            # get detailed HW info
            fan_speeds.append(int(info.get('Fan')))
            temp_chips_max = int(info.get('TMax'))
            temps_chips.append(temp_chips_max)
            hw_errors = int(info.get('HW'))
            hashrate = info.get('GHSmm')
            total_working_chips = int(info.get('TA'))

            # total chips for this model (comma-separated per-board counts)
            mcl = mi.chips.split(',')
            total_miner_chips = sum(
                [int(i) for i in mcl if type(i) == int or i.isdigit()])

            missing_chips = total_miner_chips - total_working_chips
            chip_stats = MinerChipStats(total_working_chips, 0, missing_chips,
                                        total_miner_chips)
            # guard against empty/unknown chip layout (division by zero)
            if total_miner_chips > 0:
                hw_error_rate_calc = (hw_errors / total_miner_chips) * 100
            else:
                hw_error_rate_calc = 0

            try:
                if info.get('PVT_T0') is not None:
                    temps_chips.extend(info.get('PVT_T0').split(','))
                    temps_chips.extend(info.get('PVT_T1').split(','))
                    temps_chips.extend(info.get('PVT_T2').split(','))
                    temps_chips.extend(info.get('PVT_T3').split(','))
                    # finally convert these strings to ints
                    temps_chips = list(map(int, temps_chips))
            except (AttributeError, ValueError):
                # missing PVT_T1..3 keys or non-numeric values - keep what
                # we have
                pass

            devs = miner_stats_devs['DEVS']
            for dev in devs:
                if dev['ID'] == miner_int_id:

                    # we can get lots of specific work and device info here
                    last_valid_work = dev.get('Last Valid Work')

                    # should we use our calculated hw_error_rate (above) or get direct from DEVs
                    hw_error_rate_direct = dev.get('Device Hardware%')

                    # maybe useful in the future
                    device_status = dev.get('Status')

                    # use the 1M, it is more accurate than the above average
                    mhs_1m = dev.get('MHS 1m')
                    if mhs_1m is not None:
                        hashrate = mhs_1m / 1000  # convert to GHS

                    shares_accepted = dev.get('Accepted')

                    # Device Uptime
                    elapsed_secs = dev.get('Device Elapsed')

                    # Determine IDLE STATE
                    if last_valid_work is not None:

                        # convert last share time to minutes (i.e. share cycles) and then compare and set if needed
                        last_share_minutes, last_share_seconds = get_total_minutes_seconds_from_timestamp(
                            last_valid_work)

                        # Seeing a situation where LastValidWork is not getting updated. Maybe a version issue.
                        # also adding a shares or 1-minute hashrate check

                        if last_share_minutes >= 1 and (hashrate == 0 or
                                                        shares_accepted == 0):
                            logger.debug("process_avalonminer() - miner=" +
                                         str(mi.id) + " - Miner is IDLE.")
                            mi.idle_cycles_count = mi.idle_cycles_count + 1
                        elif mi.idle_cycles_count > 1:
                            # reset it
                            mi.idle_cycles_count = 0

                    break

            # what are the hashrate units of this miner?
            hashrate_units = mi.hashrate[0]['units'].upper()

            # default to 0 so a failed conversion no longer leaves
            # hashrate_ghs unbound (NameError) below
            hashrate_ghs = 0
            try:
                # now, convert to GH/s which is what the normalized result handling requires
                hashrate_ghs = get_normalized_gigahash_per_sec_from_hashrate(
                    hashrate, hashrate_units)
            except Exception:
                pass

            try:
                hw_error_rate = math_functions.get_formatted_float_rate(
                    hw_error_rate_direct, 4)
            except Exception:
                # no matching DEV entry (hw_error_rate_direct undefined) or
                # a bad value - fall back to our calculated rate
                hw_error_rate = math_functions.get_formatted_float_rate(
                    hw_error_rate_calc, 4)

            # Populate results FOR THIS MINER
            results.populate_miner_results(mi, elapsed_secs, worker, algo,
                                           pool, chip_stats, temps_chips,
                                           fan_speeds, hashrate_ghs,
                                           hw_error_rate)

            # increment the miner_int_id
            miner_int_id = miner_int_id + 1

        if miner_is_controller:
            # rename the controller object; it is disabled so only the
            # individual attached miners are tracked from now on
            miner.unique_id = "Avalon Controller " + controller_ver + " " + miner.ip
            miner.set_enabled(False)

        return elapsed_secs
    def execute(self):
        """
        Executes a Generic Dependency Check for Relations.

        Evaluates the configured state tests (state 1 / state 2) against
        this object and its relation; when a state trips and the repeat
        delay allows, logs the configured message and runs the configured
        behavior.

        Returns:
            True if runs successfully.
        """

        # create the eval and parsers
        parser = GenericArgumentParser(self.args)

        # create local copies of variables that may be needed for parameter evaluation
        # NOTE: `object` (deliberately shadowing the builtin) and `relation`
        # are captured via locals() below - presumably so evaluated parameter
        # expressions can reference them by these exact names; do not rename.
        object = self.object
        relation = self.relation
        check_id = parser.get('id')

        # create the Param Evaluator (locals() hands the names above to the
        # evaluator's expression context)
        evaluator = BaseEvaluator(check_id, locals())
        evaluator.set_parser(parser)

        # optional guard: the relation must expose the named attribute
        has_attr_check = parser.get('relation_hasattr')
        if has_attr_check is not None:
            if hasattr(relation, has_attr_check) is False:
                return False

        # optional guard: restrict the check to a specific related model
        # ('ANY' or absent means no restriction)
        related_model = parser.get('related_model')
        if related_model is None or related_model.upper() == 'ANY':
            # all good
            pass
        else:
            if relation.model.model != related_model:
                return False

        # determine if there is a trip for STATE 1
        self.state_1_match = evaluator.evaluate_as_boolean(
            parser.get('test_for_state_1'))

        # determine if there is a trip for STATE 2
        self.state_2_match = evaluator.evaluate_as_boolean(
            parser.get('test_for_state_2'))

        # nothing tripped - nothing to do
        if self.state_1_match is False and self.state_2_match is False:
            return False

        if self.has_repeat_delay_enabled() is False:

            # set the repeat delay
            self.set_repeat_delay()

            action = None

            # we can go through with the set actions
            # (state 1 takes precedence when both states match)
            if self.state_1_match:
                self.message = evaluator.parse_and_evaluate(
                    'message_on_state_1')
                action = self.args.get('behavior_on_state_1')
            elif self.state_2_match:
                self.message = evaluator.parse_and_evaluate(
                    'message_on_state_2')
                action = self.args.get('behavior_on_state_2')

            if self.message:
                logger.debug(self.message)

            if action:
                evaluator.parse_and_evaluate(action)

            # we are here, so it ran, and was a success
            return True

        else:

            # fail due to delay
            return False
예제 #20
0
    def poll(miner, results):
        """
        Polls a Baikal miner (SG-MINER 5.6.6 based) and populates results.

        Args:
            miner: the miner object to poll.
            results: results collector to populate.

        Returns:
            Elapsed seconds of device uptime, or -1 if the miner was not
            accessible.
        """

        global errors
        # Uses SG-MINER 5.6.6
        logger.debug("poll_baikal() - miner=" + str(miner.id))

        # get the DEVICE / CARD stats
        miner_stats = cgminer.get_baikal_devs(miner.ip)

        elapsed_secs = -1

        # if miner STILL not accessible
        status = miner_stats['STATUS'][0]['STATUS']
        if status == 'error':
            errors = True
            results.inactive_objects.append(miner)

        else:

            total_mh = 0
            Xs = 0  # dead chips
            Os = 0  # working chips
            Ds = 0  # inactive chips
            Ts = 0  # total chips

            temps = []
            fan_speeds = []  # baikal devices do not report fan speeds here
            elapsed_secs = 0

            pool_name = ""
            algo = ""
            worker = ""

            # Get total number of chips according to miner's model
            # convert miner.model.chips to int list and sum.
            # NOTE: this was initialized to the int 0, which made the
            # per-board indexing below raise TypeError whenever the chips
            # property could not be parsed - use an empty list instead.
            chips_list = []
            try:
                chips_list = [int(y) for y in miner.chips.split(',')]
                Ts = sum(chips_list)
            except (AttributeError, ValueError):
                if Ts == 0:
                    logger.debug("process_baikal() - miner=" + str(miner.id) + " - chips are EMPTY - check object properties.")

            # chips per board (0 when the model's chip layout is unknown)
            chips_per_board = chips_list[0] if chips_list else 0

            # Get active pool
            miner_pools = cgminer.get_pools(miner.ip)
            for pool in miner_pools['POOLS']:
                if pool['Stratum Active'] == True:
                    worker = pool['User']
                    algo = pool['Algorithm']
                    #pool_name = pool['Name']
                    # get the PORT as well, different pools/algos at different ports
                    pool_name = pool['URL'].split("//", 1)[-1]
                    break

            if algo == "":
                # no active pool found - the miner is idle
                logger.warning("process_baikal() - miner=" + str(miner.id) + " - Miner is IDLE.")
                # increase idle cycles
                miner.idle_cycles_count = miner.idle_cycles_count + 1

            # get the coin address and worker
            coin_address, worker = parse_worker_string(miner, worker)

            # Get other miner stats (temps, chips, etc.)
            for board in miner_stats['DEVS']:

                if board['Enabled'] == "Y":

                    # get the total MH
                    board_mh = float(board['MHS 5s'])
                    total_mh = total_mh + board_mh

                    # get the working chips
                    if board['Status'] != "Alive" or board_mh == 0:
                        # we have a dead board - count all its chips as dead
                        Xs = Xs + chips_per_board
                    else:
                        # this board is running
                        elapsed_secs = board['Device Elapsed']
                        Os = Os + chips_per_board
                        board_temp = board['Temperature']
                        temps.append(board_temp)

            # convert to GigaHashes, since hashrate algo requires that to start
            hashrate_ghs = float(total_mh / 1000)

            # get the chip stats
            chip_stats = MinerChipStats(Os, Xs, Ds, Ts)

            # Get HW Errors
            summary_stats = cgminer.get_summary(miner.ip)

            # process error rate
            hw_error_rate = math_functions.get_formatted_float_rate(summary_stats['SUMMARY'][0]['Device Hardware%'], 4)

            # populate results
            results.populate_miner_results(miner, elapsed_secs, worker, algo, pool_name, chip_stats,
                                           temps, fan_speeds, hashrate_ghs, hw_error_rate)

        return elapsed_secs
예제 #21
0
    def poll(miner, results):
        """
        Polls an Antminer via the cgminer API and populates results.

        Pool/worker/algo stats are refreshed through
        ``miner.poll_pool_stats()``; chips, temps, fans, hashrate and the
        HW error rate are read from the 'stats' call.

        Args:
            miner: the miner object to poll.
            results: results collector to populate.

        Returns:
            Elapsed seconds of miner uptime, or -1 if the miner was not
            accessible.
        """

        logger.debug("poll_antminer() - miner=" + str(miner.id))

        elapsed_secs = -1

        # get the miner stats
        miner_stats = cgminer.get_antminer_stats(miner.ip)

        # if miner not accessible, try again!
        if miner_stats['STATUS'][0]['STATUS'] == 'error':
            miner_stats = cgminer.get_antminer_stats(miner.ip)

        # if miner STILL not accessible
        if miner_stats['STATUS'][0]['STATUS'] == 'error':
            results.inactive_objects.append(miner)

        else:

            # WORKER, POOL, ALGO, LAST SHARE TIME

            # retrieve and process the pool stats on the Miner
            miner.poll_pool_stats()

            # CHIPS, FANS, TEMPS

            Os = 0   # good chips ('o')
            Xs = 0   # bad chips ('x')
            Ds = 0   # inactive chips ('-')
            TsC = 0  # chip count reported by chain_acn (informational only)
            temps_chips = []
            temps_pcb = []
            fan_speeds = []

            # get total number of chips from the model definition
            Ts = sum([int(y) for y in str(miner.chips).split(',')])

            # Get ASIC chip, temp, fan, counts, status, etc. all with a single loop
            for key in miner_stats['STATS'][1].keys():

                # FANS

                if "fan_num" in key:
                    # ignore for now
                    pass
                elif "fan" in key:
                    value = miner_stats['STATS'][1][key]
                    if value > 0:
                        fan_speeds.append(value)

                # TEMPS

                if "temp2_" in key:
                    value = miner_stats['STATS'][1][key]
                    # "temp2_*" are the chip temps; any "temp_*" values
                    # collected so far were actually PCB temps, so shift them
                    if len(temps_chips) > 0 and len(temps_pcb) == 0:
                        temps_pcb = temps_chips
                        temps_chips = []
                    if value > 0:
                        temps_chips.append(value)
                elif "temp_num" in key:
                    # do not use the number of temps in the aggregate
                    pass
                elif "temp_max" in key:
                    # do not use max values in aggregate
                    # we may use this later!!
                    pass
                elif "temp" in key:
                    value = miner_stats['STATS'][1][key]
                    if value > 0:
                        temps_chips.append(value)

                # CHIPS

                if "chain_acn" in key:
                    value = miner_stats['STATS'][1][key]
                    # not used, but maybe in the future we will use it
                    TsC = TsC + value

                if "chain_acs" in key:
                    # chain status string like "oooox-oo":
                    # o = good, x = bad, - = inactive
                    value = str(miner_stats['STATS'][1][key])
                    Os = Os + value.count('o')
                    Xs = Xs + value.count('x')
                    Ds = Ds + value.count('-')

            # summarize chip stats with an object
            chip_stats = MinerChipStats(Os, Xs, Ds, Ts)

            # HASHRATE

            # what are the hashrate units of this miner?
            hashrate_units = miner.hashrate[0]['units'].upper()
            hashrate = None

            try:
                # Get the current hashrate of the Antminer (usually stored in the 'GH/S 5s' or 'GH/S avg' params
                hashrate = float(str(miner_stats['STATS'][1]['GHS 5s']))
            except (KeyError, ValueError):
                pass

            if hashrate is None:
                # older firmware reports MHS instead
                try:
                    hashrate = float(str(miner_stats['STATS'][1]['MHS 5s']))
                except (KeyError, ValueError):
                    pass

            # now, convert to GH/s which is what the normalized result handling requires
            # NOTE(review): hashrate may still be None here if neither key
            # exists - assumed the helper tolerates that; verify
            hashrate_ghs = get_normalized_gigahash_per_sec_from_hashrate(hashrate, hashrate_units)

            # Get HW Errors
            hw_error_rate = None

            try:
                hw_error_rate = math_functions.get_formatted_float_rate(miner_stats['STATS'][1]['Device Hardware%'], 4)
            except (KeyError, ValueError, TypeError):
                pass

            if hw_error_rate is None:
                # we can get it from SUMMARY call
                summary_json = cgminer.get_summary(miner.ip)
                try:
                    summary = summary_json['SUMMARY'][0]
                    hw_error_rate = math_functions.get_formatted_float_rate(summary['Device Hardware%'], 4)
                except (KeyError, IndexError, ValueError, TypeError):
                    pass

            # UPTIME
            elapsed_secs = miner_stats['STATS'][1]['Elapsed']

            # Populate results
            results.populate_miner_results(miner, elapsed_secs, miner.worker, miner.algo, miner.pool, chip_stats,
                                           temps_chips, fan_speeds, hashrate_ghs, hw_error_rate)

        return elapsed_secs
예제 #22
0
    def execute(self):
        """
        Runs the periodic profitability check for this miner.

        Compares recent profitability samples (raw, up to one hour) against
        a configured profit target and - when configured - powers the miner
        down when it becomes unprofitable and back up when it is profitable
        again, adjusting the health score and optionally notifying on
        change.
        """

        if self.object.admin_disabled:
            # no running Profitability checks on ADMIN Disabled devices
            return False

        # is it currently profitable (i.e. this last poll period)?
        profitability_results = self.results.get_result(
            self.object.id, 'profitability')

        # when was the last time we checked the profitability
        last_profitcheck_time = self.object.last_profitcheck_time

        # when is the NEXT time we are supposed to check profitability (set only on profitability FAILURE)
        next_recheck_time = self.object.next_profitcheck_time

        # first ever run: just record the time and bail out
        if last_profitcheck_time == 0:
            self.object.last_profitcheck_time = current_milli_time()
            return False

        number_samples = int(self.args.get('sample_count'))
        if number_samples > 30:
            # since we are working with RAW data here, and there are only 60 minutes of samples
            # we want to use half that so we have two datasets to compare profitability
            number_samples = 30

        # not enough time has passed since the last check (one sample per
        # minute; times are in milliseconds)
        if current_milli_time() < (last_profitcheck_time +
                                   (number_samples * 60000)):
            return False

        powered_on = self.object.is_powered_on()
        has_power_source = self.object.has_power_source()

        # currently flagged unprofitable and a recheck is scheduled
        if next_recheck_time > 0 and self.object.profitable == 0:
            if powered_on == False and has_power_source and next_recheck_time < current_milli_time(
            ):
                # the appropriate TIME has passed since the Miner was powered DOWN
                # now it is time to power it back up and run the profitability checks for X samples
                self.object.next_profitcheck_time = 0
                return self.object.power_on("Starting Profitability Check")
            elif next_recheck_time > current_milli_time():
                # somehow we are powered ON but we are still not profitable and we are NOT supposed to check yet
                # so just adjust health score and get out of the check
                add_health_score(self.object, self.results,
                                 self.HEALTH_SCORE_ADJUSTMENT)
                return False

        # get the entire profitability dataset, if available (up to entire HOUR)
        period_start = current_milli_time() - (60 * 60000)

        # get the dataframe for 'profitability' for this object, return all RAW data (defaults to "HOUR")
        df = get_datamodel_data_by_object_id('CRYPTO_MINER', period_start,
                                             None, ['profitability'],
                                             [self.object.id], False)

        if df is None:
            logger.debug("No profit data collected yet for Miner {}".format(
                self.object.unique_id))
            return False
        else:
            # remove any NaNs just in case
            df.dropna(inplace=True)

        # set a baseline profitability target
        profit_target = 1.0  # 1 is "breakeven"
        pt = self.args.get('profit_target')
        if pt is not None:
            profit_target = float(pt)

        # NOTE(review): mean()[0] picks the first entry of the per-column
        # mean Series - pandas-version sensitive; verify against the
        # project's pinned pandas
        profit_mean = df.mean()[0]
        if math.isnan(profit_mean) or profitability_results is None:
            # there is no data in there.
            return False

        # get some basic stats for the debug log
        p_pct_current = (float(profitability_results) / profit_target * 100)
        p_pct_sample = (profit_mean / profit_target * 100)

        logger.debug(
            "Miner {} profitability: '{}%' (current) and '{}%' (period) of '{}' (target)"
            .format(self.object.unique_id, p_pct_current, p_pct_sample,
                    profit_target))

        notify_on_change = str_to_bool(self.args.get('notify_on_change'))
        message = None

        # now see if we have enough samples
        total_samples = len(df)
        logger.debug("{} samples in profit dataframe, need {}".format(
            total_samples, (number_samples * 2)))

        if total_samples >= ((number_samples * 2) - 1):

            # OK, now we are really checking, so set the last profitcheck time
            self.object.last_profitcheck_time = current_milli_time()

            # split the window: newest N samples vs the older remainder
            df_samples = df.tail(number_samples)
            df_history = df.head(total_samples - number_samples)
            current_avg = df_samples.mean()[0]
            hist_avg = df_history.mean()[0]
            profitable_currently = (current_avg > profit_target)
            profitable_historically = (hist_avg > profit_target)

            powerdown_when_not_profitable = self.args.get(
                'powerdown_when_not_profitable')
            powerup_when_profitable = self.args.get('powerup_when_profitable')

            if profitable_currently and (profitable_historically is False
                                         or self.object.profitable == 0):

                # this is great, the unit is now profitable again - it was previously UNPROFITABLE
                message = "Miner is currently profitable!"

                if powered_on == False and has_power_source and powerup_when_profitable:
                    self.object.power_on(message)
                    message = None

                # reset profitability and recheck
                self.object.profitable = 1
                self.object.next_profitcheck_time = 0

            elif profitable_currently is False:

                if profitable_historically is True or self.object.profitable == 1:
                    message = "Miner has become unprofitable"
                else:
                    message = "Miner is not profitable"

                if powered_on and has_power_source and powerdown_when_not_profitable:
                    self.object.power_off(message)
                    message = None

                # set the next recheck time
                self.object.next_profitcheck_time = current_milli_time() + int(
                    self.args.get('recheck_delay'))

                # FLAG as unprofitable
                self.object.profitable = 0

                # add to the health score
                add_health_score(self.object, self.results,
                                 self.HEALTH_SCORE_ADJUSTMENT)

        # Finally, handle notification, if needed
        if message is not None:
            message_handler.log(None, message, self.object, logging.INFO,
                                notify_on_change)