    def find_tags(self):
        """
        Find all RuuviTags.

        Returns:
            dict: MAC and state of found sensors
        """

        global BLUEZ_ERROR_DISPLAYED

        logger.debug("Try to find tags...")

        # This is the amount of time to listen for tags - TODO get from config file
        timeout = 10

        tags = {}
        tags_skip = {}

        macs = []
        start = current_milli_time()

        try:

            for data in RuuviTagSensor._get_ruuvitag_datas(macs, timeout):

                if current_milli_time() > (start + (timeout * 1000)):
                    break

                if (data[0] in tags) or (data[0] in tags_skip):
                    continue

                logger.debug("Found TAG {}, DATA {}".format(data[0],data[1]))

                data_format = data[1]['data_format']

                if data_format < 4:
                    tags[data[0]] = data[1]
                else:
                    tags_skip[data[0]] = data[1]
                    logger.debug("Skipping data_format 4 tag - polling locks up thread")

        except Exception:
            logger.exception("Error while finding tags")

        if not tags:
            if not BLUEZ_ERROR_DISPLAYED:
                BLUEZ_ERROR_DISPLAYED = True
                logger.warning("No RuuviTags found. Verify this is running on Raspbian and "
                               "that you have installed BlueZ: 'sudo apt-get install bluez-hcidump'")

        return tags
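
A helper sketch, not part of the original snippet: the current_milli_time() calls used throughout these examples are assumed to be a simple epoch-milliseconds utility, which is commonly written like this.

import time

def current_milli_time():
    # current epoch time in whole milliseconds
    return int(round(time.time() * 1000))
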
Example #2
    def __wake_on_lan(self):

        # send the packet

        result = network.wake_on_lan(self)

        if result:

            # set the last send packet time
            self.last_wol_time = time_functions.current_milli_time()

            # reset the init state so state machine does not try to auto-powercycle, etc.
            self._reset_init_state()

            message = "Sent wake-on-lan packet -> {} (ID={}) on MAC {}".format(
                self.unique_id, self.id, self.mac)
            message_handler.log(self.results, message, self, self.log_level,
                                True)

        else:
            # Usually WOL will fail as devices must be configured, etc. ... not worth logging or notifying
            #logger.debug("WOL FAIL for object {} (ID={}) on MAC {}".format(self.unique_id, self.id, self.mac))
            pass

        return result
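
The network.wake_on_lan(self) call above is not included in this snippet; below is a minimal hedged sketch of a standard Wake-on-LAN sender, assuming the device object exposes a mac attribute. The magic packet is six 0xFF bytes followed by sixteen copies of the target MAC, broadcast over UDP.

import socket

def send_magic_packet(mac, broadcast_ip="255.255.255.255", port=9):
    # build the standard magic packet: 6 x 0xFF + 16 x MAC
    mac_bytes = bytes.fromhex(mac.replace(":", "").replace("-", ""))
    payload = b"\xff" * 6 + mac_bytes * 16
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.sendto(payload, (broadcast_ip, port))
    return True
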
Example #3
    def __powercycle(self):

        result = False

        pdu = self.__get_connected_pdu()

        if pdu:

            # was there an error message associated with this powercycle command?
            message = None

            try:
                message = self.results.get_last_error_message_by_object(
                    self.id)
            except Exception:
                pass

            previous_state = self.power_state
            self.power_state = self.CONST_STATE_CYCLE

            # try to powercycle outlets

            if self.connected_outlet == "" and pdu.outlets == "1":
                # if the PDU only has a single outlet, and the "connected_outlet" parameter is not set, just do it
                self.connected_outlet = pdu.outlets

            # update the time
            self.last_powercycle_time = time_functions.current_milli_time()

            # increment the count
            self.powercycle_count += 1

            result = pdu.powercycle_outlets(self.connected_outlet)

            if result is True:

                self.power_state = self.CONST_STATE_ON

                # reset the init state so state machine does not try to auto-powercycle, etc.
                self._reset_init_state()
                message = self._get_message("POWERCYCLE", None, self, message)

            else:

                # return to the previous state
                self.power_state = previous_state
                message = self._get_message("POWERCYCLE", "FAILED", self,
                                            message)

            message_handler.log(self.results, message, self, self.log_level,
                                self.notify_on_power_change)

        return result
Example #4
    def restart(self):

        millis = time_functions.current_milli_time()

        response = None

        if (millis - self.restart_delay) > self.last_restart_time:
            # we have not tried to restart in a while...
            self.last_restart_time = millis
            response = restart_claymore_miner(self.ip)

        # did the restart fail (or never run because the delay has not expired)?
        restart_failed = response is None or response['STATUS'][0][
            'STATUS'] == 'error'

        return not restart_failed
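
restart_claymore_miner() is defined elsewhere in this project. As a hedged sketch only: Claymore miners expose a JSON remote-management interface (default port 3333) with a "miner_restart" method, and the envelope below is shaped so the response['STATUS'][0]['STATUS'] check in restart() above can work. The port, envelope, and error handling here are assumptions, not the project's actual helper.

import json
import socket

def restart_claymore_miner(ip, port=3333, timeout=5):
    request = json.dumps({"id": 0, "jsonrpc": "2.0", "method": "miner_restart"}) + "\n"
    try:
        with socket.create_connection((ip, port), timeout=timeout) as sock:
            sock.sendall(request.encode())
            raw = sock.recv(4096).decode(errors="ignore")
        # wrap the reply in a cgminer-style envelope so the caller's error check works
        status = "S" if raw else "error"
        return {"STATUS": [{"STATUS": status}], "reply": raw}
    except OSError:
        # unreachable miner -> caller treats None as a failed restart
        return None
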
Example #5
    def restart_miner(self):
        """
        Attempts to restart the miner. This is a wrapper for "miner.restart()".
        The actual "restart" method must be implemented individually for each subclass of Miner.

        Returns:
            boolean

        """

        success = False

        # can we still ping this miner?
        if self.object_states.ping_error == 0:

            # we can still ping it, so attempt to restart miner
            logger.debug("Attempt to restart object {}".format(self.id))

            success = False
            self.__init_restart_delay(self.object)

            # should we restart it?
            restart = time_functions.get_timeout_expired(
                self.last_restart_time, self.restart_delay)

            if restart:

                # do the actual restart
                success = self.restart()

                # set the last restart attempt time
                self.last_restart_time = time_functions.current_milli_time()

                if success:

                    # reset the init state so state machine does not try to auto-powercycle, etc.
                    self._reset_init_state()

                    logger.info("Restarted Miner {}(ID={})".format(
                        self.object.unique_id, self.object.id))
                else:
                    # according to state machine diagram, if restart fails, powercycle (OR OFF)
                    success = self.powercycle()

        return success
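
time_functions.get_timeout_expired() is referenced here and in powercycle() below but not included in this snippet. Given how it is called (last time in milliseconds, timeout in milliseconds), a plausible sketch, offered as an assumption rather than the project's code, is:

import time

def get_timeout_expired(last_time_ms, timeout_ms):
    # True once timeout_ms milliseconds have elapsed since last_time_ms
    now_ms = int(round(time.time() * 1000))
    return (now_ms - last_time_ms) > timeout_ms
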
Example #6
    def powercycle(self):
        """
        Attempts to PowerCycle the object.

        Returns:
            True - if power cycled
        """

        # If we are simply powered OFF, then just power ON instead of going
        # through the entire set of checks and then turning OFF / ON

        if self.get_powered_state() == self.CONST_STATE_OFF:
            # just turn it ON
            return self.power_on()

        # BY DEFAULT, CHECK STATE MACHINE - and other PARAMETERS

        if not self.force_action:

            if hasattr(self, 'last_wol_time'):

                # Was this device recently WOL'd??
                # If so, we should wait a little time for the WOL packet to hit and for the system to come back up
                # if it has been a period of time after the WOL packet and the device is still not PINGABLE,
                # then we can try a powercycle

                try_powercycle = self.last_wol_time == 0 or \
                    (self.last_wol_time + (DEVICE_WAKE_ON_LAN_TIMEOUT_MS / 2)) <= \
                    time_functions.current_milli_time()

                if not try_powercycle:
                    logger.debug(
                        "{}(ID={}) was very recently sent WOL packet. "
                        "Waiting longer before POWERCYCLE attempt".format(
                            self.unique_id, self.id))
                    return False

            # have we waited enough time to clear the powercycle count?
            if self.powercycle_count > 0:

                max_time_expired = time_functions.get_timeout_expired(
                    self.last_powercycle_time,
                    self.powercycle_max_wait_time * 1000)

                if max_time_expired:
                    # we can clear them
                    self.powercycle_count = 0

            # have we power-cycled too many times?
            # have we power-cycled too many times?
            if self.powercycle_count >= self.powercycle_max_attempts:

                # should we disable due to too many power cycle attempts?
                disable = config_helpers.get_config_value(
                    "MISC", "device_disable_after_failed_powercycle", True)

                powered_off = False

                if disable:

                    powered_off = self.__poweroff("powercycled too many times")

                    if powered_off:

                        # disable the device
                        self.set_enabled(False)

                return powered_off

        # try to actually powercycle
        return self.__powercycle()
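
config_helpers.get_config_value("MISC", "device_disable_after_failed_powercycle", True) above suggests a section/key/default lookup. A minimal configparser-based sketch follows; the file name and type-coercion rules are assumptions, not the project's implementation.

import configparser

def get_config_value(section, key, default=None, path="config.ini"):
    parser = configparser.ConfigParser()
    parser.read(path)
    if not parser.has_option(section, key):
        return default
    if isinstance(default, bool):
        # coerce "true"/"false"/"1"/"0" style values to a real boolean
        return parser.getboolean(section, key)
    return parser.get(section, key)
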
Example #7
    def execute(self):

        if self.object.admin_disabled:
            # no running Profitability checks on ADMIN Disabled devices
            return False

        # is it currently profitable (i.e. this last poll period)?
        profitability_results = self.results.get_result(
            self.object.id, 'profitability')

        # when was the last time we checked the profitability
        last_profitcheck_time = self.object.last_profitcheck_time

        # when is the NEXT time we are supposed to check profitability (set only on profitability FAILURE)
        next_recheck_time = self.object.next_profitcheck_time

        if last_profitcheck_time == 0:
            self.object.last_profitcheck_time = current_milli_time()
            return False

        number_samples = int(self.args.get('sample_count'))
        if number_samples > 30:
            # since we are working with RAW data here, and there are only 60 minutes of samples,
            # we cap at half that so there are two datasets to compare profitability
            # (see the worked sketch after this function)
            number_samples = 30

        if current_milli_time() < (last_profitcheck_time +
                                   (number_samples * 60000)):
            return False

        powered_on = self.object.is_powered_on()
        has_power_source = self.object.has_power_source()

        if next_recheck_time > 0 and self.object.profitable == 0:
            if not powered_on and has_power_source and next_recheck_time < current_milli_time():
                # the appropriate TIME has passed since the Miner was powered DOWN
                # now it is time to power it back up and run the profitability checks for X samples
                self.object.next_profitcheck_time = 0
                return self.object.power_on("Starting Profitability Check")
            elif next_recheck_time > current_milli_time():
                # somehow we are powered ON but we are still not profitable and we are NOT supposed to check yet
                # so just adjust health score and get out of the check
                add_health_score(self.object, self.results,
                                 self.HEALTH_SCORE_ADJUSTMENT)
                return False

        # get the entire profitability dataset, if available (up to entire HOUR)
        period_start = current_milli_time() - (60 * 60000)

        # get the dataframe for 'profitability' for this object, return all RAW data (defaults to "HOUR")
        df = get_datamodel_data_by_object_id('CRYPTO_MINER', period_start,
                                             None, ['profitability'],
                                             [self.object.id], False)

        if df is None:
            logger.debug("No profit data collected yet for Miner {}".format(
                self.object.unique_id))
            return False
        else:
            # remove any NaNs just in case
            df.dropna(inplace=True)

        # set a baseline profitability target
        profit_target = 1.0  # 1 is "breakeven"
        pt = self.args.get('profit_target')
        if pt is not None:
            profit_target = float(pt)

        profit_mean = df.mean()[0]
        if math.isnan(profit_mean) or profitability_results is None:
            # there is no data in there.
            return False

        # get some basic stats for the debug log
        p_pct_current = (float(profitability_results) / profit_target * 100)
        p_pct_sample = (profit_mean / profit_target * 100)

        logger.debug(
            "Miner {} profitability: '{}%' (current) and '{}%' (period) of '{}' (target)"
            .format(self.object.unique_id, p_pct_current, p_pct_sample,
                    profit_target))

        notify_on_change = str_to_bool(self.args.get('notify_on_change'))
        message = None

        # now see if we have enough samples
        total_samples = len(df)
        logger.debug("{} samples in profit dataframe, need {}".format(
            total_samples, (number_samples * 2)))

        if total_samples >= ((number_samples * 2) - 1):

            # OK, now we are really checking, so set the last profitcheck time
            self.object.last_profitcheck_time = current_milli_time()

            df_samples = df.tail(number_samples)
            df_history = df.head(total_samples - number_samples)
            current_avg = df_samples.mean()[0]
            hist_avg = df_history.mean()[0]
            profitable_currently = (current_avg > profit_target)
            profitable_historically = (hist_avg > profit_target)

            powerdown_when_not_profitable = self.args.get(
                'powerdown_when_not_profitable')
            powerup_when_profitable = self.args.get('powerup_when_profitable')

            if profitable_currently and (not profitable_historically
                                         or self.object.profitable == 0):

                # this is great, the unit is now profitable again - it was previously UNPROFITABLE
                message = "Miner is currently profitable!"

                if not powered_on and has_power_source and powerup_when_profitable:
                    self.object.power_on(message)
                    message = None

                # reset profitability and recheck
                self.object.profitable = 1
                self.object.next_profitcheck_time = 0

            elif not profitable_currently:

                if profitable_historically or self.object.profitable == 1:
                    message = "Miner has become unprofitable"
                else:
                    message = "Miner is not profitable"

                if powered_on and has_power_source and powerdown_when_not_profitable:
                    self.object.power_off(message)
                    message = None

                # set the next recheck time
                self.object.next_profitcheck_time = current_milli_time() + int(
                    self.args.get('recheck_delay'))

                # FLAG as unprofitable
                self.object.profitable = 0

                # add to the health score
                add_health_score(self.object, self.results,
                                 self.HEALTH_SCORE_ADJUSTMENT)

        # Finally, handle notification, if needed
        if message is not None:
            message_handler.log(None, message, self.object, logging.INFO,
                                notify_on_change)
    def poll(miner, results):

        logger.debug("poll_avalonminer() - miner=" + str(miner.id))

        elapsed_secs = -1
        last_valid_work = 0

        if hasattr(miner, 'last_poll_time'):
            last_poll_time = miner.last_poll_time
            if ((current_milli_time() - last_poll_time) < 60000):
                # Do not poll it again, we handle all miners on the controller during the poll phase
                return True

        miners = []
        miners_info = []

        # get the miner stats
        miner_stats = cgminer.get_avalon_stats(miner.ip)

        # if miner not accessible
        if miner_stats['STATUS'][0]['STATUS'] != 'S':
            results.inactive_objects.append(miner)
            return elapsed_secs

        # controller elapsed seconds
        elapsed_secs = miner_stats['STATS'][0]['Elapsed']

        # assuming all is good, get the devs and pools
        miner_stats_devs = cgminer.get_avalon_devs(miner.ip)
        miner_pools = cgminer.get_pools(miner.ip)

        # basic pool processing
        worker = None
        pool = None
        algo = None

        for miner_pool in miner_pools['POOLS']:
            miner_pool_status = miner_pool.get('Status')
            miner_pool_stratum_active = miner_pool.get('Stratum Active')

            if miner_pool_status == "Alive" or miner_pool_stratum_active is True:
                # pull pertinent information
                worker = miner_pool['User']
                # get the PORT as well, different pools/algos at different ports
                pool = miner_pool['URL'].split("//", 1)[-1]
                algo = get_algo(pool)
                break

        if algo is None:
            algo = miner.hashrate[0]['algo']

        # get the coin address and worker
        coin_address, worker = parse_worker_string(miner, worker)

        # get all miner info for each miner
        # it is possible to have 20 miners, 5 miners per AUC per controller
        for i in range(20):
            try:
                miner_info = miner_stats['STATS'][0]['MM ID' + str(i + 1)]
                # this returns a chunky string for each device like:

                # "Ver[7411706-3162860] DNA[013cae6bfb1bb6c6] Elapsed[183] MW[2024 2024 2024 2002] LW[8074]
                #  MH[3 0 3 4] HW[10] DH[0.000%] Temp[38] TMax[93] Fan[4110] FanR[48%] Vi[1215 1215 1211 1210]
                #  Vo[4461 4447 4438 4438] GHSmm[7078.84] WU[88583.15] Freq[628.45] PG[15] Led[0]
                #  MW0[6 3 8 7 5 10 4 5 6 7 12 4 7 9 6 11 11 9 11 9 7 4] MW1[3 5 9 8 7 4 4 4 6 5 9 3 8 4 8 8 7 5 6 8 4 4]
                #  MW2[12 7 3 4 5 4 5 2 6 6 11 6 6 6 7 5 5 9 4 6 6 5] MW3[5 3 11 5 5 5 4 6 8 6 3 7 3 8 4 9 4 7 7 3 5 3]
                #  TA[88] ECHU[16 0 0 0] ECMM[0] FM[1] CRC[0 0 0 0] PAIRS[0 0 0] PVT_T[21-76/1-88/83 1-80/11-92/84 21-82/12-93/83 1-82/10-93/87]"

                # check out page 13 for a detailed explanation: https://canaan.io/wp-content/uploads/2018/05/Troubleshooting-and-repair-guide-for-AvalonMiner-models-721-741-761-821-and-841-release-v1.4-14.05.2018.pdf

                if miner_info is not None:
                    # add _NULL_ so the final field also ends with "] " and splits cleanly
                    # (see the standalone parsing sketch after this function)
                    miner_info = miner_info + " _NULL_[1"
                    miners_info.append(
                        dict(x.split('[') for x in miner_info.split('] ')))
            except Exception:
                # this 'MM ID' slot is not populated on this controller
                pass

        # Now we have info on all the miners attached to this Avalon/AUC3 controller
        # Iterate through and process

        miner_int_id = 0
        miner_is_controller = False
        controller_ver = ''

        for info in miners_info:

            temps_chips = []
            fan_speeds = []
            device_hashrate_1_min = None

            # get the miner ID
            controller_ver = info.get('Ver')[:3]
            dna = info.get('DNA')
            miner_unique_id = "Avalon_" + controller_ver + " " + dna

            # does this miner exist?
            mi = get_object_by_unique_id(miner_unique_id)
            if mi is None:
                if miner.unique_id == miner.ip:
                    # later we will delete the CONTROLLER
                    miner_is_controller = True

                mi = create_object(miner.model_id, miner.ip, dna,
                                   miner_unique_id)

            if mi is not None:
                miners.append(mi)
                # set the last poll time
                mi.last_poll_time = current_milli_time()

            # get detailed HW info
            fan_speeds.append(int(info.get('Fan')))
            temp_intake = int(info.get('Temp'))
            temp_chips_max = int(info.get('TMax'))
            temps_chips.append(temp_chips_max)
            hw_errors = int(info.get('HW'))
            hashrate = info.get('GHSmm')
            total_working_chips = int(info.get('TA'))

            mcl = mi.chips.split(',')
            total_miner_chips = sum(
                [int(i) for i in mcl if type(i) == int or i.isdigit()])

            missing_chips = total_miner_chips - total_working_chips
            chip_stats = MinerChipStats(total_working_chips, 0, missing_chips,
                                        total_miner_chips)
            # guard against a zero chip count from a malformed config
            hw_error_rate_calc = (hw_errors / total_miner_chips) * 100 if total_miner_chips else 0

            try:
                if info.get('PVT_T0') is not None:
                    temps_chips.extend(info.get('PVT_T0').split(','))
                    temps_chips.extend(info.get('PVT_T1').split(','))
                    temps_chips.extend(info.get('PVT_T2').split(','))
                    temps_chips.extend(info.get('PVT_T3').split(','))
                    # finally convert these strings to ints
                    temps_chips = list(map(int, temps_chips))
            except Exception:
                # chip-level PVT_T* temperature fields are not present on all firmware versions
                pass

            devs = miner_stats_devs['DEVS']
            for dev in devs:
                if dev['ID'] == miner_int_id:

                    # we can get lots of specific work and device info here
                    last_valid_work = dev.get('Last Valid Work')

                    # should we use our calculated hw_error_rate (above) or get direct from DEVs
                    hw_error_rate_direct = dev.get('Device Hardware%')

                    # maybe useful in the future
                    device_status = dev.get('Status')

                    # use the 1M, it is more accurate than the above average
                    hashrate = dev.get('MHS 1m') / 1000  # convert to GHS

                    shares_accepted = dev.get('Accepted')

                    # Device Uptime
                    elapsed_secs = dev.get('Device Elapsed')

                    # Determine IDLE STATE
                    if last_valid_work is not None:

                        # convert last share time to minutes (i.e. share cycles) and then compare and set if needed
                        last_share_minutes, last_share_seconds = get_total_minutes_seconds_from_timestamp(
                            last_valid_work)

                        # Seeing a situation where LastValidWork is not getting updated. Maybe a version issue.
                        # also adding a shares or 1-minute hashrate check

                        if last_share_minutes >= 1 and (hashrate == 0 or
                                                        shares_accepted == 0):
                            logger.debug("process_avalonminer() - miner=" +
                                         str(mi.id) + " - Miner is IDLE.")
                            mi.idle_cycles_count = mi.idle_cycles_count + 1
                        elif mi.idle_cycles_count > 1:
                            # reset it
                            mi.idle_cycles_count = 0

                    break

            # what are the hashrate units of this miner?
            hashrate_units = mi.hashrate[0]['units'].upper()

            try:
                # now, convert to GH/s which is what the normalized result handling requires
                hashrate_ghs = get_normalized_gigahash_per_sec_from_hashrate(
                    hashrate, hashrate_units)
            except Exception:
                # fall back to the raw reading so the result population below
                # does not fail on an undefined hashrate_ghs
                hashrate_ghs = hashrate

            try:
                hw_error_rate = math_functions.get_formatted_float_rate(
                    hw_error_rate_direct, 4)
            except Exception:
                hw_error_rate = math_functions.get_formatted_float_rate(
                    hw_error_rate_calc, 4)

            # Populate results FOR THIS MINER
            results.populate_miner_results(mi, elapsed_secs, worker, algo,
                                           pool, chip_stats, temps_chips,
                                           fan_speeds, hashrate_ghs,
                                           hw_error_rate)

            # increment the miner_int_id
            miner_int_id += 1

        if miner_is_controller:
            miner.unique_id = "Avalon Controller " + controller_ver + " " + miner.ip
            miner.set_enabled(False)

        return elapsed_secs
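
A standalone illustration of the 'MM ID' parsing used above (sample string shortened from the comment in the code): appending " _NULL_[1" makes every real field end in "] ", so splitting on "] " and then on "[" yields clean key/value pairs.

sample = "Ver[7411706-3162860] DNA[013cae6bfb1bb6c6] Elapsed[183] Temp[38] TMax[93] Fan[4110] GHSmm[7078.84] TA[88]"
fields = dict(x.split('[') for x in (sample + " _NULL_[1").split('] '))
print(fields['DNA'], fields['Temp'], fields['GHSmm'], fields['TA'])
# 013cae6bfb1bb6c6 38 7078.84 88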