Example 1
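A unit test asserting that get_bcaddress_version and get_bcaddress accept known-good addresses on several networks.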
    def test_address(self):
        # HVC test net
        self.assertIsNotNone(get_bcaddress_version('mp4hfsv6ESdc3jttjosmVBLdnnhL5wyKJm'))
        self.assertIsNotNone(get_bcaddress('mp4hfsv6ESdc3jttjosmVBLdnnhL5wyKJm'))
        # HVC main net
        self.assertIsNotNone(get_bcaddress_version('HR2KQ3qRWJ3yQhdivJLVVEScUg1EMp6EZH'))
        self.assertIsNotNone(get_bcaddress('HR2KQ3qRWJ3yQhdivJLVVEScUg1EMp6EZH'))
        # BTC,LTC... test net
        self.assertIsNotNone(get_bcaddress_version('nrJQ8mHB2AndBZiGT5m7ow7DgJeweCxT5n'))
        self.assertIsNotNone(get_bcaddress('nrJQ8mHB2AndBZiGT5m7ow7DgJeweCxT5n'))
        # DOGE main net
        self.assertIsNotNone(get_bcaddress_version('DGyiBd4UtcYB69dW1hL5TrySMUyPg1KSkg'))
        self.assertIsNotNone(get_bcaddress('DGyiBd4UtcYB69dW1hL5TrySMUyPg1KSkg'))
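
Every snippet in this collection relies on the same contract: get_bcaddress_version returns the address's integer version byte when its Base58Check checksum validates, and None otherwise. Below is a minimal Python 3 sketch of that behavior, assuming standard 25-byte Base58Check addresses; the helper names are illustrative, not cryptokit's actual implementation.

import hashlib

# Bitcoin's Base58 alphabet (no 0, O, I or l)
B58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'


def b58decode_check25(address):
    """Decode a 25-byte Base58Check address; return the 21-byte payload or None."""
    num = 0
    for char in address:
        num = num * 58 + B58_ALPHABET.index(char)  # ValueError on bad characters
    try:
        raw = num.to_bytes(25, 'big')  # version byte + hash160 + 4-byte checksum
    except OverflowError:
        return None
    payload, checksum = raw[:-4], raw[-4:]
    digest = hashlib.sha256(hashlib.sha256(payload).digest()).digest()
    return payload if digest[:4] == checksum else None


def get_bcaddress_version_sketch(address):
    """Return the integer version byte of a valid address, else None.

    Hypothetical stand-in for cryptokit's get_bcaddress_version.
    """
    try:
        payload = b58decode_check25(address)
    except ValueError:  # non-Base58 character in the input
        return None
    return payload[0] if payload is not None else None


# Version 0 is a mainnet BTC address; 111 is a BTC/LTC testnet address.
assert get_bcaddress_version_sketch('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
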
Example 2
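Merges configuration defaults with keyword arguments, then refuses to start unless the pool address validates and at least one coinserver is configured.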
    def _set_config(self, **kwargs):
        # A fast way to set defaults for the kwargs then set them as attributes
        self.config = dict(coinserv=None,
                           extranonce_serv_size=8,
                           extranonce_size=4,
                           diff1=0x0000FFFF00000000000000000000000000000000000000000000000000000000,
                           hashes_per_share=0xFFFF,
                           merged=None,
                           block_poll=0.2,
                           job_refresh=15,
                           rpc_ping_int=2,
                           pow_block_hash=False,
                           poll=None,
                           pool_address='',
                           signal=None)
        self.config.update(kwargs)

        if not get_bcaddress_version(self.config['pool_address']):
            self.logger.error("No valid pool address configured! Exiting.")
            exit()

        # check that we have at least one configured coin server
        if not self.config.get('main_coinservs'):
            self.logger.error("Shit won't work without a coinserver to connect to")
            exit()
Example 3
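Splits a stratum username into an address and a sanitized worker name; invalid addresses fall back to a configured alias or the donate address.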
    def convert_username(self, username):
        # if the address they passed is a valid address, use it.
        # Otherwise fall back to a configured alias or the donate address
        bits = username.split('.', 1)
        username = bits[0]
        worker = ''
        if len(bits) > 1:
            parsed_w = re.sub(r'[^a-zA-Z0-9\[\]_]+', '-', str(bits[1]))
            self.logger.debug("Registering worker name {}".format(parsed_w))
            worker = parsed_w[:16]
        try:
            version = get_bcaddress_version(username)
        except Exception:
            version = False

        if version:
            address = username
        else:
            filtered = re.sub(r'[\W_]+', '', username).lower()
            self.logger.debug(
                "Invalid address passed in, checking aliases against {}"
                .format(filtered))
            if filtered in self.config['aliases']:
                address = self.config['aliases'][filtered]
                self.logger.debug("Setting address alias to {}".format(address))
            else:
                address = self.config['donate_address']
                self.logger.debug("Falling back to donate address {}".format(address))

        return address, worker
Example 4
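The same username conversion as Example 3, differing only in line wrapping.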
    def convert_username(self, username):
        # if the address they passed is a valid address, use it.
        # Otherwise fall back to a configured alias or the donate address
        bits = username.split('.', 1)
        username = bits[0]
        worker = ''
        if len(bits) > 1:
            parsed_w = re.sub(r'[^a-zA-Z0-9\[\]_]+', '-', str(bits[1]))
            self.logger.debug("Registering worker name {}".format(parsed_w))
            worker = parsed_w[:16]
        try:
            version = get_bcaddress_version(username)
        except Exception:
            version = False

        if version:
            address = username
        else:
            filtered = re.sub(r'[\W_]+', '', username).lower()
            self.logger.debug(
                "Invalid address passed in, checking aliases against {}".
                format(filtered))
            if filtered in self.config['aliases']:
                address = self.config['aliases'][filtered]
                self.logger.debug(
                    "Setting address alias to {}".format(address))
            else:
                address = self.config['donate_address']
                self.logger.debug(
                    "Falling back to donate address {}".format(address))

        return address, worker
Example 5
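The same configuration bootstrap as Example 2, differing only in line wrapping.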
    def _set_config(self, **kwargs):
        # A fast way to set defaults for the kwargs then set them as attributes
        self.config = dict(
            coinserv=None,
            extranonce_serv_size=8,
            extranonce_size=4,
            diff1=0x0000FFFF00000000000000000000000000000000000000000000000000000000,
            hashes_per_share=0xFFFF,
            merged=None,
            block_poll=0.2,
            job_refresh=15,
            rpc_ping_int=2,
            pow_block_hash=False,
            poll=None,
            pool_address='',
            signal=None)
        self.config.update(kwargs)

        if not get_bcaddress_version(self.config['pool_address']):
            self.logger.error("No valid pool address configured! Exiting.")
            exit()

        # check that we have at least one configured coin server
        if not self.config.get('main_coinservs'):
            self.logger.error(
                "Shit won't work without a coinserver to connect to")
            exit()
Example 6
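Pulls unpaid payouts from the server, skipping entries whose address version is invalid or whose payout ID already exists locally.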
    def pull_payouts(self, simulate=False):
        """ Gets all the unpaid payouts from the server """

        if simulate:
            self.logger.info('#' * 20 + ' Simulation mode ' + '#' * 20)

        try:
            payouts = self.post(
                'get_payouts', data={'currency':
                                     self.config['currency_code']})['pids']
        except ConnectionError:
            self.logger.warn('Unable to connect to SC!', exc_info=True)
            return

        if not payouts:
            self.logger.info("No {} payouts to process..".format(
                self.config['currency_code']))
            return

        repeat = 0
        new = 0
        invalid = 0
        for user, address, amount, pid in payouts:
            # Check address is valid
            if (get_bcaddress_version(address)
                    not in self.config['valid_address_versions']):
                self.logger.warn(
                    "Ignoring payout {} due to invalid address. "
                    "{} address did not match a valid version {}".format(
                        (user, address, amount, pid),
                        self.config['currency_code'],
                        self.config['valid_address_versions']))
                invalid += 1
                continue
            # Check payout doesn't already exist
            if self.db.session.query(Payout).filter_by(pid=pid).first():
                self.logger.debug(
                    "Ignoring payout {} because it already exists"
                    " locally".format((user, address, amount, pid)))
                repeat += 1
                continue
            # Create local payout obj
            p = Payout(pid=pid,
                       user=user,
                       address=address,
                       amount=amount,
                       currency_code=self.config['currency_code'],
                       pull_time=datetime.datetime.utcnow())
            new += 1

            if not simulate:
                self.db.session.add(p)

        self.db.session.commit()

        self.logger.info(
            "Inserted {:,} new {} payouts and skipped {:,} old "
            "payouts from the server. {:,} payouts with invalid addresses.".
            format(new, self.config['currency_code'], repeat, invalid))
        return True
Example 7
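The same payout puller as Example 6, differing only in line wrapping.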
    def pull_payouts(self, simulate=False):
        """ Gets all the unpaid payouts from the server """

        if simulate:
            self.logger.info('#'*20 + ' Simulation mode ' + '#'*20)

        try:
            payouts = self.post(
                'get_payouts',
                data={'currency': self.config['currency_code']}
            )['pids']
        except ConnectionError:
            self.logger.warn('Unable to connect to SC!', exc_info=True)
            return

        if not payouts:
            self.logger.info("No {} payouts to process.."
                             .format(self.config['currency_code']))
            return

        repeat = 0
        new = 0
        invalid = 0
        for user, address, amount, pid in payouts:
            # Check address is valid
            if get_bcaddress_version(address) not in self.config['valid_address_versions']:
                self.logger.warn("Ignoring payout {} due to invalid address. "
                                 "{} address did not match a valid version {}"
                                 .format((user, address, amount, pid),
                                         self.config['currency_code'],
                                         self.config['valid_address_versions']))
                invalid += 1
                continue
            # Check payout doesn't already exist
            if self.db.session.query(Payout).filter_by(pid=pid).first():
                self.logger.debug("Ignoring payout {} because it already exists"
                                  " locally".format((user, address, amount, pid)))
                repeat += 1
                continue
            # Create local payout obj
            p = Payout(pid=pid, user=user, address=address, amount=amount,
                       currency_code=self.config['currency_code'],
                       pull_time=datetime.datetime.utcnow())
            new += 1

            if not simulate:
                self.db.session.add(p)

        self.db.session.commit()

        self.logger.info("Inserted {:,} new {} payouts and skipped {:,} old "
                         "payouts from the server. {:,} payouts with invalid addresses."
                         .format(new, self.config['currency_code'], repeat, invalid))
        return True
Example 8
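The same address test as Example 1, differing only in line wrapping.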
    def test_address(self):
        # HVC test net
        self.assertIsNotNone(
            get_bcaddress_version('mp4hfsv6ESdc3jttjosmVBLdnnhL5wyKJm'))
        self.assertIsNotNone(
            get_bcaddress('mp4hfsv6ESdc3jttjosmVBLdnnhL5wyKJm'))
        # HVC main net
        self.assertIsNotNone(
            get_bcaddress_version('HR2KQ3qRWJ3yQhdivJLVVEScUg1EMp6EZH'))
        self.assertIsNotNone(
            get_bcaddress('HR2KQ3qRWJ3yQhdivJLVVEScUg1EMp6EZH'))
        # BTC,LTC... test net
        self.assertIsNotNone(
            get_bcaddress_version('nrJQ8mHB2AndBZiGT5m7ow7DgJeweCxT5n'))
        self.assertIsNotNone(
            get_bcaddress('nrJQ8mHB2AndBZiGT5m7ow7DgJeweCxT5n'))
        # DOGE main net
        self.assertIsNotNone(
            get_bcaddress_version('DGyiBd4UtcYB69dW1hL5TrySMUyPg1KSkg'))
        self.assertIsNotNone(
            get_bcaddress('DGyiBd4UtcYB69dW1hL5TrySMUyPg1KSkg'))
Example 9
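A command handler that validates a merged-mining payout address before storing it in the database.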
def setmerge_command(username, merged_type, merge_address):
    merged_cfg = current_app.config['merged_cfg'].get(merged_type, {})
    try:
        version = get_bcaddress_version(merge_address)
    except Exception:
        version = False
    if merge_address[0] != merged_cfg['prefix'] or not version:
        raise CommandException(
            "Invalid {merged_type} address! {merged_type} addresses start "
            "with a(n) {}.".format(merged_cfg['prefix'], merged_type=merged_type))

    if not merged_cfg['enabled']:
        raise CommandException("Merged mining not enabled!")

    obj = MergeAddress(user=username, merge_address=merge_address,
                       merged_type=merged_type)
    db.session.merge(obj)
    db.session.commit()
Example 10
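Merges stratum server defaults with the supplied config and exits if the donate address is invalid.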
    def _set_config(self, **config):
        self.config = dict(aliases={},
                           vardiff=dict(spm_target=20,
                                        interval=30,
                                        tiers=[8, 16, 32, 64, 96, 128, 192, 256, 512]),
                           push_job_interval=30,
                           donate_address='',
                           idle_worker_threshold=300,
                           idle_worker_disconnect_threshold=3600,
                           agent=dict(enabled=False,
                                      port_diff=1111,
                                      timeout=120,
                                      accepted_types=['temp', 'status', 'hashrate', 'thresholds']))
        recursive_update(self.config, config)

        if not get_bcaddress_version(self.config['donate_address']):
            self.logger.error("No valid donation address configured! Exiting.")
            exit()
Example 11
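A network monitor constructor that raises ConfigurationError on an invalid pool address, then initializes job, network, and block-statistics state.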
    def __init__(self, config):
        NodeMonitorMixin.__init__(self)
        self._configure(config)
        if get_bcaddress_version(self.config['pool_address']) is None:
            raise ConfigurationError(
                "No valid pool address configured! Exiting.")

        # Since some MonitorNetwork objs are polling and some aren't....
        self.gl_methods = ['_monitor_nodes', '_check_new_jobs']

        # Aux network monitors (merged mining)
        self.auxmons = []

        # internal vars
        self._last_gbt = {}
        self._job_counter = 0  # a unique job ID counter

        # Currently active jobs keyed by their unique ID
        self.jobs = {}
        self.stale_jobs = deque([], maxlen=10)
        self.latest_job = None  # The last job that was generated
        self.new_job = Event()
        self.last_signal = 0.0

        # general current network stats
        self.current_net = dict(difficulty=None,
                                height=None,
                                last_block=0.0,
                                prev_hash=None,
                                transactions=None,
                                subsidy=None)
        self.block_stats = dict(accepts=0,
                                rejects=0,
                                solves=0,
                                last_solve_height=None,
                                last_solve_time=None,
                                last_solve_worker=None)
        self.recent_blocks = deque(maxlen=15)

        # Run the looping height poller if we aren't getting push notifications
        if (not self.config['signal']
                and self.config['poll'] is None) or self.config['poll']:
            self.gl_methods.append('_poll_height')
Example 12
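The same stratum defaults as Example 10, differing only in line wrapping.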
    def _set_config(self, **config):
        self.config = dict(
            aliases={},
            vardiff=dict(spm_target=20,
                         interval=30,
                         tiers=[8, 16, 32, 64, 96, 128, 192, 256, 512]),
            push_job_interval=30,
            donate_address='',
            idle_worker_threshold=300,
            idle_worker_disconnect_threshold=3600,
            agent=dict(
                enabled=False,
                port_diff=1111,
                timeout=120,
                accepted_types=['temp', 'status', 'hashrate', 'thresholds']))
        recursive_update(self.config, config)

        if not get_bcaddress_version(self.config['donate_address']):
            self.logger.error("No valid donation address configured! Exiting.")
            exit()
Example 13
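The same network monitor constructor as Example 11, differing only in line wrapping.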
    def __init__(self, config):
        NodeMonitorMixin.__init__(self)
        self._configure(config)
        if get_bcaddress_version(self.config['pool_address']) is None:
            raise ConfigurationError("No valid pool address configured! Exiting.")

        # Since some MonitorNetwork objs are polling and some aren't....
        self.gl_methods = ['_monitor_nodes', '_check_new_jobs']

        # Aux network monitors (merged mining)
        self.auxmons = []

        # internal vars
        self._last_gbt = {}
        self._job_counter = 0  # a unique job ID counter

        # Currently active jobs keyed by their unique ID
        self.jobs = {}
        self.stale_jobs = deque([], maxlen=10)
        self.latest_job = None  # The last job that was generated
        self.new_job = Event()
        self.last_signal = 0.0

        # general current network stats
        self.current_net = dict(difficulty=None,
                                height=None,
                                last_block=0.0,
                                prev_hash=None,
                                transactions=None,
                                subsidy=None)
        self.block_stats = dict(accepts=0,
                                rejects=0,
                                solves=0,
                                last_solve_height=None,
                                last_solve_time=None,
                                last_solve_worker=None)
        self.recent_blocks = deque(maxlen=15)

        # Run the looping height poller if we aren't getting push notifications
        if (not self.config['signal'] and self.config['poll'] is None) or self.config['poll']:
            self.gl_methods.append('_poll_height')
Example 14
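A username converter that additionally enforces a whitelist of valid address versions and falls back to the pool address rather than a donate address.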
    def convert_username(self, username):
        # if the address they passed is a valid address,
        # use it. Otherwise use the pool address
        bits = username.split('.', 1)
        username = bits[0].strip()
        worker = '0'
        if len(bits) > 1:
            parsed_w = re.sub(r'[^a-zA-Z0-9\[\]_]+', '-', str(bits[1]))
            self.logger.debug("Registering worker name {}".format(parsed_w))
            worker = parsed_w[:16]

        try:
            version = get_bcaddress_version(username)
        except Exception:
            version = False

        if self.config['valid_address_versions'] and version not in self.config[
                'valid_address_versions']:
            version = False

        if isinstance(version, int) and version is not False:
            address = username
        else:
            # Strip everything except letters and digits
            filtered = re.sub(r'[\W_]+', '', username).lower()
            self.logger.debug(
                "Invalid address passed in, checking aliases against {}".
                format(filtered))
            if filtered in self.config['aliases']:
                address = self.config['aliases'][filtered]
                self.logger.debug(
                    "Setting address alias to {}".format(address))
            else:
                # address = self.config['donate_key']
                address = self.server.jobmanager.config['pool_address']
                self.logger.debug(
                    "Falling back to pool_address {}".format(address))
        return address, worker
Example 15
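Processes payout transactions end to end: validates addresses, aggregates per-user totals above the minimum payout, pays via RPC, and associates the resulting transaction ID with the server.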
    def proc_trans(self, simulate=False, merged=None, datadir=None):
        logger.info("Running payouts for merged = {}".format(merged))
        if merged:
            conn = merge_coinserv[merged]
            valid_address_versions = current_app.config['merged_cfg'][merged][
                'address_version']
        else:
            conn = coinserv
            valid_address_versions = current_app.config['address_version']
        self.poke_rpc(conn)

        lock = True
        if simulate:
            lock = False

        payouts, bonus_payouts, lock_res = self.post('get_payouts',
                                                     data={
                                                         'lock': lock,
                                                         'merged': merged
                                                     })
        if lock:
            assert lock_res

        pids = [t[2] for t in payouts]
        bids = [t[2] for t in bonus_payouts]

        if not len(pids) and not len(bids):
            logger.info("No payouts to process..")
            return

        if not simulate:
            backup_fname = os.path.join(
                os.path.abspath(datadir),
                'locked_ids.{}'.format(int(time.time())))
            fo = open(backup_fname, 'w')
            json.dump(dict(pids=pids, bids=bids), fo)
            fo.close()
            logger.info(
                "Locked pid information stored at {0}. Call sc_rpc "
                "reset_trans_file {0} to reset these transactions".format(
                    backup_fname))

        logger.info(
            "Recieved {:,} payouts and {:,} bonus payouts from the server".
            format(len(pids), len(bids)))

        # builds two dictionaries, one that tracks the total payouts to a user,
        # and another that tracks all the payout ids (pids) giving that amount
        # to the user
        totals = {}
        pids = {}
        bids = {}
        for user, amount, id in payouts:
            if get_bcaddress_version(user) in valid_address_versions:
                totals.setdefault(user, 0)
                totals[user] += amount
                pids.setdefault(user, [])
                pids[user].append(id)
            else:
                logger.warn(
                    "User {} has been excluded due to invalid address".format(
                        user))

        for user, amount, id in bonus_payouts:
            if get_bcaddress_version(user) in valid_address_versions:
                totals.setdefault(user, 0)
                totals[user] += amount
                bids.setdefault(user, [])
                bids[user].append(id)
            else:
                logger.warn(
                    "User {} has been excluded due to invalid address".format(
                        user))

        # identify the users who meet minimum payout and format for sending
        # to rpc
        users = {
            user: amount / float(100000000)
            for user, amount in totals.iteritems()
            if amount > current_app.config['minimum_payout']
        }
        logger.info("Trying to payout a total of {}".format(sum(
            users.values())))

        if len(users) == 0:
            logger.info("Nobody has a big enough balance to pay out...")
            return

        # now we have all the users who we're going to send money. build a list
        # of the pids that will be being paid in this transaction
        committed_pids = []
        for user in users:
            committed_pids.extend(pids.get(user, []))
        committed_bids = []
        for user in users:
            committed_bids.extend(bids.get(user, []))

        logger.info("Total user payouts")
        logger.info(users)

        logger.debug("Total bonus IDs")
        logger.debug(bids)
        logger.debug("Total payout IDs")
        logger.debug(pids)

        logger.info("List of payout ids to be committed")
        logger.info(committed_pids)
        logger.info("List of bonus payout ids to be committed")
        logger.info(committed_bids)

        if simulate:
            logger.info("Just kidding, we're simulating... Exit.")
            exit(0)

        try:
            # now actually pay them
            coin_txid = payout_many(users, merged=merged)
            #coin_txid = "1111111111111111111111111111111111111111111111111111111111111111"
        except CoinRPCException as e:
            if isinstance(
                    e.error,
                    dict) and e.error.get('message') == 'Insufficient funds':
                logger.error("Insufficient funds, reseting...")
                self.reset_trans(pids, bids)
            else:
                logger.error(
                    "Unkown RPC error, you'll need to manually reset the payouts",
                    exc_info=True)

        else:
            associated = False
            try:
                logger.info(
                    "Got {} as txid for payout, now pushing result to server!".
                    format(coin_txid))

                retries = 0
                while retries < 5:
                    try:
                        if self.associate_trans(committed_pids,
                                                committed_bids,
                                                coin_txid,
                                                merged=merged):
                            logger.info(
                                "Recieved success response from the server.")
                            associated = True
                            break
                    except Exception:
                        logger.error(
                            "Server returned failure response, retrying "
                            "{} more times.".format(4 - retries),
                            exc_info=True)
                    retries += 1
                    time.sleep(15)
            finally:
                if not associated:
                    backup_fname = os.path.join(
                        os.path.abspath(datadir),
                        'associated_ids.{}'.format(int(time.time())))
                    fo = open(backup_fname, 'w')
                    json.dump(
                        dict(pids=committed_pids,
                             bids=committed_bids,
                             transaction_id=coin_txid,
                             merged=merged), fo)
                    fo.close()
                    logger.info(
                        "Failed transaction_id association data stored in {0}. Call sc_rpc "
                        "associate_trans_file {0} to retry manually".format(
                            backup_fname))
Example 16
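The same transaction processor as Example 15, differing only in line wrapping.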
    def proc_trans(self, simulate=False, merged=None, datadir=None):
        logger.info("Running payouts for merged = {}".format(merged))
        if merged:
            conn = merge_coinserv[merged]
            valid_address_versions = current_app.config['merged_cfg'][merged]['address_version']
        else:
            conn = coinserv
            valid_address_versions = current_app.config['address_version']
        self.poke_rpc(conn)

        lock = True
        if simulate:
            lock = False

        payouts, bonus_payouts, lock_res = self.post(
            'get_payouts',
            data={'lock': lock, 'merged': merged}
        )
        if lock:
            assert lock_res

        pids = [t[2] for t in payouts]
        bids = [t[2] for t in bonus_payouts]

        if not len(pids) and not len(bids):
            logger.info("No payouts to process..")
            return

        if not simulate:
            backup_fname = os.path.join(os.path.abspath(datadir),
                                        'locked_ids.{}'.format(int(time.time())))
            fo = open(backup_fname, 'w')
            json.dump(dict(pids=pids, bids=bids), fo)
            fo.close()
            logger.info("Locked pid information stored at {0}. Call sc_rpc "
                        "reset_trans_file {0} to reset these transactions"
                        .format(backup_fname))

        logger.info("Recieved {:,} payouts and {:,} bonus payouts from the server"
                    .format(len(pids), len(bids)))

        # builds two dictionaries, one that tracks the total payouts to a user,
        # and another that tracks all the payout ids (pids) giving that amount
        # to the user
        totals = {}
        pids = {}
        bids = {}
        for user, amount, id in payouts:
            if get_bcaddress_version(user) in valid_address_versions:
                totals.setdefault(user, 0)
                totals[user] += amount
                pids.setdefault(user, [])
                pids[user].append(id)
            else:
                logger.warn("User {} has been excluded due to invalid address"
                            .format(user))

        for user, amount, id in bonus_payouts:
            if get_bcaddress_version(user) in valid_address_versions:
                totals.setdefault(user, 0)
                totals[user] += amount
                bids.setdefault(user, [])
                bids[user].append(id)
            else:
                logger.warn("User {} has been excluded due to invalid address"
                            .format(user))

        # identify the users who meet minimum payout and format for sending
        # to rpc
        users = {user: amount / float(100000000) for user, amount in totals.iteritems()
                 if amount > current_app.config['minimum_payout']}
        logger.info("Trying to payout a total of {}".format(sum(users.values())))

        if len(users) == 0:
            logger.info("Nobody has a big enough balance to pay out...")
            return

        # now we have all the users who we're going to send money. build a list
        # of the pids that will be being paid in this transaction
        committed_pids = []
        for user in users:
            committed_pids.extend(pids.get(user, []))
        committed_bids = []
        for user in users:
            committed_bids.extend(bids.get(user, []))

        logger.info("Total user payouts")
        logger.info(users)

        logger.debug("Total bonus IDs")
        logger.debug(bids)
        logger.debug("Total payout IDs")
        logger.debug(pids)

        logger.info("List of payout ids to be committed")
        logger.info(committed_pids)
        logger.info("List of bonus payout ids to be committed")
        logger.info(committed_bids)

        if simulate:
            logger.info("Just kidding, we're simulating... Exit.")
            exit(0)

        try:
            # now actually pay them
            coin_txid = payout_many(users, merged=merged)
            #coin_txid = "1111111111111111111111111111111111111111111111111111111111111111"
        except CoinRPCException as e:
            if isinstance(e.error, dict) and e.error.get('message') == 'Insufficient funds':
                logger.error("Insufficient funds, reseting...")
                self.reset_trans(pids, bids)
            else:
                logger.error("Unkown RPC error, you'll need to manually reset the payouts", exc_info=True)

        else:
            associated = False
            try:
                logger.info("Got {} as txid for payout, now pushing result to server!"
                            .format(coin_txid))

                retries = 0
                while retries < 5:
                    try:
                        if self.associate_trans(committed_pids, committed_bids, coin_txid, merged=merged):
                            logger.info("Recieved success response from the server.")
                            associated = True
                            break
                    except Exception:
                        logger.error("Server returned failure response, retrying "
                                     "{} more times.".format(4 - retries), exc_info=True)
                    retries += 1
                    time.sleep(15)
            finally:
                if not associated:
                    backup_fname = os.path.join(os.path.abspath(datadir),
                                                'associated_ids.{}'.format(int(time.time())))
                    fo = open(backup_fname, 'w')
                    json.dump(dict(pids=committed_pids,
                                   bids=committed_bids,
                                   transaction_id=coin_txid,
                                   merged=merged), fo)
                    fo.close()
                    logger.info("Failed transaction_id association data stored in {0}. Call sc_rpc "
                                "associate_trans_file {0} to retry manually".format(backup_fname))
Example 17
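A Base58 round-trip test: a valid version-0 Bitcoin address decodes to version 0, and b58encode/b58decode invert each other.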
    def testb58(self):
        assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
        _ohai = 'o hai'.encode('ascii')
        _tmp = b58encode(_ohai)
        assert _tmp == 'DYB3oMS'
        assert b58decode(_tmp, 5) == _ohai
Example 18
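PowerPool's entry point: builds the default configuration, validates the pool and donate addresses, and starts the network, stratum, stats, agent, and monitor threads.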
def main():
    parser = argparse.ArgumentParser(description='Run powerpool!')
    parser.add_argument('config',
                        type=argparse.FileType('r'),
                        help='yaml configuration file to run with')
    args = parser.parse_args()

    # implement some defaults, these are all explained in the example
    # configuration file
    config = dict(
        stratum={
            'port': 3333,
            'address': '0.0.0.0'
        },
        procname='powerpool',
        coinserv=[],
        extranonce_serv_size=8,
        extranonce_size=4,
        diff1=0x0000FFFF00000000000000000000000000000000000000000000000000000000,
        loggers=[{
            'type': 'StreamHandler',
            'level': 'DEBUG'
        }],
        start_difficulty=128,
        term_timeout=3,
        merged=[{
            'enabled': False,
            'work_interval': 1
        }],
        monitor={
            'DEBUG': False,
            'address': '127.0.0.1',
            'port': 3855,
            'enabled': True
        },
        agent={
            'address': '0.0.0.0',
            'port': 4444,
            'timeout': 120,
            'enabled': False,
            'accepted_types': ['temp', 'status', 'hashrate', 'thresholds']
        },
        pow_func='ltc_scrypt',
        aliases={},
        block_poll=0.2,
        job_generate_int=75,
        rpc_ping_int=2,
        keep_share=600,
        send_new_block=True,
        vardiff={
            'enabled': False,
            'historesis': 1.5,
            'interval': 400,
            'spm_target': 2.5,
            'tiers': [8, 16, 32, 64, 96, 128, 192, 256, 512]
        },
        celery={'CELERY_DEFAULT_QUEUE': 'celery'},
        push_job_interval=30,
        celery_task_prefix=None)
    # override those defaults with a loaded yaml config
    add_config = yaml.load(args.config) or {}

    def update(d, u):
        """ Simple recursive dictionary update """
        for k, v in u.iteritems():
            if isinstance(v, collections.Mapping):
                r = update(d.get(k, {}), v)
                d[k] = r
            else:
                d[k] = u[k]
        return d

    update(config, add_config)

    # setup our celery agent
    celery = Celery()
    celery.conf.update(config['celery'])

    # monkey patch the celery object to make sending tasks easy
    def send_task_pp(self, name, *args, **kwargs):
        self.send_task(config['celery_task_prefix'] + '.' + name, args, kwargs)

    Celery.send_task_pp = send_task_pp

    # stored state of all greenlets. holds events that can be triggered, etc
    stratum_clients = {'addr_worker_lut': {}, 'address_lut': {}}

    # all the agent connections
    agent_clients = {}

    # the network monitor stores the current coin network state here
    net_state = {
        # rpc connections in either state
        'poll_connection': None,
        'live_connections': [],
        'down_connections': [],
        # index of all jobs currently accepting work. Contains complete
        # block templates
        'jobs': {},
        # the job that should be sent to clients needing work
        'latest_job': None,
        'job_counter': 0,
        'work': {
            'difficulty': None,
            'height': None,
            'block_solve': None,
            'work_restarts': 0,
            'new_jobs': 0,
            'rejects': 0,
            'accepts': 0,
            'solves': 0,
            'recent_blocks': deque(maxlen=15)
        },
        'merged_work': {}
    }

    # holds counters, timers, etc that have to do with overall server state
    server_state = {
        'server_start': datetime.datetime.utcnow(),
        'block_solve': None,
        'aux_state': {},
        'shares': StatManager(),
        'reject_low': StatManager(),
        'reject_dup': StatManager(),
        'reject_stale': StatManager(),
        'stratum_connects': StatManager(),
        'stratum_disconnects': StatManager(),
        'agent_connects': StatManager(),
        'agent_disconnects': StatManager(),
    }

    exit_event = threading.Event()

    for log_cfg in config['loggers']:
        ch = getattr(logging, log_cfg['type'])()
        log_level = getattr(logging, log_cfg['level'].upper())
        ch.setLevel(log_level)
        fmt = log_cfg.get(
            'format', '%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
        formatter = logging.Formatter(fmt)
        ch.setFormatter(formatter)
        keys = log_cfg.get('listen', [
            'stats', 'stratum_server', 'netmon', 'manager', 'monitor', 'agent'
        ])
        for key in keys:
            log = logging.getLogger(key)
            log.addHandler(ch)
            log.setLevel(log_level)

    logger.info("=" * 80)
    logger.info("PowerPool stratum server ({}) starting up...".format(
        config['procname']))
    logger.debug(pformat(config))

    setproctitle.setproctitle(config['procname'])

    # setup the pow function
    if config['pow_func'] == 'ltc_scrypt':
        from cryptokit.block import scrypt_int
        config['pow_func'] = scrypt_int
    elif config['pow_func'] == 'vert_scrypt':
        from cryptokit.block import vert_scrypt_int
        config['pow_func'] = vert_scrypt_int
    elif config['pow_func'] == 'darkcoin':
        from cryptokit.block import drk_hash_int
        config['pow_func'] = drk_hash_int
    elif config['pow_func'] == 'sha256':
        from cryptokit.block import sha256_int
        config['pow_func'] = sha256_int
    else:
        logger.error("pow_func option not valid!")
        exit()

    # check that config has a valid address
    if (not get_bcaddress_version(config['pool_address'])
            or not get_bcaddress_version(config['donate_address'])):
        logger.error("No valid donation/pool address configured! Exiting.")
        exit()

    # check that we have at least one configured coin server
    if not config['coinserv']:
        logger.error("Shit won't work without a coinserver to connect to")
        exit()

    # check that a celery task prefix is configured
    if not config['celery_task_prefix']:
        logger.error("You need to specify a celery prefix")
        exit()

    threads = []
    # the thread that monitors the network for new jobs and blocks
    net_thread = threading.Thread(target=net_runner,
                                  args=(net_state, config, stratum_clients,
                                        server_state, celery, exit_event))
    net_thread.daemon = True
    threads.append(net_thread)
    net_thread.start()

    # stratum thread. interacts with clients. sends them jobs and accepts work
    stratum_thread = threading.Thread(target=stratum_runner,
                                      args=(net_state, config, stratum_clients,
                                            server_state, celery, exit_event))
    stratum_thread.daemon = True
    threads.append(stratum_thread)
    stratum_thread.start()

    # task in charge of rotating stats as needed
    stat_thread = threading.Thread(target=stat_runner,
                                   args=(server_state, celery, exit_event))
    stat_thread.daemon = True
    threads.append(stat_thread)
    stat_thread.start()

    # the agent server. allows peers to connect and send stat data about
    # a stratum worker
    if config['agent']['enabled']:
        agent_thread = threading.Thread(target=agent_runner,
                                        args=(config, stratum_clients,
                                              agent_clients, server_state,
                                              celery, exit_event))
        agent_thread.daemon = True
        threads.append(agent_thread)
        agent_thread.start()

    # the monitor server. a simple flask http server that lets you view
    # internal data structures to monitor server health
    if config['monitor']['enabled']:
        monitor_thread = threading.Thread(target=monitor_runner,
                                          args=(net_state, config,
                                                stratum_clients, server_state,
                                                agent_clients, exit_event))
        monitor_thread.daemon = True
        threads.append(monitor_thread)
        monitor_thread.start()

    try:
        while True:
            for thread in threads:
                thread.join(0.2)
    except KeyboardInterrupt:
        exit_event.set()
        logger.info("Exiting requested via SIGINT, cleaning up...")
        try:
            net_thread.join(config['term_timeout'])
            stratum_thread.join(config['term_timeout'])
            if net_thread.isAlive() or stratum_thread.isAlive():
                logger.info("Timeout reached, exiting without cleanup")
            else:
                logger.info("Cleanup complete, shutting down...")
        except KeyboardInterrupt:
            logger.info("Shutdown forced by system, exiting without cleanup")

        logger.info("=" * 80)
Example 19
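The same entry point as Example 18, differing only in line wrapping.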
def main():
    parser = argparse.ArgumentParser(description='Run powerpool!')
    parser.add_argument('config', type=argparse.FileType('r'),
                        help='yaml configuration file to run with')
    args = parser.parse_args()

    # implement some defaults, these are all explained in the example
    # configuration file
    config = dict(stratum={'port': 3333, 'address': '0.0.0.0'},
                  procname='powerpool',
                  coinserv=[],
                  extranonce_serv_size=8,
                  extranonce_size=4,
                  diff1=0x0000FFFF00000000000000000000000000000000000000000000000000000000,
                  loggers=[{'type': 'StreamHandler',
                            'level': 'DEBUG'}],
                  start_difficulty=128,
                  term_timeout=3,
                  merged=[{'enabled': False,
                          'work_interval': 1}],
                  monitor={'DEBUG': False,
                           'address': '127.0.0.1',
                           'port': 3855,
                           'enabled': True},
                  agent={'address': '0.0.0.0',
                         'port': 4444,
                         'timeout': 120,
                         'enabled': False,
                         'accepted_types': ['temp', 'status', 'hashrate', 'thresholds']},
                  pow_func='ltc_scrypt',
                  aliases={},
                  block_poll=0.2,
                  job_generate_int=75,
                  rpc_ping_int=2,
                  keep_share=600,
                  send_new_block=True,
                  vardiff={'enabled': False,
                           'historesis': 1.5,
                           'interval': 400,
                           'spm_target': 2.5,
                           'tiers': [8, 16, 32, 64, 96, 128, 192, 256, 512]},
                  celery={'CELERY_DEFAULT_QUEUE': 'celery'},
                  push_job_interval=30,
                  celery_task_prefix=None)
    # override those defaults with a loaded yaml config
    add_config = yaml.load(args.config) or {}

    def update(d, u):
        """ Simple recursive dictionary update """
        for k, v in u.iteritems():
            if isinstance(v, collections.Mapping):
                r = update(d.get(k, {}), v)
                d[k] = r
            else:
                d[k] = u[k]
        return d
    update(config, add_config)

    # setup our celery agent
    celery = Celery()
    celery.conf.update(config['celery'])

    # monkey patch the celery object to make sending tasks easy
    def send_task_pp(self, name, *args, **kwargs):
        self.send_task(config['celery_task_prefix'] + '.' + name, args, kwargs)
    Celery.send_task_pp = send_task_pp

    # stored state of all greenlets. holds events that can be triggered, etc
    stratum_clients = {'addr_worker_lut': {}, 'address_lut': {}}

    # all the agent connections
    agent_clients = {}

    # the network monitor stores the current coin network state here
    net_state = {
        # rpc connections in either state
        'poll_connection': None,
        'live_connections': [],
        'down_connections': [],
        # index of all jobs currently accepting work. Contains complete
        # block templates
        'jobs': {},
        # the job that should be sent to clients needing work
        'latest_job': None,
        'job_counter': 0,
        'work': {'difficulty': None,
                 'height': None,
                 'block_solve': None,
                 'work_restarts': 0,
                 'new_jobs': 0,
                 'rejects': 0,
                 'accepts': 0,
                 'solves': 0,
                 'recent_blocks': deque(maxlen=15)},
        'merged_work': {}
    }

    # holds counters, timers, etc that have to do with overall server state
    server_state = {
        'server_start': datetime.datetime.utcnow(),
        'block_solve': None,
        'aux_state': {},
        'shares': StatManager(),
        'reject_low': StatManager(),
        'reject_dup': StatManager(),
        'reject_stale': StatManager(),
        'stratum_connects': StatManager(),
        'stratum_disconnects': StatManager(),
        'agent_connects': StatManager(),
        'agent_disconnects': StatManager(),
    }

    exit_event = threading.Event()

    for log_cfg in config['loggers']:
        ch = getattr(logging, log_cfg['type'])()
        log_level = getattr(logging, log_cfg['level'].upper())
        ch.setLevel(log_level)
        fmt = log_cfg.get('format', '%(asctime)s [%(name)s] [%(levelname)s] %(message)s')
        formatter = logging.Formatter(fmt)
        ch.setFormatter(formatter)
        keys = log_cfg.get('listen', ['stats', 'stratum_server', 'netmon',
                                      'manager', 'monitor', 'agent'])
        for key in keys:
            log = logging.getLogger(key)
            log.addHandler(ch)
            log.setLevel(log_level)

    logger.info("=" * 80)
    logger.info("PowerPool stratum server ({}) starting up..."
                .format(config['procname']))
    logger.debug(pformat(config))

    setproctitle.setproctitle(config['procname'])

    # setup the pow function
    if config['pow_func'] == 'ltc_scrypt':
        from cryptokit.block import scrypt_int
        config['pow_func'] = scrypt_int
    elif config['pow_func'] == 'vert_scrypt':
        from cryptokit.block import vert_scrypt_int
        config['pow_func'] = vert_scrypt_int
    elif config['pow_func'] == 'darkcoin':
        from cryptokit.block import drk_hash_int
        config['pow_func'] = drk_hash_int
    elif config['pow_func'] == 'sha256':
        from cryptokit.block import sha256_int
        config['pow_func'] = sha256_int
    else:
        logger.error("pow_func option not valid!")
        exit()

    # check that config has a valid address
    if (not get_bcaddress_version(config['pool_address']) or
            not get_bcaddress_version(config['donate_address'])):
        logger.error("No valid donation/pool address configured! Exiting.")
        exit()

    # check that we have at least one configured coin server
    if not config['coinserv']:
        logger.error("Shit won't work without a coinserver to connect to")
        exit()

    # check that a celery task prefix is configured
    if not config['celery_task_prefix']:
        logger.error("You need to specify a celery prefix")
        exit()

    threads = []
    # the thread that monitors the network for new jobs and blocks
    net_thread = threading.Thread(target=net_runner, args=(
        net_state, config, stratum_clients, server_state, celery, exit_event))
    net_thread.daemon = True
    threads.append(net_thread)
    net_thread.start()

    # stratum thread. interacts with clients. sends them jobs and accepts work
    stratum_thread = threading.Thread(target=stratum_runner, args=(
        net_state, config, stratum_clients, server_state, celery, exit_event))
    stratum_thread.daemon = True
    threads.append(stratum_thread)
    stratum_thread.start()

    # task in charge of rotating stats as needed
    stat_thread = threading.Thread(target=stat_runner, args=(
        server_state, celery, exit_event))
    stat_thread.daemon = True
    threads.append(stat_thread)
    stat_thread.start()

    # the agent server. allows peers to connect and send stat data about
    # a stratum worker
    if config['agent']['enabled']:
        agent_thread = threading.Thread(target=agent_runner, args=(
            config, stratum_clients, agent_clients, server_state, celery,
            exit_event))
        agent_thread.daemon = True
        threads.append(agent_thread)
        agent_thread.start()

    # the monitor server. a simple flask http server that lets you view
    # internal data structures to monitor server health
    if config['monitor']['enabled']:
        monitor_thread = threading.Thread(target=monitor_runner, args=(
            net_state, config, stratum_clients, server_state, agent_clients,
            exit_event))
        monitor_thread.daemon = True
        threads.append(monitor_thread)
        monitor_thread.start()

    try:
        while True:
            for thread in threads:
                thread.join(0.2)
    except KeyboardInterrupt:
        exit_event.set()
        logger.info("Exiting requested via SIGINT, cleaning up...")
        try:
            net_thread.join(config['term_timeout'])
            stratum_thread.join(config['term_timeout'])
            if net_thread.isAlive() or stratum_thread.isAlive():
                logger.info("Timeout reached, exiting without cleanup")
            else:
                logger.info("Cleanup complete, shutting down...")
        except KeyboardInterrupt:
            logger.info("Shutdown forced by system, exiting without cleanup")

        logger.info("=" * 80)