def collect_minutes():
    """ Grabs all the pending minute shares out of redis and puts them in the
    database """
    unproc_mins = redis_conn.keys("min_*")
    for key in unproc_mins:
        current_app.logger.info("Processing key {}".format(key))
        share_type, algo, stamp = key.split("_")[1:]
        stamp = float(stamp)
        minute = datetime.datetime.utcfromtimestamp(stamp)
        # Floor the time so invalid stamps don't get committed
        minute = ShareSlice.floor_time(minute, 0)
        # Skip the minute that is still accumulating shares
        if stamp > (time.time() - 30):
            current_app.logger.info("Skipping timestamp {}, too young"
                                    .format(minute))
            continue

        redis_conn.rename(key, "processing_shares")
        for user, shares in redis_conn.hgetall("processing_shares").iteritems():

            shares = float(shares)
            # messily parse out the worker/address combo...
            parts = user.split(".")
            if len(parts) > 1:
                worker = parts[1]
            else:
                worker = ''
            address = parts[0]

            if address != "pool":
                try:
                    curr = currencies.lookup_payable_addr(address)
                except InvalidAddressException:
                    curr = None

                if not curr:
                    address = global_config.pool_payout_currency.pool_payout_addr

            try:
                slc = ShareSlice(user=address, time=minute, worker=worker, algo=algo,
                                 share_type=share_type, value=shares, span=0)
                db.session.add(slc)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                db.session.rollback()
                slc = ShareSlice.query.with_lockmode('update').filter_by(
                    user=address, time=minute, worker=worker, algo=algo,
                    share_type=share_type).one()
                slc.value += shares
                db.session.commit()
        redis_conn.delete("processing_shares")
    def test_span(self):
        start = datetime.datetime.utcnow()
        shares = list(xrange(300))
        for x in shares:
            now = start - datetime.timedelta(minutes=x)
            v = ShareSlice(time=now, value=x, **self.slice_test_data)
            self.db.session.add(v)
        self.db.session.commit()
        lower, upper = make_upper_lower()

        res = ShareSlice.get_span(stamp=True, lower=lower, upper=upper)
        self.assertEqual(len(res[0]['values']), 10)
        self.assertEqual(sum(res[0]['values'].values()), 45)

        res = ShareSlice.get_span(stamp=True)
        self.assertEqual(sum(res[0]['values'].values()), 44850)
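
The assertions above pin down what make_upper_lower must return: a 10-slot window whose slice values 0..9 sum to 45. A plausible sketch under that assumption (the real signature and defaults are not shown on this page):

import datetime

def make_upper_lower(offset=None, span=datetime.timedelta(minutes=10)):
    # Hypothetical helper, reconstructed from the assertions above:
    # bound the last `span` of whole minutes, shifted back by `offset`
    upper = datetime.datetime.utcnow().replace(second=0, microsecond=0)
    if offset is not None:
        upper -= offset
    lower = upper - span
    return lower, upper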
Example #4
def leaderboard():
    users = {}
    lower_10, upper_10 = make_upper_lower(offset=datetime.timedelta(minutes=2))
    for slc in ShareSlice.get_span(share_type=("acc", ), ret_query=True, lower=lower_10, upper=upper_10):
        try:
            address_version(slc.user)
        except Exception:
            pass
        else:
            user = users.setdefault(slc.user, {})
            user.setdefault(slc.algo, [0, set()])
            user[slc.algo][0] += slc.value
            user[slc.algo][1].add(slc.time)

    # Convert each user's summed shares into a hashrate, expressed in
    # hashes per second
    for user, algo_shares in users.iteritems():
        for algo_key, (shares, minutes) in algo_shares.items():
            algo_obj = algos[algo_key]
            algo_shares[algo_key] = algo_obj.hashes_per_share * (shares / (len(minutes) * 60))
            algo_shares.setdefault('normalized', 0)
            algo_shares['normalized'] += users[user][algo_key] * algo_obj.normalize_mult

    sorted_users = sorted(users.iteritems(),
                          key=lambda x: x[1]['normalized'],
                          reverse=True)

    # This is really bad.... XXX: Needs rework!
    if users:
        anon = anon_users()
        for i, (user, data) in enumerate(sorted_users):
            if user in anon:
                sorted_users[i] = ("Anonymous", data)

    cache.set("leaderboard", sorted_users, timeout=15 * 60)
Example #6
def import_shares(input):
    for i, line in enumerate(input):
        data = json.loads(line)
        data['time'] = datetime.datetime.utcfromtimestamp(data['time'])
        # Floor the time before building the slice, so the committed row
        # actually carries the corrected value
        floored = ShareSlice.floor_time(data['time'], data['span'])
        if data['time'] != floored:
            current_app.logger.warn("{} != {}".format(data['time'], floored))
        data['time'] = floored
        slc = ShareSlice(algo="scrypt", **data)
        db.session.add(slc)
        if i % 100 == 0:
            print "{} completed".format(i)
            db.session.commit()
    # Commit whatever remains of the final partial batch
    db.session.commit()
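
A line of the expected input might look like the following; the exact field set is an assumption pieced together from the ShareSlice constructor calls elsewhere on this page:

import json

sample = json.dumps({"time": 1396742400, "span": 0, "value": 12.0,
                     "user": "SomeAddress", "worker": "rig1",
                     "share_type": "acc"})
import_shares([sample])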
    def test_compress(self):
        start = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        for x in xrange(1500):
            now = start - datetime.timedelta(minutes=x)
            v = ShareSlice(time=now, value=x, **self.slice_test_data)
            self.db.session.add(v)
        self.db.session.commit()
        ShareSlice.compress(0)
        self.db.session.commit()
        lower, upper = make_upper_lower()

        res = ShareSlice.get_span(stamp=True, lower=lower, upper=upper)
        self.assertEqual(len(res[0]['values']), 10)
        self.assertEqual(sum(res[0]['values'].values()), 45)

        res = ShareSlice.get_span(stamp=True)
        print(res)
        self.assertEqual(sum(res[0]['values'].values()), 1124250)

        spans = {0: 0, 1: 0, 2: 0}
        for slc in ShareSlice.get_span(ret_query=True):
            spans[slc.span] += 1
        print spans
        assert spans[0] <= 65
        assert spans[1] >= 48

        ShareSlice.compress(1)
        self.db.session.commit()

        spans = {0: 0, 1: 0, 2: 0}
        for slc in ShareSlice.get_span(ret_query=True):
            spans[slc.span] += 1
        print spans
        assert spans[0] <= 65
        assert spans[1] <= 288
        assert spans[2] >= 1

        res = ShareSlice.get_span(stamp=True)
        self.assertEqual(sum(res[0]['values'].values()), 1124250)
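
The totals asserted above follow directly from the test data: compression may merge slices, but it must preserve the summed value, and the 1500 slices carry the values 0 through 1499:

# sum(0..1499) == 1499 * 1500 / 2 == 1124250, matching the assertions
# both before and after compression
assert sum(xrange(1500)) == 1124250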
Example #8
def collect_minutes():
    """ Grabs all the pending minute shares out of redis and puts them in the
    database """
    unproc_mins = redis_conn.keys("min_*")
    for key in unproc_mins:
        current_app.logger.info("Processing key {}".format(key))
        share_type, algo, stamp = key.split("_")[1:]
        stamp = float(stamp)
        minute = datetime.datetime.utcfromtimestamp(stamp)
        # Floor the time so invalid stamps don't get committed
        minute = ShareSlice.floor_time(minute, 0)
        # Skip the minute that is still accumulating shares
        if stamp > (time.time() - 30):
            current_app.logger.info("Skipping timestamp {}, too young"
                                    .format(minute))
            continue

        redis_conn.rename(key, "processing_shares")
        for user, shares in redis_conn.hgetall("processing_shares").iteritems():

            shares = float(shares)
            # messily parse out the worker/address combo...
            parts = user.split(".")
            if len(parts) > 1:
                worker = parts[1]
            else:
                worker = ''
            address = parts[0]

            if not address.startswith("pool"):
                try:
                    curr = currencies.lookup_payable_addr(address)
                except InvalidAddressException:
                    curr = None

                if curr is None:
                    address = global_config.pool_payout_currency.pool_payout_addr

            try:
                slc = ShareSlice(user=address, time=minute, worker=worker, algo=algo,
                                 share_type=share_type, value=shares, span=0)
                db.session.add(slc)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                db.session.rollback()
                slc = ShareSlice.query.with_lockmode('update').filter_by(
                    user=address, time=minute, worker=worker, algo=algo,
                    share_type=share_type).one()
                slc.value += shares
                db.session.commit()
        redis_conn.delete("processing_shares")
Example #9
def _grab_data(prefix, stat):
    proc_name = "processing_{}".format(stat)
    unproc_mins = redis_conn.keys(prefix)
    for key in unproc_mins:
        current_app.logger.info("Processing key {}".format(key))
        try:
            (stamp, ) = key.split("_")[1:]
        except Exception:
            current_app.logger.error("Error processing key {}".format(key),
                                     exc_info=True)
            continue
        stamp = float(stamp)
        minute = datetime.datetime.utcfromtimestamp(stamp)
        # Floor the time so invalid stamps don't get committed
        minute = ShareSlice.floor_time(minute, 0)
        # Skip the minute that is still accumulating data
        if stamp > (time.time() - 30):
            current_app.logger.info("Skipping timestamp {}, too young".format(minute))
            continue

        redis_conn.rename(key, proc_name)
        for user, value in redis_conn.hgetall(proc_name).iteritems():
            try:
                address, worker, did = user.split("_")
                try:
                    value = float(value)
                except ValueError:
                    if value != "None":
                        current_app.logger.warn(
                            "Got bogus value {} from ppagent for stat {}"
                            .format(value, stat), exc_info=True)
                    continue

                # Megahashes are what cgminer reports
                if stat == "hashrate":
                    value *= 1000000
            except Exception:
                current_app.logger.error("Error processing key {} on hash {}"
                                         .format(user, key), exc_info=True)
                continue

            try:
                slc = DeviceSlice(user=address, time=minute, worker=worker,
                                  device=did, stat=stat, value=value, span=0)
                db.session.add(slc)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                current_app.logger.warn("SQLAlchemy collision", exc_info=True)
                db.session.rollback()
        redis_conn.delete(proc_name)
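
For context, the key layout _grab_data assumes: a "<stat>_<stamp>" key holding a hash of "address_worker_deviceid" fields mapped to the reported values. The stat name, key prefix, and reading below are illustrative assumptions, not from the producer code:

import time

# Hypothetical producer side: a ppagent temperature reading for device 0
stamp = int(time.time() // 60 * 60)
redis_conn.hset("temp_{}".format(stamp), "SomeAddress_rig1_0", "61.5")
_grab_data("temp_*", "temperature")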
Example #12
def compress_five_minute():
    ShareSlice.compress(1)
    DeviceSlice.compress(1)
    db.session.commit()
Example #13
def compress_minute():
    ShareSlice.compress(0)
    DeviceSlice.compress(0)
    db.session.commit()
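
Judging by the test_compress assertions above, compress_minute folds span-0 (1-minute) slices into span 1, and compress_five_minute folds span-1 slices into span 2; both read as periodic maintenance tasks. One plausible way to drive them (the scheduler is not shown in the source, so this loop is purely illustrative):

import time

def run_compressors():
    # Illustrative driver only; the real project presumably uses a
    # proper scheduler (cron, celery beat, etc.)
    tick = 0
    while True:
        compress_minute()           # fold span-0 slices into span 1
        if tick % 5 == 0:
            compress_five_minute()  # fold span-1 slices into span 2
        tick += 1
        time.sleep(60)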