Example 1
def collect_minutes():
    """ Grabs all the pending minute shares out of redis and puts them in the
    database """
    unproc_mins = redis_conn.keys("min_*")
    for key in unproc_mins:
        current_app.logger.info("Processing key {}".format(key))
        share_type, algo, stamp = key.split("_")[1:]
        minute = datetime.datetime.utcfromtimestamp(float(stamp))
        # To ensure invalid stamps don't get committed
        minute = ShareSlice.floor_time(minute, 0)
        # Skip stamps that are too recent; the key may still be collecting shares
        if float(stamp) > (time.time() - 30):
            current_app.logger.info("Skipping timestamp {}, too young"
                                    .format(minute))
            continue

        redis_conn.rename(key, "processing_shares")
        for user, shares in redis_conn.hgetall("processing_shares").iteritems():

            shares = float(shares)
            # messily parse out the worker/address combo...
            parts = user.split(".")
            if len(parts) > 1:
                worker = parts[1]
            else:
                worker = ''
            address = parts[0]

            if not address.startswith("pool"):
                try:
                    curr = currencies.lookup_payable_addr(address)
                except InvalidAddressException:
                    curr = None

                if curr is None:
                    address = global_config.pool_payout_currency.pool_payout_addr

            try:
                slc = ShareSlice(user=address, time=minute, worker=worker, algo=algo,
                                 share_type=share_type, value=shares, span=0)
                db.session.add(slc)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                db.session.rollback()
                slc = ShareSlice.query.with_lockmode('update').filter_by(
                    user=address, time=minute, worker=worker, algo=algo,
                    share_type=share_type).one()
                slc.value += shares
                db.session.commit()
        redis_conn.delete("processing_shares")
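
The try/except around the insert is a portable upsert: attempt the INSERT, and on an IntegrityError (the composite key already exists) roll back and fall back to a locked UPDATE that increments the existing row. Below is a minimal, self-contained sketch of the same insert-or-increment idea against a toy model and an in-memory SQLite database; the Slice model, the add_shares helper and its key columns are illustrative, not part of the pool code.

# Minimal sketch of the insert-then-update fallback, on a toy model and an
# in-memory SQLite database. Model and names are illustrative.
import sqlalchemy
from sqlalchemy import Column, Float, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Slice(Base):
    __tablename__ = "slices"
    user = Column(String, primary_key=True)
    minute = Column(Integer, primary_key=True)
    value = Column(Float, default=0.0)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

def add_shares(user, minute, shares):
    """Insert a new slice, or fall back to incrementing the existing row."""
    try:
        session.add(Slice(user=user, minute=minute, value=shares))
        session.commit()
    except sqlalchemy.exc.IntegrityError:
        session.rollback()
        slc = session.query(Slice).filter_by(user=user, minute=minute).one()
        slc.value += shares
        session.commit()

add_shares("addr1", 0, 5.0)
add_shares("addr1", 0, 2.5)   # key collision -> existing row becomes 7.5

The rollback before the fallback query matters: after the failed INSERT the session is in an aborted state, and SQLAlchemy refuses further queries until rollback() is called.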
Example 2
def collect_minutes():
    """ Grabs all the pending minute shares out of redis and puts them in the
    database """
    unproc_mins = redis_conn.keys("min_*")
    for key in unproc_mins:
        current_app.logger.info("Processing key {}".format(key))
        share_type, algo, stamp = key.split("_")[1:]
        minute = datetime.datetime.utcfromtimestamp(float(stamp))
        # To ensure invalid stamps don't get committed
        minute = ShareSlice.floor_time(minute, 0)
        # Skip stamps that are too recent; the key may still be collecting shares
        if float(stamp) > (time.time() - 30):
            current_app.logger.info("Skipping timestamp {}, too young"
                                    .format(minute))
            continue

        redis_conn.rename(key, "processing_shares")
        for user, shares in redis_conn.hgetall("processing_shares").iteritems():

            shares = float(shares)
            # messily parse out the worker/address combo...
            parts = user.split(".")
            if len(parts) > 1:
                worker = parts[1]
            else:
                worker = ''
            address = parts[0]

            if address != "pool":
                try:
                    curr = currencies.lookup_payable_addr(address)
                except InvalidAddressException:
                    curr = None

                if not curr:
                    address = global_config.pool_payout_currency.pool_payout_addr

            try:
                slc = ShareSlice(user=address, time=minute, worker=worker, algo=algo,
                                 share_type=share_type, value=shares, span=0)
                db.session.add(slc)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                db.session.rollback()
                slc = ShareSlice.query.with_lockmode('update').filter_by(
                    user=address, time=minute, worker=worker, algo=algo,
                    share_type=share_type).one()
                slc.value += shares
                db.session.commit()
        redis_conn.delete("processing_shares")
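
This second variant differs from the first only in how it matches the internal "pool" account and tests the failed currency lookup; the Redis handling is identical: RENAME the live minute hash onto a scratch key so new shares keep accumulating under a fresh key, then drain the frozen copy. Below is a rough sketch of just that drain-by-rename step, with illustrative key names; it assumes a local Redis server and the redis-py client.

# Snapshot-by-RENAME sketch: move the live hash aside atomically, then read
# the frozen copy while writers keep filling a fresh key.
import redis

r = redis.StrictRedis(decode_responses=True)

def drain_hash(live_key, processing_key="processing_snapshot"):
    """Atomically move live_key aside and return its contents."""
    try:
        r.rename(live_key, processing_key)    # atomic on the Redis server
    except redis.ResponseError:               # live_key does not exist
        return {}
    contents = r.hgetall(processing_key)      # no new writes land here
    r.delete(processing_key)
    return contents

# Writers would keep calling r.hincrbyfloat(live_key, user, shares) against a
# fresh live key while the renamed snapshot is processed here.
for user, shares in drain_hash("min_acc_scrypt_1700000000").items():
    print("{}: {}".format(user, float(shares)))

RENAME is atomic on the server, so no increment can fall between the snapshot and the reads that follow; the trade-off, shared with the code above, is that a leftover scratch key from a crashed run would be silently overwritten.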
Example 3
def _grab_data(prefix, stat):
    proc_name = "processing_{}".format(stat)
    unproc_mins = redis_conn.keys(prefix)
    for key in unproc_mins:
        current_app.logger.info("Processing key {}".format(key))
        try:
            (stamp, ) = key.split("_")[1:]
        except Exception:
            current_app.logger.error("Error processing key {}".format(key),
                                     exc_info=True)
            continue
        minute = datetime.datetime.utcfromtimestamp(float(stamp))
        # To ensure invalid stamps don't get committed
        minute = ShareSlice.floor_time(minute, 0)
        # Skip stamps that are too recent; the key may still be collecting data
        if float(stamp) > (time.time() - 30):
            current_app.logger.info("Skipping timestamp {}, too young".format(minute))
            continue

        redis_conn.rename(key, proc_name)
        for user, value in redis_conn.hgetall(proc_name).iteritems():
            try:
                address, worker, did = user.split("_")
                try:
                    value = float(value)
                except ValueError:
                    if value != "None":
                        current_app.logger.warn(
                            "Got bogus value {} from ppagent for stat {}"
                            .format(value, stat), exc_info=True)
                    continue

                # Megahashes are what cgminer reports
                if stat == "hashrate":
                    value *= 1000000
            except Exception:
                current_app.logger.error("Error processing key {} on hash {}"
                                         .format(user, key), exc_info=True)
                continue

            try:
                slc = DeviceSlice(user=address, time=minute, worker=worker,
                                  device=did, stat=stat, value=value, span=0)
                db.session.add(slc)
                db.session.commit()
            except sqlalchemy.exc.IntegrityError:
                current_app.logger.warn("SQLAlchemy collision", exc_info=True)
                db.session.rollback()
        redis_conn.delete(proc_name)
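
Before each DeviceSlice is built, the hash field name (address_worker_device) is split apart and hashrate values, which cgminer reports in megahashes, are scaled to plain hashes. The helper below restates that parsing and normalization on its own; the function name and the sample fields are illustrative.

# Standalone version of the field parsing / unit normalization from the inner
# loop above.
def parse_device_stat(field, raw_value, stat):
    """Return (address, worker, device_id, value), or None for bogus input."""
    try:
        address, worker, device_id = field.split("_")
    except ValueError:            # wrong number of '_'-separated parts
        return None
    try:
        value = float(raw_value)
    except ValueError:            # ppagent occasionally reports "None"
        return None
    if stat == "hashrate":        # cgminer reports megahashes -> hashes
        value *= 1000000
    return address, worker, device_id, value

print(parse_device_stat("addr1_rig1_0", "12.5", "hashrate"))
# ('addr1', 'rig1', '0', 12500000.0)
print(parse_device_stat("addr1_rig1_0", "None", "temperature"))
# None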
Example 4
def compress_slices():
    for chain in chains.itervalues():
        # Get the index of the last inserted share slice on this chain
        last_complete_slice = redis_conn.get("chain_{}_slice_index".format(chain.id))
        if last_complete_slice is None:
            # Chain must not be in use....
            current_app.logger.debug(
                "No slice index for chain {}".format(chain))
            continue
        else:
            last_complete_slice = int(last_complete_slice)

        # Loop through all possible share slice numbers
        empty = 0
        encoding_time = 0.0
        retrieval_time = 0.0
        entry_count = 0
        encoded_size = 0
        original_size = 0
        last_slice = last_complete_slice
        for slc_idx in xrange(last_complete_slice, 0, -1):
            key = "chain_{}_slice_{}".format(chain.id, slc_idx)
            key_type = redis_conn.type(key)

            # Compress if it's a list. This is raw data from powerpool's redis
            # reporter
            if key_type == "list":
                # Reduce empty counter, but don't go negative
                empty = max(0, empty - 1)

                # Retrieve the unencoded information from redis
                t = time.time()
                slice_shares = redis_conn.lrange(key, 0, -1)
                this_original_size = int(redis_conn.debug_object(key)['serializedlength'])
                this_retrieval_time = time.time() - t

                # Parse the list into proper python representation
                data = []
                total_shares = 0
                for entry in slice_shares:
                    user, shares = entry.split(":")
                    shares = Decimal(shares)
                    data.append((user, shares))
                    total_shares += shares
                this_entry_count = len(data)

                # serialization and compression
                t = time.time()
                data = json.dumps(data, separators=(',', ':'), use_decimal=True)
                data = bz2.compress(data)
                this_encoding_time = time.time() - t

                # Put all the new data into a temporary key, then atomically
                # replace the old list key. Ensures we never lose data, even
                # on failures (exceptions)
                key_compressed = key + "_compressed"
                redis_conn.hmset(key_compressed,
                                 dict(
                                     date=int(time.time()),
                                     data=data,
                                     encoding="bz2json",
                                     total_shares=total_shares)
                                 )
                redis_conn.rename(key_compressed, key)
                this_encoded_size = int(redis_conn.debug_object(key)['serializedlength'])

                last_slice = slc_idx
                # Update all the aggregates
                encoding_time += this_encoding_time
                retrieval_time += this_retrieval_time
                entry_count += this_entry_count
                encoded_size += this_encoded_size
                original_size += this_original_size
                # Print progress
                current_app.logger.info(
                    "Encoded slice #{:,} containing {:,} entries."
                    " retrieval_time: {}; encoding_time: {}; start_size: {:,}; end_size: {:,}; ratio: {}"
                    .format(slc_idx, this_entry_count,
                            time_format(this_retrieval_time),
                            time_format(this_encoding_time),
                            this_original_size, this_encoded_size,
                            float(this_original_size) / (this_encoded_size or 1)))

            # Count an empty entry to detect the end of live slices
            elif key_type == "none":
                empty += 1

            # If we've seen a lot of empty slices, probably nothing else to find!
            if empty >= 20:
                current_app.logger.info(
                    "Ended compression search at {}".format(slc_idx))
                break

        current_app.logger.info(
            "Encoded from slice #{:,} -> #{:,} containing {:,} entries."
            " retrieval_time: {}; encoding_time: {}; start_size: {:,}; end_size: {:,}; ratio: {}"
            .format(last_complete_slice, last_slice, entry_count,
                    time_format(retrieval_time), time_format(encoding_time),
                    original_size, encoded_size,
                    float(original_size) / (encoded_size or 1)))
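
The heart of the loop is the encode step: dump the (user, shares) pairs to compact JSON, bz2-compress the blob, stage the result under a temporary key, and RENAME it over the original list so a failure part-way through never loses the raw shares. The sketch below isolates that round trip using the standard-library json module with the Decimal counts carried as strings (the code above uses simplejson's use_decimal=True instead, so the string handling here is an assumption).

# Compress/decompress round trip for a share slice, Decimals as strings.
import bz2
import json
from decimal import Decimal

def encode_slice(entries):
    """entries: list of (user, Decimal shares) pairs -> compressed bytes."""
    payload = [(user, str(shares)) for user, shares in entries]
    blob = json.dumps(payload, separators=(',', ':'))
    return bz2.compress(blob.encode("utf-8"))

def decode_slice(compressed):
    payload = json.loads(bz2.decompress(compressed).decode("utf-8"))
    return [(user, Decimal(shares)) for user, shares in payload]

entries = [("addr1", Decimal("3.5")), ("addr2", Decimal("1.25"))]
blob = encode_slice(entries)
print(decode_slice(blob) == entries)   # True: the round trip is lossless

Storing the compressed blob in a hash alongside date, encoding and total_shares keeps the slice's metadata readable without decompressing the payload.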