Example No. 1
def get_lps_share_integral_for_pools(_df, _realtime=None, _exclusions=None):
    LOGGER.debug('get_lps_share_integral_for_pools')
    # avoid a mutable default argument
    _exclusions = _exclusions or {}
    pools_list = _df.index.get_level_values('pool_address').drop_duplicates()
    pool_dfs = []
    for pool in pools_list:
        LOGGER.info('Computing shares of incentives for pool ' + pool)
        pool_data = _df.loc[pool]
        ineligible_lps = [
            address.lower() for address in _exclusions.get(pool, [])
        ]
        eligible_lps_pool_data = pool_data.query(
            f'lp_address not in {ineligible_lps}')
        if len(ineligible_lps) > 0:
            LOGGER.info(
                f'Total LPs: {len(pool_data.index.get_level_values("lp_address").drop_duplicates())}'
            )
            LOGGER.info(f'Ineligible LPs: {ineligible_lps}')
            LOGGER.info(
                f'Eligible LPs: {len(eligible_lps_pool_data.index.get_level_values("lp_address").drop_duplicates())}'
            )
        lps_shares = get_lps_share_integral_for_pool(
            eligible_lps_pool_data,
            _realtime=_realtime,
            _exclusions=ineligible_lps)
        pool_df = pd.DataFrame(lps_shares)
        pool_df['pool_address'] = pool
        pool_dfs.append(pool_df)
    # DataFrame.append was removed in pandas 2.0; collect and concat instead
    lm_shares_df = pd.concat(pool_dfs).reset_index().set_index(
        ['pool_address', 'lp_address'])
    return lm_shares_df
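A minimal usage sketch, assuming the functions and LOGGER from these examples are in scope; the pool and LP addresses below are hypothetical placeholders, and the input frame matches the shape the pipeline expects: a (pool_address, lp_address, block_timestamp) MultiIndex with a 'delta' column of LM-power changes.

import pandas as pd

# hypothetical addresses, for illustration only
POOL = '0x0123456789abcdef0123456789abcdef01234567'
LP_A = '0xaaaa00000000000000000000000000000000aaaa'

# tz-aware timestamps: compute_LM_power_timeseries strips the timezone itself
index = pd.MultiIndex.from_tuples(
    [(POOL, LP_A, pd.Timestamp('2021-06-01', tz='UTC')),
     (POOL, LP_A, pd.Timestamp('2021-06-02', tz='UTC'))],
    names=['pool_address', 'lp_address', 'block_timestamp'])
df = pd.DataFrame({'delta': [100.0, 50.0]}, index=index)

# optionally pass _exclusions={pool_address: [lp_address, ...]} to drop LPs
shares = get_lps_share_integral_for_pools(df)
print(shares)  # latest_share and share_integral per (pool_address, lp_address)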
Example No. 2
def get_lps_share_integral_for_pool(_df, _realtime=None, _exclusions=None):
    LOGGER.debug('get_lps_share_integral_for_pool')
    # _exclusions is unused here: ineligible LPs are filtered out by the caller
    df = compute_LM_power_timeseries(_df)

    latest_timestamp = df.index.get_level_values('block_timestamp').max()
    intervals = compute_timestamp_intervals(
        df.index.get_level_values('block_timestamp'), _realtime)
    df = df.join(intervals)

    total_lm_power = compute_total_LM_power_timeseries(df)
    df = df.join(total_lm_power)

    # instantaneous share of the pool's LM power, weighted by how long it held
    df['lm_share'] = df['lm_power'] / df['total_lm_power']
    df['share_integral'] = df['lm_share'] * df['state_duration']

    # each LP's share in the most recent recorded state
    latest_share = df.iloc[df.index.get_locs([slice(None),
                                              latest_timestamp])]['lm_share']
    latest_share = latest_share.droplevel('block_timestamp')

    total_share_integral = df['share_integral'].sum()
    lp_lm_share = df.groupby(
        'lp_address')['share_integral'].sum() / total_share_integral
    result = latest_share.to_frame().join(lp_lm_share)
    result.columns = ['latest_share', 'share_integral']
    return result
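The result is a time-weighted average: each LP's instantaneous share of total LM power is weighted by how long that state lasted, and the grand total of share_integral across all LPs equals the window length, since shares sum to 1 at every instant. A hand-worked illustration with made-up numbers:

# LP A over a 100-second window (hypothetical numbers):
#   state 1 lasts 60 s: A holds 30 of 40 total LM power -> share 0.75
#   state 2 lasts 40 s: A holds 30 of 60 total LM power -> share 0.50
# A's integral     = 0.75 * 60 + 0.50 * 40 = 65
# total integral   = 100 (the window length)
share_integral_a = (0.75 * 60 + 0.50 * 40) / (60 + 40)
assert abs(share_integral_a - 0.65) < 1e-12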
Example No. 3
def get_lm_allocations(_chain_id, _week_number=0, _realtime=None):
    LOGGER.debug('get_lm_allocations')
    # default: the report covers the first 3 of the week's 7 days
    # (matches days_in_week in query_gbq)
    week_passed = 3 / 7
    if _realtime:
        _week_number = get_current_lm_week_number()
        week_passed = get_percent_week_passed()

    # context manager closes the connection when we're done reading
    with urlopen(V2_LM_ALLOCATION_URL) as jsonurl:
        try:
            week_allocation = json.loads(jsonurl.read())[f'week_{_week_number}']
        except KeyError:
            week_allocation = {}
    for chain_allocation in week_allocation:
        if chain_allocation['chainId'] == _chain_id:
            df = pd.DataFrame()
            for pool, rewards in chain_allocation['pools'].items():
                for r in rewards:
                    if r['tokenAddress'] in EXCLUDED_POOLS_TOKENS.get(
                            pool, []):
                        continue
                    # pool ids may carry a suffix; an address is 42 chars
                    pool_address = pool[:42].lower()
                    df.loc[pool_address,
                           r['tokenAddress']] = r['amount'] * week_passed
            if len(df) == 0:
                LOGGER.info('No incentives for this chain')
                continue
            df.fillna(0, inplace=True)
            df.index.name = 'pool_address'
            return df, week_passed
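The parsing above implies a shape for the allocation feed: a top-level week_<n> key holding a list of per-chain entries, each with a chainId and a pools map from pool id to reward entries. A hypothetical fixture matching that inferred shape (only the BAL token address is real; the week number and pool id are made up):

allocation_fixture = {
    'week_62': [{
        'chainId': 1,
        'pools': {
            # pool ids are longer than an address; pool[:42] trims them
            # back down to the 42-character pool address
            '0x0123456789abcdef0123456789abcdef01234567'
            '00020000000000000000000f': [
                {'tokenAddress': '0xba100000625a3754423978a60c9317c58a424e3d',
                 'amount': 1000.0},
            ],
        },
    }],
}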
Example No. 4
def apply_redirects(_data, _realtime=None, _redirects=None):
    if _redirects:
        redirects = _redirects
    else:
        redirects = get_redirects(_realtime)
    result = pd.DataFrame(_data).reset_index()
    result['lp_address'] = result['lp_address'].apply(Web3.toChecksumAddress)
    # count how many recipients appear in the redirect table
    n = result['lp_address'].isin(redirects.keys()).sum()
    LOGGER.debug(f'{n} redirectors found amongst the recipients')
    # map each redirector to its destination, then merge rows sharing an address
    result['lp_address'] = result['lp_address'].apply(
        lambda x: redirects.get(x, x))
    result = result.groupby('lp_address').sum()
    return result, n
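A self-contained sketch of the redirect step, passing the table through _redirects so that get_redirects is not needed; the addresses are hypothetical, and web3's v5-style Web3.toChecksumAddress (the call the function itself uses) is assumed available:

import pandas as pd
from web3 import Web3

LP = '0x' + 'aa' * 20        # hypothetical contract that redirects its rewards
TREASURY = '0x' + 'bb' * 20  # hypothetical destination address

earned = pd.Series({LP: 12.5}, name='earned')
earned.index.name = 'lp_address'

# keys must be checksummed, matching the apply() inside the function
redirects = {Web3.toChecksumAddress(LP): Web3.toChecksumAddress(TREASURY)}
redirected, n = apply_redirects(earned, _redirects=redirects)
assert n == 1  # one redirector found; its row is now keyed by TREASURY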
Example No. 5
def compute_timestamp_intervals(_blocks, _realtime=None):
    LOGGER.debug('compute_timestamp_intervals')
    blocks = pd.Series(_blocks).drop_duplicates().sort_values().values
    # each state lasts from its block timestamp until the next one
    intervals = pd.Series(blocks, index=blocks).diff().shift(-1)
    if _realtime:
        # the latest state is still running: measure up to now
        intervals.iloc[-1] = datetime.datetime.utcnow() - intervals.index[-1]
    else:
        # the last state runs to the end of the 7-day window
        # that starts at the first timestamp
        intervals.iloc[-1] = intervals.index[0] + \
            datetime.timedelta(days=7) - intervals.index[-1]
    intervals = intervals.dt.total_seconds()
    intervals.name = 'state_duration'
    intervals.index.name = 'block_timestamp'
    return intervals
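A quick self-contained check of the interval logic with three synthetic timestamps; with _realtime falsy, the final state is held open until seven days after the first timestamp, so the durations always sum to one week:

import pandas as pd

ts = pd.to_datetime(
    ['2021-06-01 00:00', '2021-06-01 00:01', '2021-06-03 12:00'])
intervals = compute_timestamp_intervals(ts, _realtime=False)
assert intervals.iloc[0] == 60.0           # first state lasts one minute
assert intervals.sum() == 7 * 24 * 3600.0  # states cover the full week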
Example No. 6
def query_gbq(_network, _week_number, _pool_list, _excluded_lps_list=None):
    LOGGER.debug('query_gbq')

    # merge caller exclusions with the base list, avoiding a mutable default
    _excluded_lps_list = list(
        set((_excluded_lps_list or []) + BASE_LP_EXCLUSION_LIST))

    with open(SQL_FILE_PATH, 'r') as file:
        sql = file.read()

    # the query only covers the first 3 days of the LM week
    _days_in_week = '3'

    sql = sql.format(
        week_number=_week_number,
        pool_addresses="','".join(_pool_list),
        blocks_table=TABLES_CONFIGS[_network]['blocks'],
        lm_transfers_table=TABLES_CONFIGS[_network]['lm_transfers'],
        lm_state_table=TABLES_CONFIGS[_network]['lm_state'],
        excluded_lps="','".join(_excluded_lps_list),
        days_in_week=_days_in_week)
    client = bigquery.Client()
    bqstorageclient = bigquery_storage.BigQueryReadClient()
    df = (client.query(sql).result().to_dataframe(
        bqstorage_client=bqstorageclient))
    df = df.groupby(['pool_address', 'lp_address', 'block_timestamp']).sum()
    return df
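The real query text lives at SQL_FILE_PATH and is repo-specific, but the placeholder wiring can be shown against a trivial stand-in template using the same names; the table and address values here are hypothetical:

sql_template = (
    "SELECT * FROM `{lm_state_table}` "
    "WHERE pool_address IN ('{pool_addresses}') "
    "AND lp_address NOT IN ('{excluded_lps}') "
    "-- week {week_number}, first {days_in_week} days")

sql = sql_template.format(
    week_number=62,
    pool_addresses="','".join(['0xpool1', '0xpool2']),
    lm_state_table='project.dataset.lm_state',
    excluded_lps="','".join(['0xdead']),
    days_in_week='3')
# "','".join() turns a Python list into the body of a SQL IN ('a','b') list
print(sql)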
Example No. 7
def save_report(_week_number, _chain_id, _token, _data):
    LOGGER.debug(f'saving {_token} report...')
    network = NETWORKS[_chain_id]
    reports_dir = f'reports/{_week_number}'
    # makedirs also handles a missing parent 'reports' directory
    os.makedirs(reports_dir, exist_ok=True)
    filename = f'{reports_dir}/__{network}_{_token}.json'
    # drop claims at or below the per-token dust threshold
    export_data = _data[_data > get_claim_threshold(_token)]
    export = export_data.apply(
        lambda x: format(x, f'.{get_claim_precision(_token)}f'))
    export_json = export.to_json()
    parsed_export = json.loads(export_json)
    with open(filename, "w") as write_file:
        json.dump(parsed_export, write_file, indent=4)
    LOGGER.debug(f'saved to {filename}')
    # on mainnet, also mirror the BAL report as the totals file
    if _chain_id == 1 and _token == '0xba100000625a3754423978a60c9317c58a424e3d':
        filename = f'{reports_dir}/_totals.json'
        with open(filename, "w") as write_file:
            json.dump(parsed_export, write_file, indent=4)
        LOGGER.debug(f'saved to {filename}')
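The filter-and-format step in isolation, with hypothetical stand-ins for get_claim_threshold and get_claim_precision; note the strict > comparison, so claims exactly at the threshold are dropped too:

import json
import pandas as pd

claims = pd.Series({'0xlp1': 0.00000001, '0xlp2': 1.23456789})
threshold, precision = 0.000001, 8  # hypothetical per-token settings

export = claims[claims > threshold].apply(lambda x: format(x, f'.{precision}f'))
print(json.loads(export.to_json()))  # {'0xlp2': '1.23456789'}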
Example No. 8
def compute_LM_power_timeseries(_df):
    LOGGER.debug('compute_LM_power_timeseries')
    df = _df.copy()
    # running balance per LP: cumulative sum of the transfer deltas
    df['lm_power'] = df.sort_index().groupby('lp_address')['delta'].cumsum()
    df = df.drop(columns=['delta'])
    # floor at zero so no LP carries a negative balance
    df = df.clip(lower=0)

    lp_addresses_list = (df.index.get_level_values(
        'lp_address').drop_duplicates().sort_values().values)
    block_timestamps_list = (df.index.get_level_values(
        'block_timestamp').drop_duplicates().sort_values().values)
    levels = [lp_addresses_list, block_timestamps_list]
    new_index = pd.MultiIndex.from_product(
        levels,
        names=['lp_address', 'block_timestamp'],
    )
    df = df.tz_localize(None, level='block_timestamp')
    LOGGER.debug('reindexing ({})...'.format(len(block_timestamps_list)))
    df = df.reindex(index=new_index)
    # LPs with no state at the first timestamp start from zero...
    df.loc(axis=0)[:, block_timestamps_list[0]] = df.loc(
        axis=0)[:, block_timestamps_list[0]].fillna(0)
    # ...and every LP's power is carried forward between its state changes
    df = df.ffill()
    LOGGER.debug('done')
    return df
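A small worked input, assuming the function and LOGGER are in scope: two LPs whose transfer deltas land at different blocks, so both the reindexing and the forward fill are exercised. The timestamps are tz-aware because the function strips the timezone itself:

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('0xlp_a', pd.Timestamp('2021-06-01', tz='UTC')),
     ('0xlp_b', pd.Timestamp('2021-06-02', tz='UTC'))],
    names=['lp_address', 'block_timestamp'])
deltas = pd.DataFrame({'delta': [10.0, 5.0]}, index=idx)

power = compute_LM_power_timeseries(deltas)
# 0xlp_b gets lm_power 0.0 on 06-01 (first-timestamp fill) and
# 0xlp_a keeps 10.0 on 06-02 (forward fill)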
Example No. 9
# melt per-token earned amounts into long format
# (leading lines truncated in the source; reconstructed here to mirror
# the velocity block below)
amounts_df = (
    amounts_df
    .groupby('lp_address')
    .sum()
    .melt(var_name='token_address', value_name='earned', ignore_index=False)
    .set_index('token_address', append=True)
)
# compute the velocity at which each LP is earning tokens
velocity_df = (
    velocity_df
    .groupby('lp_address')
    .sum()
    .melt(var_name='token_address', value_name='velocity', ignore_index=False)
    .set_index('token_address', append=True)
)
results_df = amounts_df.join(velocity_df)
results_tokens = results_df.index.get_level_values(
    'token_address').drop_duplicates()
# apply redirects and save reports
for token in results_tokens:
    LOGGER.debug(f'Redirecting {token}...')
    export_data = results_df.loc[(slice(None), [token]),
                                 ['earned']].droplevel('token_address')
    # apply_redirects returns the redirected frame and the redirector count
    export_data, redirected_n = apply_redirects(export_data)
    LOGGER.info(f'{token} - {redirected_n} redirectors')
    # save_report writes the file itself and returns nothing
    save_report(week_number, chain_id, token, export_data['earned'])
if realtime_estimator:
    results_df = results_df.reset_index()
    results_df['lp_address'] = results_df['lp_address'].apply(
        Web3.toChecksumAddress)
    results_df = results_df.rename(columns={'lp_address': 'address'})
    results_df['timestamp'] = get_current_lm_week_end_timestamp()
    results_df['week'] = week_number
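The melt step in isolation: per-token columns become a token_address index level, which is what makes the per-token loc selections in the loop above possible. A self-contained sketch with hypothetical tokens:

import pandas as pd

wide = pd.DataFrame(
    {'0xtokenA': [1.0, 2.0], '0xtokenB': [3.0, 4.0]},
    index=pd.Index(['0xlp1', '0xlp2'], name='lp_address'))

long_df = (wide
           .melt(var_name='token_address', value_name='earned',
                 ignore_index=False)
           .set_index('token_address', append=True)
           .sort_index())
print(long_df.loc[(slice(None), ['0xtokenA']), :])  # one token's rows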
Example No. 10
def compute_total_LM_power_timeseries(_df):
    LOGGER.debug('compute_total_LM_power_timeseries')
    # sum LM power across all LPs at each state change
    total_lm_power = _df.groupby('block_timestamp')['lm_power'].sum()
    total_lm_power.name = 'total_lm_power'
    return total_lm_power
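A quick check of the aggregation with a hand-built frame of the same shape (integer stand-ins for block timestamps keep the sketch short):

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('0xlp_a', 1), ('0xlp_b', 1), ('0xlp_a', 2)],
    names=['lp_address', 'block_timestamp'])
power = pd.DataFrame({'lm_power': [10.0, 5.0, 10.0]}, index=idx)

total = compute_total_LM_power_timeseries(power)
assert total.loc[1] == 15.0 and total.loc[2] == 10.0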