Example #1
    def get_previous_states_metadata(self, limit: int = 20) -> List[Dict]:
        # Dash's threading model requires the connection to be created in the same thread that uses it
        db_conn = sqlite3.connect(self._db_filepath)
        states_dict_list = []
        try:
            result = db_conn.execute(
                f"SELECT * FROM {CrawlerNodeStorage.STATE_DB_NAME} "
                f"ORDER BY datetime(updated) DESC LIMIT ?", (limit,))

            # TODO use `pandas` package instead to automatically get dict?
            column_names = [
                description[0] for description in result.description
            ]
            for row in result:
                state_info = dict()
                for column_name, value in zip(column_names, row):
                    if column_name == 'updated':
                        # stored as rfc3339 for sorting; rfc2822 is easier on the eyes for display
                        state_info[column_name] = MayaDT.from_rfc3339(value).rfc2822()
                    else:
                        state_info[column_name] = value
                states_dict_list.append(state_info)

            return states_dict_list
        finally:
            db_conn.close()
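The rfc3339-to-rfc2822 round trip in the loop above is easy to check in isolation; a minimal sketch, using a made-up timestamp of the kind stored in the 'updated' column:

from maya import MayaDT

stamp = '2020-10-15T12:30:00.0Z'  # made-up rfc3339 value
print(MayaDT.from_rfc3339(stamp).rfc2822())  # e.g. 'Thu, 15 Oct 2020 12:30:00 GMT'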
Example #2
    def get_historical_locked_tokens_over_range(self, days: int):
        range_begin, range_end = self._get_range_bookends(days)
        results = list(
            self._client.query(
                f"SELECT SUM(locked_stake) "
                f"FROM ("
                f"SELECT staker_address, current_period, "
                f"LAST(locked_stake) "
                f"AS locked_stake "
                f"FROM {Crawler.NODE_MEASUREMENT} "
                f"WHERE time >= '{MayaDT.from_datetime(range_begin).rfc3339()}' "
                f"AND "
                f"time < '{MayaDT.from_datetime(range_end).rfc3339()}' "
                f"GROUP BY staker_address, time(1d)"
                f") "
                f"GROUP BY time(1d)").get_points())

        # Note: not all days may have values, e.g. days before the DB started being populated;
        # as time progresses this becomes less of an issue.
        locked_tokens_dict = OrderedDict()
        for r in results:
            locked_stake = r['sum']
            if locked_stake:
                # Dash accepts datetime objects for graphs
                locked_tokens_dict[MayaDT.from_rfc3339(
                    r['time']).datetime()] = locked_stake

        return locked_tokens_dict
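`_get_range_bookends` is not shown in these snippets; judging from the inline version of the same calculation in Example #4 below, it presumably computes UTC-midnight-aligned bookends along these lines (an assumed sketch, not confirmed source):

from datetime import datetime, timedelta

def _get_range_bookends(self, days: int):
    # assumed: midnight UTC after today, so that today is included in the range
    today = datetime.utcnow()
    range_end = datetime(year=today.year, month=today.month, day=today.day) + timedelta(days=1)
    range_begin = range_end - timedelta(days=days)
    return range_begin, range_end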
Example #3
    def get_historical_num_stakers_over_range(self, days: int):
        range_begin, range_end = self._get_range_bookends(days)
        results = list(
            self._client.query(
                f"SELECT COUNT(staker_address) FROM "
                f"("
                f"SELECT staker_address, LAST(locked_stake)"
                f"FROM {Crawler.NODE_MEASUREMENT} WHERE "
                f"time >= '{MayaDT.from_datetime(range_begin).rfc3339()}' AND "
                f"time < '{MayaDT.from_datetime(range_end).rfc3339()}' "
                f"GROUP BY staker_address, time(1d)"
                f") "
                "GROUP BY time(1d)").get_points())  # 1 day measurements

        # Note: not all days may have values, e.g. days before the DB started being populated;
        # as time progresses this becomes less of an issue.
        num_stakers_dict = OrderedDict()
        for r in results:
            num_stakers = r['count']
            if num_stakers:
                # Dash accepts datetime objects for graphs
                num_stakers_dict[MayaDT.from_rfc3339(r['time']).datetime()] = num_stakers

        return num_stakers_dict
Example #4
    def get_historical_locked_tokens_over_range(self, days: int):
        today = datetime.utcnow()
        range_end = datetime(year=today.year, month=today.month, day=today.day,
                             hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)  # include today
        range_begin = range_end - timedelta(days=days)
        results = list(self._client.query(f"SELECT SUM(locked_stake) "
                                          f"FROM ("
                                          f"SELECT staker_address, current_period, "
                                          f"LAST(locked_stake) "
                                          f"AS locked_stake "
                                          f"FROM moe_network_info "
                                          f"WHERE time >= '{MayaDT.from_datetime(range_begin).rfc3339()}' "
                                          f"AND "
                                          f"time < '{MayaDT.from_datetime(range_end).rfc3339()}' "
                                          f"GROUP BY staker_address, time(1d)"
                                          f") "
                                          f"GROUP BY time(1d)").get_points())

        # Note: not all days may have values, e.g. days before the DB started being populated;
        # as time progresses this becomes less of an issue.
        locked_tokens_dict = OrderedDict()
        for r in results:
            locked_stake = r['sum']
            if locked_stake:
                # Dash accepts datetime objects for graphs
                locked_tokens_dict[MayaDT.from_rfc3339(r['time']).datetime()] = locked_stake

        return locked_tokens_dict
Example #5
def test_blockchain_client_get_historical_locked_tokens(new_influx_db):
    mock_influxdb_client = new_influx_db.return_value

    mock_query_object = MagicMock(spec=ResultSet, autospec=True)
    mock_influxdb_client.query.return_value = mock_query_object

    # fake results for 5 days
    days = 5
    start_date = maya.now().subtract(days=days)
    base_amount = 45000
    amount_increment = 10000

    results = []
    for day in range(0, days):
        results.append(dict(time=start_date.add(days=day).rfc3339(), sum=base_amount + (day * amount_increment)))
    mock_query_object.get_points.return_value = results

    blockchain_db_client = CrawlerInfluxClient(None, None, None)

    locked_tokens_dict = blockchain_db_client.get_historical_locked_tokens_over_range(days)

    # check query
    today = datetime.utcnow()
    range_end = datetime(year=today.year, month=today.month, day=today.day,
                         hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)  # include today in range
    range_begin = range_end - timedelta(days=days)

    expected_in_query = [
        "SELECT SUM(locked_stake)",
        "AS locked_stake",

        f"FROM {Crawler.NODE_MEASUREMENT} WHERE time >= '{MayaDT.from_datetime(range_begin).rfc3339()}' AND "
        f"time < '{MayaDT.from_datetime(range_end).rfc3339()}'",

        "GROUP BY staker_address, time(1d)) GROUP BY time(1d)",
    ]

    mock_influxdb_client.query.assert_called_once()
    mock_query_object.get_points.assert_called_once()

    call_args_list = mock_influxdb_client.query.call_args_list
    assert len(call_args_list) == 1
    for execute_call in call_args_list:
        query = execute_call[0][0]
        for statement in expected_in_query:
            assert statement in query

    # check results
    assert len(locked_tokens_dict) == days

    for idx, key in enumerate(locked_tokens_dict):
        # use of rfc3339 loses milliseconds precision
        date = MayaDT.from_rfc3339(start_date.add(days=idx).rfc3339()).datetime()
        assert key == date

        locked_tokens = locked_tokens_dict[key]
        assert locked_tokens == base_amount + (idx * amount_increment)

    # the client does not close the connection automatically; close() must be
    # explicitly called on CrawlerInfluxClient
    mock_influxdb_client.close.assert_not_called()
Example #6
def get_last_seen(node_info):
    try:
        slang_last_seen = MayaDT.from_rfc3339(node_info['last_seen']).slang_time()
    except ParserError:
        # Show whatever we have anyways
        slang_last_seen = str(node_info['last_seen'])

    if slang_last_seen == NO_CONNECTION_TO_NODE:
        slang_last_seen = NOT_YET_CONNECTED_TO_NODE

    return slang_last_seen
def generate_node_table_components(node_info: dict, registry) -> dict:
    identity = html.Td(children=html.Div([
        html.A(node_info['nickname'],
               href=f'https://{node_info["rest_url"]}/status',
               target='_blank')
    ]))

    # Fleet State
    fleet_state_div = []
    fleet_state_icon = node_info['fleet_state_icon']
    if fleet_state_icon is not UNKNOWN_FLEET_STATE:
        fleet_state_div = fleet_state_icon
    fleet_state = html.Td(children=html.Div(fleet_state_div))

    staker_address = node_info['staker_address']

    # Blockchainy (TODO)
    staking_agent = ContractAgency.get_agent(StakingEscrowAgent,
                                             registry=registry)
    current_period = staking_agent.get_current_period()
    last_confirmed_period = staking_agent.get_last_active_period(
        staker_address)
    status = get_node_status(staking_agent, staker_address, current_period,
                             last_confirmed_period)

    etherscan_url = f'https://goerli.etherscan.io/address/{node_info["staker_address"]}'
    try:
        slang_last_seen = MayaDT.from_rfc3339(
            node_info['last_seen']).slang_time()
    except ParserError:
        slang_last_seen = node_info['last_seen']

    components = {
        'Status': status,
        'Checksum': html.Td(
            html.A(f'{node_info["staker_address"][:10]}...',
                   href=etherscan_url,
                   target='_blank')),
        'Nickname': identity,
        'Launched': html.Td(node_info['timestamp']),
        'Last Seen': html.Td([slang_last_seen, f" | Period {last_confirmed_period}"]),
        'Fleet State': fleet_state,
    }

    return components
Example #8
def _get_rows_from_xml(filepath: str, creation_date_start: MayaDT):
    """Parse the comments xml file and yield all row elements after the given creation date."""
    parser = iter(ElementTree.iterparse(filepath, events=['start', 'end']))
    _, root = next(parser)  # keep a handle on the root so processed children can be cleared
    for event, elem in parser:
        if event == 'end' and elem.tag == 'row':
            cd = MayaDT.from_rfc3339(elem.attrib['CreationDate'])
            if creation_date_start is None or creation_date_start <= cd:
                yield elem
            root.clear()  # free memory held by already-processed rows
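A usage sketch for the generator above (the file path and cutoff are hypothetical):

from maya import MayaDT

cutoff = MayaDT.from_rfc3339('2019-01-01T00:00:00.0Z')  # hypothetical cutoff
for row in _get_rows_from_xml('Comments.xml', creation_date_start=cutoff):
    print(row.attrib['Id'], row.attrib['CreationDate'])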
Example #9
def _post_xml_row_to_model(elem,
                           question_ids: Set[int] = None,
                           target_post_type: PostType = PostType.QUESTION):
    """Convert an xml row from the Posts.xml file to a model. Text is sanitized
    before conversion.
    
    question_ids is only applicable if the target post type is
    PostType.ANSWER. An answer is only added if its parent_id is
    contained in question_ids.
    """
    try:
        post_type = PostType(int(elem.attrib['PostTypeId']))
    except ValueError:  # was not a question or answer
        return None

    # early returns
    if target_post_type != post_type:
        return None
    if target_post_type == PostType.ANSWER and int(
            elem.attrib['ParentId']) not in question_ids:
        return None
    try:
        sanitized = sanitize_post(elem.attrib['Body'])
    except ValueError:
        LOGGER.error(
            f"Sanitization failed for Post with Id={elem.attrib['Id']}")
        return None

    date = MayaDT.from_rfc3339(elem.attrib['CreationDate']).date
    if post_type == PostType.ANSWER:
        title = None
        tags = None
        parent_id = elem.attrib['ParentId']
    else:  # is question
        title = elem.attrib['Title']
        tags = elem.attrib['Tags']
        parent_id = None
    post = Post(id=elem.attrib['Id'],
                creation_date=date,
                post_type_id=post_type.value,
                title=title,
                text=sanitized,
                tags=tags,
                parent_id=parent_id)
    return post
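A usage sketch with a hand-built element (attribute values are made up; `PostType`, `sanitize_post`, and `Post` come from the surrounding project):

from xml.etree import ElementTree

row = ElementTree.fromstring(
    '<row Id="42" PostTypeId="1" CreationDate="2019-05-01T10:00:00.0Z" '
    'Title="How do I parse XML?" Tags="&lt;python&gt;" Body="&lt;p&gt;text&lt;/p&gt;"/>')
post = _post_xml_row_to_model(row)  # target_post_type defaults to PostType.QUESTION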
Example #10
    def get_historical_work_orders_over_range(self, days: int):
        range_begin, range_end = self._get_range_bookends(days)
        results = list(
            self._client.query(
                f"SELECT SUM(work_orders) FROM "
                f"("
                f"SELECT staker_address, LAST(work_orders)"
                f"FROM {Crawler.NODE_MEASUREMENT} WHERE "
                f"time >= '{MayaDT.from_datetime(range_begin).rfc3339()}' AND "
                f"time < '{MayaDT.from_datetime(range_end).rfc3339()}' "
                f"GROUP BY staker_address, time(1d)"
                f") "
                "GROUP BY time(1d)").get_points())  # 1 day measurements
        work_orders_dict = OrderedDict()
        for r in results:
            num_work_orders = r['sum']
            work_orders_dict[MayaDT.from_rfc3339(r['time']).datetime()] = num_work_orders or 0

        return work_orders_dict
Example #11
def _comment_xml_row_to_model(elem, post_ids: Set[int]):
    """Convert an xml row from the Comments.xml file to a model. Text is
    sanitized before conversion.
    
    Return None if the post_id is not contained in post_ids.
    """
    post_id = int(elem.attrib['PostId'])
    if post_id not in post_ids:
        return None
    try:
        sanitized = sanitize_comment(elem.attrib['Text'])
    except Exception as e:
        LOGGER.error(
            f"Sanitization failed for Comment with Id={elem.attrib['Id']}\n"
            f"{type(e).__name__}\n{str(e)}")
        return None

    date = MayaDT.from_rfc3339(elem.attrib['CreationDate']).date
    comment = Comment(id=elem.attrib['Id'],
                      creation_date=date,
                      text=sanitized,
                      post_id=post_id)
    return comment
Example #12
def to_maya(commit):
    return MayaDT.from_rfc3339(commit['timestamp'])
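For instance (the commit dict shape is assumed from the lookup key):

commit = {'timestamp': '2020-10-15T00:00:00.0Z'}  # made-up value
dt = to_maya(commit)  # MayaDT instance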
Example #13
INITIAL_SUPPLY = NU(1_000_000_000, 'NU')

UNIVERSITY_INITIAL_SUPPLY = NU(19_500_000, 'NU')
SAFT2_INITIAL_SUPPLY = NU(value=(SAFT2_ALLOCATION_PERCENTAGE * INITIAL_SUPPLY.to_nunits()), denomination='NuNit')
TEAM_INITIAL_SUPPLY = NU(value=(TEAM_ALLOCATION_PERCENTAGE * INITIAL_SUPPLY.to_nunits()), denomination='NuNit')
NUCO_INITIAL_SUPPLY = NU(value=(NUCO_ALLOCATION_PERCENTAGE * INITIAL_SUPPLY.to_nunits()), denomination='NuNit')

SAFT1_SUPPLY = NU(value=(SAFT1_ALLOCATION_PERCENTAGE * INITIAL_SUPPLY.to_nunits()), denomination='NuNit')
CASI_SUPPLY = NU(9_000_000, 'NU')

NUCO_VESTING_MONTHS = 5 * 12
WORKLOCK_VESTING_MONTHS = 6
UNIVERSITY_VESTING_MONTHS = 3 * 12
SAFT2_TEAM_VESTING_MONTHS = 24

LAUNCH_DATE = MayaDT.from_rfc3339('2020-10-15T00:00:00.0Z')
DAYS_PER_MONTH = 30.416  # value used in csv allocations


def months_transpired_since_launch(now: MayaDT) -> int:
    """
    Determines the number of months transpired since the launch date, Oct 15, 2020 00:00:00 UTC, based on how
    monthly durations were calculated when allocations were distributed.
    """
    days_transpired = (now - LAUNCH_DATE).days
    months_transpired = days_transpired / DAYS_PER_MONTH

    months_transpired_ceil = math.ceil(months_transpired)
    # calculation of vesting days (based on months) done during allocation
    rounded_up_months_min_duration_days = round(months_transpired_ceil * DAYS_PER_MONTH)
    # assumed completion (the original snippet is truncated here): count the rounded-up
    # month only once its full (rounded) number of days has transpired
    if days_transpired >= rounded_up_months_min_duration_days:
        return months_transpired_ceil
    return math.floor(months_transpired)
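A worked instance of the arithmetic above, with a made-up date 100 days after launch:

import math
from maya import MayaDT

now = MayaDT.from_rfc3339('2021-01-23T00:00:00.0Z')    # hypothetical: 100 days post-launch
days_transpired = (now - LAUNCH_DATE).days             # 100
months_transpired = days_transpired / DAYS_PER_MONTH   # ~3.29
print(math.ceil(months_transpired))                    # 4
print(round(math.ceil(months_transpired) * DAYS_PER_MONTH))  # 122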