Example #1
    def test_lexical_analyze_minus_equal_decrement_minus(self):
        # Test minus_equal, decrement, and minus

        source_code = """var a = 1 - 2
        a -= 1
        a--
        """

        with open("testing.simc", "w") as file:
            file.write(source_code)

        table = SymbolTable()

        tokens = lexical_analyze("testing.simc", table)

        minus_equal = Token("minus_equal", "", 2)
        decrement = Token("decrement", "", 3)
        minus = Token("minus", "", 1)

        self.assertEqual(tokens[8], minus_equal)
        self.assertEqual(tokens[12], decrement)
        self.assertEqual(tokens[4], minus)
Example #2
    def test_lexical_analyze_less_than_less_than_equal_left_shift(self):
        # Test less_than, less_than_equal, left_shift

        source_code = """1 < 2
        1 <= 2
        1 << 2
        """

        with open("testing.simc", "w") as file:
            file.write(source_code)

        table = SymbolTable()

        tokens = lexical_analyze("testing.simc", table)

        less_than = Token("less_than", "", 1)
        less_than_equal = Token("less_than_equal", "", 2)
        left_shift = Token("left_shift", "", 3)

        self.assertEqual(tokens[1], less_than)
        self.assertEqual(tokens[5], less_than_equal)
        self.assertEqual(tokens[9], left_shift)
Example #3
    def test_numeric_val_double(self):
        source_code = "3.1415914159\0"
        i = 0
        table = SymbolTable()
        line_num = 1

        token, _ = numeric_val(source_code, i, table, line_num)
        other = Token("number", 1, 1)

        self.assertEqual(token, other)

        self.assertEqual(table.symbol_table,
                         {1: ["3.1415914159", "double", "constant"]})
Example #4
    def test_lexical_analyze_plus_equal_increment_plus(self):
        # Test plus_equal, increment, and plus

        source_code = """var a = 1 + 2
        a += 1
        a++
        """

        with open("testing.simc", "w") as file:
            file.write(source_code)

        table = SymbolTable()

        tokens = lexical_analyze("testing.simc", table)

        plus_equal = Token("plus_equal", "", 2)
        increment = Token("increment", "", 3)
        plus = Token("plus", "", 1)

        self.assertEqual(tokens[8], plus_equal)
        self.assertEqual(tokens[12], increment)
        self.assertEqual(tokens[4], plus)
Example #5
    def test_string_val_char(self):
        source_code = '"h"\\0'
        i = 0
        table = SymbolTable()
        line_num = 1

        token, _ = string_val(source_code, i, table, line_num)

        other = Token("string", 1, 1)

        self.assertEqual(token, other)

        self.assertEqual(table.symbol_table, {1: ["'h'", "char", "constant"]})
Example #6
    def test_lexical_analyze_greater_than_greater_than_equal_right_shift(self):
        # Test greater_than, greater_than_equal, right_shift

        source_code = """1 > 2
        1 >= 2
        1 >> 2
        """

        with open("testing.simc", "w") as file:
            file.write(source_code)

        table = SymbolTable()

        tokens = lexical_analyze("testing.simc", table)

        greater_than = Token("greater_than", "", 1)
        greater_than_equal = Token("greater_than_equal", "", 2)
        right_shift = Token("right_shift", "", 3)

        self.assertEqual(tokens[1], greater_than)
        self.assertEqual(tokens[5], greater_than_equal)
        self.assertEqual(tokens[9], right_shift)
Example #7
    def test_lexical_analyze_colon(self):
        source_code = """:
        """

        with open("testing.simc", "w") as file:
            file.write(source_code)

        table = SymbolTable()

        tokens = lexical_analyze("testing.simc", table)

        colon = Token("colon", "", 1)

        self.assertEqual(tokens[0], colon)
Example #8
    def test_lexical_analyze_not_equal(self):
        source_code = """1 != 2
        """

        with open("testing.simc", "w") as file:
            file.write(source_code)

        table = SymbolTable()

        tokens = lexical_analyze("testing.simc", table)

        not_equal = Token("not_equal", "", 1)

        self.assertEqual(tokens[1], not_equal)
Example #9
def get_swap_amount(web3, amount, token0_name, token1_name):
    """Returns the number of token1 tokens you can buy for a given number of 
    token0 tokens"""
    exchange_address, first_token_name, second_token_name = getExchangeAddressForTokenPair(
        token0_name, token1_name)
    exchange = web3.eth.contract(address=exchange_address, abi=exchange_abi)
    reserves = exchange.functions.getReserves().call()
    if token0_name == second_token_name:
        reserves[0], reserves[1] = reserves[1], reserves[0]

    if reserves[0] == 0 or reserves[1] == 0:
        return 0

    # TODO: replace this with the real function (commented below) once web3.py
    # supports solidity >= 0.6
    amount_out = get_amount_out__uniswap_router(
        amount * 10**Token().from_symbol(token0_name).decimals, reserves[0],
        reserves[1])
    # amount_out = self._router.functions.getAmountOut(
    #     amount * 10**token0_decimals,
    #     reserves[0],
    #     reserves[1]).call()
    return amount_out / 10**Token().from_symbol(token1_name).decimals
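
For reference, a minimal usage sketch of get_swap_amount (not part of the original example; the provider URL is a placeholder and the WETH/DAI pair is only illustrative, mirroring the commented-out calls in Example #28):

# Hypothetical usage; the node URL below is a placeholder.
from web3 import Web3

web3 = Web3(Web3.HTTPProvider("https://mainnet.example.invalid"))
# How many DAI can be bought with 1 WETH at the current reserves?
print(get_swap_amount(web3, 1, "WETH", "DAI"))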
Example #10
def get_list_of_terms(tokenizedTermList, docId):
    token_list = list()
    term_dict = list()
    # Loops through all the terms in the document and adds them to the list with their associated docId
    for term in tokenizedTermList:
        term = normalize(term)
        if term != '':
            if term not in term_dict:
                tokenObj = Token(term.encode('UTF8'), docId.encode('UTF8'))
                token_list.append(tokenObj)
                #To remove duplicates uncomment the following
                #term_dict.append(term)

    return token_list
Example #11
def numeric_val(source_code, i, table, line_num):
    """
    Processes numeric values in the source code

    Params
    ======
    source_code (string)      = The string containing simc source code
    i           (int)         = The current index in the source code
    table       (SymbolTable) = Symbol table constructed holding information about identifiers and constants
    line_num    (int)         = Line number

    Returns
    =======
    Token, int: The token generated for the numeric constant and the current position in source code,
                this is done only if there is no error in the numeric constant
    """

    numeric_constant = ""

    # Loop until we get a non-digit character
    while is_digit(source_code[i]):
        numeric_constant += source_code[i]
        i += 1

    # If a numeric constant contains more than 1 decimal point (.) then that is invalid
    if numeric_constant.count(".") > 1:
        error(
            "Invalid numeric constant, cannot have more than one decimal point in a"
            " number!",
            line_num,
        )

    # Check the length after . to distinguish between float and double
    length = len(
        numeric_constant.split(".")[1]) if "." in numeric_constant else 0

    # Determine type of numeric value
    type = "int"
    if length != 0:
        if length <= 7:
            type = "float"
        elif length >= 7:
            type = "double"

    # Make entry in symbol table
    id = table.entry(numeric_constant, type, "constant")

    # Return number token and current index in source code
    return Token("number", id, line_num), i
Example #12
def string_val(source_code, i, table, line_num, start_char='"'):
    """
    Processes string values in the source code

    Params
    ======
    source_code (string) = The string containing simc source code
    i           (int)    = The current index in the source code
    table       (SymbolTable) = Symbol table constructed holding information about identifiers and constants
    line_num    (int)         = Line number
    start_char  (str) (Optional) = Character with which string starts

    Returns
    =======
    Token, int: The token generated for the string constant and the current position in source code,
                this is done only if there is no error in the string constant
    """

    string_constant = ""

    # Skip the first " so that the string atleast makes into the while loop
    i += 1

    # Loop until the closing quote character is reached
    while source_code[i] != start_char:
        if source_code[i] == "\0":
            error("Unterminated string!", line_num)

        string_constant += source_code[i]
        i += 1

    # Skip the " character so that it does not loop back to this function incorrectly
    i += 1

    # Determine the type of data
    type = "char"
    if len(string_constant) > 1:
        type = "string"

    # Put appropriate quote
    string_constant = ('"' + string_constant +
                       '"' if type == "string" else "'" + string_constant +
                       "'")

    # Make entry in symbol table
    id = table.entry(string_constant, type, "constant")

    # Return string token and current index in source code
    return Token("string", id, line_num), i
Example #13
    def test_keyword_identifier_identifier(self):
        # Test an identifier

        source_code = "a\\0"
        i = 0
        table = SymbolTable()
        line_num = 1

        token, _ = keyword_identifier(source_code, i, table, line_num)

        other = Token("id", 1, 1)

        self.assertEqual(token, other)

        self.assertEqual(table.symbol_table, {1: ["a", "var", "variable"]})
Example #14
    def test_lexical_analyze_address_of(self):
        source_code = """var a = 1
        var *n = &a
        """

        with open("testing.simc", "w") as file:
            file.write(source_code)

        table = SymbolTable()

        tokens = lexical_analyze("testing.simc", table)

        address_of = Token("address_of", "", 2)

        self.assertEqual(tokens[-3], address_of)
Example #15
 def tokenize(cls, text):
     """
     Tokenizes the text
     :param text: the raw text
     :return: tokens (dict format)
     """
     tokens = {}
     parts = WordTokenizer.tokenize(text)
     current = 0
     id = 0
     for i, part in enumerate(parts):
         tokens[i] = Token(part)
         current += len(part)
         id += 1
     tokens = cls.get_types(tokens)
     return tokens
Example #16
def string_val(source_code, i, table, scanner_obj, start_char='"'):
    """
    Processes string values in the source code
    Params
    ======
    source_code (str)
        : The string containing simc source code
    i           (int)
        : The current index in the source code
    table       (SymbolTable)
        : Symbol table constructed holding information about identifiers and constants
    scanner_obj (Scanner)
        : Instance of Scanner class
    start_char  (str) (Optional)
        : Character with which string starts
    Returns
    =======
    (Token)
        : The token generated for the string constant
    (int)
        : Current position in source code
    """

    string_constant = ""

    # Skip the first "/' so that the string atleast makes into the while loop
    i += 1

    # Loop until the closing quote character is reached
    while source_code[i] != start_char:
        if source_code[i] == "\0":
            error("Unterminated string!", scanner_obj.line_num)

        string_constant += source_code[i]
        i += 1

    # Skip the "/' character so that it does not loop back to this function incorrectly
    i += 1

    # Put appropriate quote
    string_constant = '"' + string_constant + '"'

    # Make entry in symbol table
    id = table.entry(string_constant, "string", "constant")

    # Return string token and current index in source code
    return Token("string", id, scanner_obj.line_num), i
Example #17
def gen_unindent(scanner_obj):
    """
    Generates unindent token
    Params
    ======
    scanner_obj (Scanner)
        : Instance of Scanner class
    """

    if scanner_obj.unindentLevel > 0:
        print("Here-", scanner_obj.indentLevel)
        while scanner_obj.unindentLevel != 0:
            token = Token("unindent", "", scanner_obj.line_num)
            scanner_obj.tokens.append(token)

            scanner_obj.unindentLevel -= 1
            scanner_obj.indentLevel -= 1

        scanner_obj.isIndent = False
Example #18
 def add_r_lines(cls, current, res, tokens, id, spaces, new_lines):
     """
     Adds the \r by the list of positions
     :param current: the number of the current letter
     :param res: the list of the \r positions
     :param tokens: the tokens
     :param id: the number of the part of the text
     :param spaces: the list of the spaces positions
     :param new_lines: the list of the new lines positions \n
     :return: the new current position, the tokens, the number of the part
     """
     if current in res:
         tokens[id] = Token(u'\r')
         id += 1
         current += 1
         current, tokens, id = cls.add_spaces(current, spaces, tokens, id, new_lines, res)
         current, tokens, id = cls.add_new_lines(current, new_lines, tokens, id, res, spaces)
         current, tokens, id = cls.add_r_lines(current, res, tokens, id, spaces, new_lines)
     return current, tokens, id
Example #19
def get_reserves(web3, bpool_address):
    """Get the reserves of swappable assets, in units of tokens, of a particular 
    balancer pool.
    Returns a list like: 
    [
        (token_address, token_balance),
        (token_address, token_balance)
    ] """
    bpool = web3.eth.contract(address=bpool_address, abi=bpool_abi)
    tokens_in_pool = bpool.functions.getFinalTokens().call()
    # print('tokens in pool: {}'.format(tokens_in_pool))
    result = []

    for address in tokens_in_pool:
        # print('address: {}'.format(address))
        decimals = Token().from_address(address).decimals
        # print('--decimals: {}'.format(decimals))
        balance = bpool.functions.getBalance(address).call()
        # print('--balance: {}'.format(balance / 10**decimals))
        result.append((address, balance / 10**decimals))

    return result
Example #20
    def getNextToken(self):
        #remove space
        while self.cur_pos < self.str_len and self.str[self.cur_pos] == ' ':
            self.cur_pos += 1
        self.cur_token = Token("", "")
        #check for end of file
        if self.cur_pos >= self.str_len:
            self.cur_token = Token("", "EOF")
            return self.cur_token

        #if the first char in stopsymb: &,=><!|;(){}+-*/
        if self.str[self.cur_pos] in self.StopSymb:
            #if the char belong to consymb: & = > < | !, tell if str[cur_pos,cur_pos+1] in consymb
            if self.cur_pos + 1 < self.str_len and self.str[
                    self.cur_pos:self.cur_pos + 2] in self.MulSymb:
                self.cur_token = Token(
                    self.str[self.cur_pos:self.cur_pos + 2],
                    self.MulSymb[self.str[self.cur_pos:self.cur_pos + 2]])
                self.cur_pos += 2
            #single character in stopsymb, but a pair of characters not in MulSymb, and the single in the diff set, then it is an "error"
            elif self.str[self.cur_pos] in self.InSymb:
                self.cur_token = Token(self.str[self.cur_pos], "Error")
                self.cur_pos += 1
            #or a real stopsymb: if it's not a consymb, then it's a real stopsymb
            else:
                self.cur_token = Token(self.str[self.cur_pos],
                                       self.str[self.cur_pos])
                self.cur_pos += 1
        #else count the continues string to tell if it belongs to NUM OR REAL OR MULTI-symbol
        else:
            pos = self.cur_pos
            type_record = {"alpha": 0, "number": 0, "dot": 0}
            while pos < self.str_len and self.str[pos] != ' ' and self.str[
                    pos] not in self.StopSymb:
                if self.str[pos].isalpha():
                    type_record["alpha"] += 1
                if self.str[pos] == '.':
                    type_record["dot"] += 1
                if self.str[pos].isdigit():
                    type_record["number"] += 1
                pos += 1
            self.cur_token = Token(
                self.str[self.cur_pos:pos],
                self.type_check(self.str[self.cur_pos:pos], type_record))
            self.cur_pos = pos
        return self.cur_token
Example #21
    echo %MIST_TOKEN%

Note that the env var is only available in a NEW command window. The env
var is now permanently available to all processes running under the current
user account. The env var will not be available to other users on the same
machine.

To remove the env var value, set the env var with a blank value in a command
window (the env var will still exist, but will have no value):

    setx MIST_TOKEN ""

Or, alternatively it may be deleted via the Windows 10 GUI environmental
variable editing tool: Start > Control Panel > System & Security >
System > Advanced System Settings > Environmental Variables (User section)

"""
from token_class import Token

# create Token obj
master_token_obj = Token()

# get a temporary token so that we can do some stuff
temp_mist_token = master_token_obj.get_tmp_token()

# do some stuff here (e.g. list WLANs)
# TBA

# clean up by removing our temporary token
master_token_obj.delete_tmp_token()
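
As a side note, a minimal sketch (not part of the original module) of how the MIST_TOKEN value set via setx above could be read back from the environment in Python:

import os

# Returns None if the variable is unset or was cleared with: setx MIST_TOKEN ""
mist_token = os.environ.get("MIST_TOKEN")
if not mist_token:
    raise RuntimeError("MIST_TOKEN is not set in this shell session")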
Example #22
    async def _update(self, timeout=10.0):
        eth_prices = [
            get_price(self._w3, "DAI", "WETH"),
            get_price(self._w3, "USDT", "WETH"),
            get_price(self._w3, "USDC", "WETH"),
        ]
        # TODO: weighted average would be better than a simple average
        self.eth_price_usd = sum(eth_prices) / len(eth_prices)

        # matic_price_eth = get_price(self._w3, "WETH", "WMATIC")
        # self.matic_price_usd = matic_price_eth * self.eth_price_usd

        # swam_price_eth = get_price(self._w3, "WETH", "SWAM")
        # self.swam_price_usd = swam_price_eth * self.eth_price_usd

        total_liquidity_tokens = 0
        price_usd_weighted_average = WeightedAverage()
        # check each token that <self.currency_symbol> is paired with
        for exchange_contract in self._exchanges:
            token0_address = exchange_contract.functions.token0().call().lower(
            )
            token1_address = exchange_contract.functions.token1().call().lower(
            )
            paired_token_address = token0_address if token1_address.lower(
            ) == Token().from_symbol(
                self.currency_symbol).address.lower() else token1_address
            try:
                paired_token_symbol = Token().from_address(
                    paired_token_address).symbol
            except NoTokenMatchError:
                logging.warning(
                    f"no token with address {paired_token_address} found (need to edit token_class.py); skipping"
                )
                continue

            try:
                liquidity_tokens, liquidity_pair = get_reserves(
                    self._w3, self.currency_symbol, paired_token_symbol)
            except PairNotDefinedError:
                logging.warning(
                    f"pair {self.currency_symbol}-{paired_token_symbol} not found; skipping"
                )
                continue

            if liquidity_tokens < 0.001:
                continue

            total_liquidity_tokens += liquidity_tokens

            if paired_token_symbol == "WETH":
                self.price_eth = get_price(self._w3, paired_token_symbol,
                                           self.currency_symbol)
                price_usd_weighted_average.add(
                    self.price_eth * self.eth_price_usd, liquidity_tokens)
                self.liquidity_eth = liquidity_pair
            else:

                # get the paired token's price in Eth. If there is less than $500 in
                # liquidity to determine this, then skip this pair when determining price.
                try:
                    liquidity_eth, _ = get_reserves(self._w3, "WETH",
                                                    paired_token_symbol)
                except PairNotDefinedError:
                    logging.warning(
                        f"pair WETH-{paired_token_symbol} not found; skipping")
                    continue

                if liquidity_eth < 500 / self.eth_price_usd:
                    continue

                paired_token_price_in_eth = get_price(self._w3, "WETH",
                                                      paired_token_symbol)
                paired_token_price_in_usd = paired_token_price_in_eth * self.eth_price_usd

                # get the price <self.currency_symbol> in terms of the paired token
                price_in_paired_token = get_price(self._w3,
                                                  paired_token_symbol,
                                                  self.currency_symbol)

                price_usd_weighted_average.add(
                    price_in_paired_token * paired_token_price_in_usd,
                    liquidity_tokens)

        self.liquidity_tokens = total_liquidity_tokens
        self.price_usd = price_usd_weighted_average.average()

        try:
            self.price_eth = get_price(self._w3, "WETH", self.currency_symbol)
        except PairNotDefinedError:
            logging.warning(
                f"Failed to get WETH pair for {self.currency_symbol}; calculating backwards using average USD price"
            )
            self.price_eth = self.price_usd / self.eth_price_usd

        self.volume_tokens = await self._update_24h_volume()
        self.volume_eth = self.volume_tokens * self.price_eth
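
The method above relies on a WeightedAverage helper whose definition is not shown. A minimal sketch compatible with the calls used here (add(value, weight) and average()) might look like the following; this is an assumption, not the project's actual implementation:

class WeightedAverage:
    """Liquidity-weighted average of prices (illustrative sketch)."""

    def __init__(self):
        self._weighted_sum = 0.0
        self._total_weight = 0.0

    def add(self, value, weight):
        self._weighted_sum += value * weight
        self._total_weight += weight

    def average(self):
        # Return 0 when nothing has been added to avoid division by zero.
        if self._total_weight == 0:
            return 0
        return self._weighted_sum / self._total_weight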
Example #23
 def addToken(self, ttype: TokenType, literal: object = None):
     text = self.source[self.start : self.current]
     self.tokens.append(Token(ttype, text, literal, self.line))
Example #24
    async def _update_all_values(self,
                                 timeout=10.0,
                                 should_update_volume=False):

        if should_update_volume:
            current_eth_block = self._w3.eth.blockNumber

        self.price_eth = None

        eth_prices = [
            get_price(self._uniswap_api, "DAI", "WETH", _DEFAULT_PAIR_FEE),
            get_price(self._uniswap_api, "USDT", "WETH", _DEFAULT_PAIR_FEE),
            get_price(self._uniswap_api, "USDC", "WETH", _DEFAULT_PAIR_FEE),
        ]
        self.eth_price_usd = sum(eth_prices) / len(
            eth_prices)  # TODO: should be weighted average

        price_usd_weighted_average = WeightedAverage()
        total_liquidity_tokens = 0
        total_volume_tokens = 0

        for exchange_address in getExchangeAddressesForToken(
                self.currency_symbol):
            token0_name, token1_name, fee = getTokensFromExchangeAddress(
                exchange_address)
            token0_address = Token().from_symbol(token0_name).address
            token1_address = Token().from_symbol(token1_name).address
            #paired_token_address = token0_address if token1_address.lower() == Token().from_symbol(self.currency_symbol).address.lower() else token1_address
            #paired_token_symbol = Token().from_address(paired_token_address).symbol

            try:
                price_usd, liquidity_tokens = await self._get_price_and_liquidity_for_pair(
                    token0_address, token1_address, fee)
            except (NoTokenMatchError, PairNotDefinedError) as e:
                logging.warning(
                    f"Failed to update {self.exchange_name} pair: {str(e)}")
                continue
            except NoLiquidityException:
                # no liquidity is not an error; simply skip this exchange
                continue
            else:
                price_usd_weighted_average.add(price_usd, liquidity_tokens)
                total_liquidity_tokens += liquidity_tokens

                if should_update_volume and liquidity_tokens > _MINIMUM_ALLOWED_LIQUIDITY_TOKENS_TO_CHECK_VOLUME:
                    try:
                        volume_tokens, volume_pair = await self._get_volume_for_pair(
                            token0_address,
                            token1_address,
                            fee,
                            current_eth_block=current_eth_block,
                            timeout=timeout)
                        total_volume_tokens += volume_tokens
                    except requests.exceptions.ReadTimeout:
                        logging.warning(
                            f"Failed to update Uniswapv3API volume: ReadTimeout"
                        )

        self.price_usd = price_usd_weighted_average.average()
        self.price_eth = self.price_usd / self.eth_price_usd
        self.liquidity_tokens = total_liquidity_tokens
        self.liquidity_eth = self.liquidity_tokens * self.price_eth

        if should_update_volume:
            self.hourly_volume_tokens.append(total_volume_tokens)
            # trim list to 168 hours (7 days)
            self.hourly_volume_tokens = self.hourly_volume_tokens[-168:]
            # use last 24 hours for volume
            self.volume_tokens = sum(self.hourly_volume_tokens[-24:])
            self.volume_eth = self.volume_tokens * self.price_eth
            # NOTE: this sets _time_volume_last_updated even if all volume updates
            #       failed. This is OK for now, it throttles struggling APIs (matic) but
            #       may not be the ideal behavior.
            self._mark_volume_as_updated()
Example #25
    async def _get_volume_at_exchange_contract(self,
                                               exchange_contract,
                                               num_hours_into_past=1,
                                               timeout=10.0):
        volume_tokens = 0  # volume in units of <self.currency_symbol> tokens
        volume_pair = 0  # volume in units of the paired token

        swap_topic = "0xd78ad95fa46c994b6551d0da85fc275fe613ce37657fb8d5e3d130840159d822"
        sync_topic = "0x1c411e9a96e071241c2f21f7726b17ae89e3cab4c78be50e062b03a9fffbbad1"
        burn_topic = "0xdccd412f0b1252819cb1fd330b93224ca42612892bb3f4f789976e6d81936496"
        transfer_topic = "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
        approval_topic = "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925"
        mint_topic = "0x4c209b5fc8ad50758f13e2e1088ba56a560dff690a1c6fef26394f4c03821c4f"

        token0_address = exchange_contract.functions.token0().call()
        token1_address = exchange_contract.functions.token1().call()

        current_eth_block = self._w3.eth.blockNumber

        for event in self._w3.eth.getLogs({
                'fromBlock':
                current_eth_block -
            (int(60 * 60 * num_hours_into_past / SECONDS_PER_ETH_BLOCK)),
                'toBlock':
                current_eth_block - 1,
                'address':
                exchange_contract.address
        }):
            topic0 = self._w3.toHex(event['topics'][0])
            if topic0 == swap_topic:
                #print('swap in tx', self._w3.toHex(event['transactionHash']))
                receipt = self._w3.eth.getTransactionReceipt(
                    event['transactionHash'])
                parsed_logs = exchange_contract.events.Swap().processReceipt(
                    receipt)

                correct_log = None
                for log in parsed_logs:
                    if log.address.lower() == exchange_contract.address.lower(
                    ):
                        correct_log = log
                if correct_log is None:
                    logging.warning('bad swap transaction {}'.format(
                        self._w3.toHex(event['transactionHash'])))
                    continue

                #sender_address = correct_log.args.sender
                #to_address = correct_log.args.to
                amount0In = correct_log.args.amount0In
                amount1In = correct_log.args.amount1In
                amount0Out = correct_log.args.amount0Out
                amount1Out = correct_log.args.amount1Out
                #block_number = correct_log.blockNumber

                if Token().from_address(token0_address).symbol.lower(
                ) == self.currency_symbol.lower():
                    # token0 is the tracked currency symbol
                    volume_tokens += abs(
                        (amount0In - amount0Out) /
                        10**Token().from_address(token0_address).decimals)
                    volume_pair += abs(
                        (amount1In - amount1Out) /
                        10**Token().from_address(token1_address).decimals)
                elif Token().from_address(token1_address).symbol.lower(
                ) == self.currency_symbol.lower():
                    # token1 is the tracked currency symbol
                    volume_tokens += abs(
                        (amount1In - amount1Out) /
                        10**Token().from_address(token1_address).decimals)
                    volume_pair += abs(
                        (amount0In - amount0Out) /
                        10**Token().from_address(token0_address).decimals)

                # print('    token', getTokenNameFromAddress(token0_address), 'send to exchange', (amount0In - amount0Out) / 10**getTokenDecimalsFromAddress(token0_address), getTokenNameFromAddress(token0_address))
                # print('    token', getTokenNameFromAddress(token1_address), 'send to exchange', (amount1In - amount1Out) / 10**getTokenDecimalsFromAddress(token1_address), getTokenNameFromAddress(token1_address))

                continue

            elif topic0 == mint_topic:
                # skip liquidity deposits/withdrawals
                continue
            elif topic0 == sync_topic:
                continue
            elif topic0 == burn_topic:
                continue
            elif topic0 == transfer_topic:
                continue
            elif topic0 == approval_topic:
                continue
            else:
                logging.debug('unknown topic txhash {}'.format(
                    self._w3.toHex(event['transactionHash'])))
                logging.debug('unknown topic topic0 {}'.format(topic0))

        return volume_tokens, volume_pair
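
The fromBlock window in the getLogs call above is just a seconds-to-blocks conversion. As a quick worked example, assuming SECONDS_PER_ETH_BLOCK is roughly 13 (the constant's actual value is defined elsewhere in the project):

SECONDS_PER_ETH_BLOCK = 13  # assumed value for illustration
num_hours_into_past = 1
blocks_back = int(60 * 60 * num_hours_into_past / SECONDS_PER_ETH_BLOCK)
print(blocks_back)  # -> 276, i.e. scan roughly the last 276 blocks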
Example #26
    async def _get_volume_for_pair(self,
                                   token0_address,
                                   token1_address,
                                   fee,
                                   num_hours_into_past=1,
                                   current_eth_block=None,
                                   timeout=10.0):
        volume_tokens = 0  # volume in units of <self.currency_symbol> tokens
        volume_pair = 0  # volume in units of the paired token

        token0_address, token1_address = sorted(
            [token0_address, token1_address])
        token0_decimals = Token().from_address(token0_address).decimals
        token1_decimals = Token().from_address(token1_address).decimals

        # https://docs.uniswap.org/reference/core/interfaces/pool/IUniswapV3PoolEvents
        swap_topic = "0xc42079f94a6350d7e6235f29174924f928cc2ac818eb64fed8004e115fbcca67"
        collect_topic = "0x70935338e69775456a85ddef226c395fb668b63fa0115f5f20610b388e6ca9c0"
        # this event seems to throw when a collect occurs, but only includes the first 3 parameters?
        cloned_collect_topic = "0x0c396cd989a39f4459b5fa1aed6a9a8dcdbc45908acfd67e028cd568da98982c"

        exchange_address, _, _ = getExchangeAddressForTokenPair(
            Token().from_address(token0_address).symbol,
            Token().from_address(token1_address).symbol, fee)

        if current_eth_block is None:
            current_eth_block = self._w3.eth.blockNumber

        for event in self._w3.eth.getLogs({
                'fromBlock':
                current_eth_block -
            (int(60 * 60 * num_hours_into_past / SECONDS_PER_ETH_BLOCK)),
                'toBlock':
                current_eth_block - 1,
                'address':
                exchange_address
        }):
            topic0 = self._w3.toHex(event['topics'][0])
            if topic0 == swap_topic:
                receipt = self._w3.eth.getTransactionReceipt(
                    event['transactionHash'])
                # address sender (usually the router address)
                router_address = self._w3.toChecksumAddress(
                    event['topics'][1][-20:])
                # address recipient
                buyer_address = self._w3.toChecksumAddress(
                    event['topics'][2][-20:])

                data = event['data'][2:] if event['data'].startswith(
                    '0x') else event['data']
                # int256 amount 0 (delta of the token0 balance of the pool)
                amount_0 = from_u256_twos_complement(
                    self._w3.toInt(hexstr=data[0:64])) / 10**token0_decimals
                # int256 amount 1 (delta of the token1 balance of the pool)
                amount_1 = from_u256_twos_complement(
                    self._w3.toInt(hexstr=data[64:128])) / 10**token1_decimals
                # uint160 sqrtPriceX96 unused
                # uint128 liquidity unused
                # int24 tick unused

                # print('swap in tx', self._w3.toHex(event['transactionHash']))
                # print(f'amount_0: {amount_0}, amount_1: {amount_1}')

                if Token().from_address(token0_address).symbol.lower(
                ) == self.currency_symbol.lower():
                    # token0 is the tracked currency symbol
                    volume_tokens += abs(amount_0)
                    volume_pair += abs(amount_1)
                elif Token().from_address(token1_address).symbol.lower(
                ) == self.currency_symbol.lower():
                    # token1 is the tracked currency symbol
                    volume_tokens += abs(amount_1)
                    volume_pair += abs(amount_0)
                else:
                    raise RuntimeError(
                        f"bad swap in tx {event['transactionHash']}: token0_address:{token0_address} token1_address:{token1_address}"
                    )

                continue

            elif topic0 == collect_topic:
                # skip liquidity deposits/withdrawals
                continue
            elif topic0 == cloned_collect_topic:
                # skip liquidity deposits/withdrawals
                continue

            else:
                logging.debug('unknown topic txhash {}'.format(
                    self._w3.toHex(event['transactionHash'])))
                logging.debug('unknown topic topic0 {}'.format(topic0))
                continue

        return volume_tokens, volume_pair
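
from_u256_twos_complement is called above but not defined in this example. A plausible sketch of such a helper, inferred from the fact that raw 256-bit log words are being decoded into signed int256 amounts (an assumption, not the project's code):

def from_u256_twos_complement(value):
    """Interpret an unsigned 256-bit word as a signed int256 (illustrative)."""
    # Words with the top bit set encode negative numbers in two's complement.
    if value >= 2**255:
        return value - 2**256
    return value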
Example #27
    def visitLogicalExpr(self, expr):
        return repr(expr)

    def visitGetExpr(self, expr):
        return repr(expr)

    def visitSetExpr(self, expr):
        return repr(expr)

    def visitThisExpr(self, expr):
        return repr(expr)

    def visitSuperExpr(self, expr):
        return repr(expr)


if __name__ == "__main__":

    from expr import Binary, Grouping, Literal, Unary
    from token_class import Token
    from token_type import TokenType

    expression = Binary(
        Unary(Token(TokenType.MINUS, "-", None, 1), Literal(123)),
        Token(TokenType.STAR, "*", None, 1),
        Grouping(Literal(45.67)),
    )

    print(ASTPrinter().pprint(expression))
Example #28
def main():
    import warnings
    # Filter out 'MismatchedABI' warnings, since web3 throws a warning any time it
    # sees an event it does not recognise. Not sure why that is the case; it is very loud.
    warnings.filterwarnings("ignore", category=UserWarning)
    web3 = Web3(Web3.HTTPProvider(ETHEREUM_NODE_URL))

    price_dai = get_price(web3, "0x63A63f2cAd45fee80b242436BA71e0f462A4178E",
                          Token("WETH").address,
                          "0xB6eD7644C69416d67B522e20bC294A9a9B405B31")
    print('exchange price existing token:', price_dai)
    price_dai = get_price(web3, "0x63A63f2cAd45fee80b242436BA71e0f462A4178E",
                          Token("DAI").address,
                          "0xB6eD7644C69416d67B522e20bC294A9a9B405B31")
    print('exchange price missing token:', price_dai)

    print()
    print('volume: {}'.format(
        get_volume(web3, "0xDBCd8b30eC1C4b136e740C147112f39D41a10166")))
    print()
    print('{} 0xBTC buys 1 WETH'.format(
        get_price(
            web3,
            "0xDBCd8b30eC1C4b136e740C147112f39D41a10166",
            "0xB6eD7644C69416d67B522e20bC294A9a9B405B31",
            "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
        )))
    print()
    print('{} WETH buys 1 0xBTC'.format(
        get_price(
            web3,
            "0xDBCd8b30eC1C4b136e740C147112f39D41a10166",
            "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
            "0xB6eD7644C69416d67B522e20bC294A9a9B405B31",
        )))
    print()

    print('0xbtc and ETH reserves: {}'.format(
        get_reserves(web3, "0xDBCd8b30eC1C4b136e740C147112f39D41a10166")))
    print()
    print('balance_of_user: {}'.format(
        get_pooled_balance_for_address(
            web3, "0xDBCd8b30eC1C4b136e740C147112f39D41a10166",
            "0xA7165A762099Cc7044d67CD98a3C8699c03e28A7")))

    # print('$1 in USDC will swap for {} 0xBTC tokens'.format(get_swap_amount(web3, 1, "USDC", "0xBTC")))
    # print('$1 in DAI will swap for {} 0xBTC tokens'.format(get_swap_amount(web3, 1, "DAI", "0xBTC")))
    # print('1 0xBTC token will swap for {} DAI'.format(get_swap_amount(web3, 1, "0xBTC", "DAI")))
    # print('100 0xBTC tokens will swap for {} DAI'.format(get_swap_amount(web3, 100, "0xBTC", "DAI")))
    # print('1 ETH will swap for {} DAI'.format(get_swap_amount(web3, 1, "WETH", "DAI")))
    # print('230 DAI will swap for {} ETH'.format(get_swap_amount(web3, 230, "DAI", "WETH")))
    # print('0xbtc and ETH balances:', get_reserves(web3, "0xBTC", "WETH"))
    # # print('0xbtc and ETH price:', e.get_price("0xBTC", "WETH"), "0xBTC per ETH")
    # # print('0xbtc and ETH price:', e.get_price("WETH", "0xBTC"), "ETH per 0xBTC")
    # print()
    # print('eth usdc reserves ', get_reserves(web3, "WETH", "USDC"))
    # print('1 in ETH will swap for {} USDC '.format(get_swap_amount(web3, 1, "WETH", "USDC")))
    # print('1 in ETH will swap for {} USDT '.format(get_swap_amount(web3, 1, "WETH", "USDT")))
    # print('1 in ETH will swap for {} DAI '.format(get_swap_amount(web3, 1, "WETH", "DAI")))
    # print()

    # get some data from 0xBTC pool via Uniswapv2API
    e = BalancerAPI('0xBTC')
    e.load_once_and_print_values()
    print()
    print('0xbtc-weth liquidity in eth', e.liquidity_eth)
    print('0xbtc-weth liquidity in tokens', e.liquidity_tokens)
Example #29
 def setUp(self):
     self.token = Token("number", 1, 2)
     self.other = Token("number", 2, 2)
Example #30
def scanner(source_code, table):
    """
    Generate tokens from source code
    Params
    ======
    source_code (str)
        : Pulse source code
    table       (SymbolTable)
        : Symbol table constructed holding information about identifiers and constants
    Returns
    ========
    tokens: A list of tokens of the source code
    """

    # Create an instance of the Scanner class
    scanner_obj = Scanner()

    # Loop through the source code character by character
    i = 0

    # To store comments string
    comment_str = ""

    while source_code[i] != "\0":
        # If a digit appears, call numeric_val function and add the numeric token to list,
        # if it was correct
        if is_digit(source_code[i]):
            token, i = numeric_val(source_code, i, table, scanner_obj)
            scanner_obj.tokens.append(token)

        # If double quote appears the value is a string token
        elif source_code[i] == '"':
            token, i = string_val(source_code, i, table, scanner_obj)
            scanner_obj.tokens.append(token)

        # If single quote appears the value is a string token
        elif source_code[i] == "'":
            token, i = string_val(source_code, i, table, scanner_obj, "'")
            scanner_obj.tokens.append(token)

        # If alphabet or number appears then it might be either a keyword or an identifier
        elif is_alnum(source_code[i]):
            token, i = keyword_identifier(source_code, i, table, scanner_obj)
            scanner_obj.tokens.append(token)

        # If character is : then generate begin block token and start indentation
        elif source_code[i] == ":":
            token = Token("begin_block", "", scanner_obj.line_num)
            scanner_obj.tokens.append(token)
            scanner_obj.isIndent = True
            scanner_obj.indentLevel += 1
            i += 1

        # If character is \n then generate newline token and check for unindentation
        elif source_code[i] == "\n":
            scanner_obj.line_num += 1
            token = Token("newline", "", scanner_obj.line_num)
            scanner_obj.tokens.append(token)
            i = check_unindent(source_code, i + 1, table, scanner_obj)
            gen_unindent(scanner_obj)

        # If character is ( then generate left paren token
        elif source_code[i] == "(":
            scanner_obj.tokens.append(
                Token("left_paren", "", scanner_obj.line_num))
            i += 1

        # If character is ) then generate right paren token
        elif source_code[i] == ")":
            scanner_obj.tokens.append(
                Token("right_paren", "", scanner_obj.line_num))
            i += 1

        # Identifying Left brace token
        elif source_code[i] == "{":
            scanner_obj.tokens.append(
                Token("left_brace", "", scanner_obj.line_num))
            i += 1

        # Identifying right brace token
        elif source_code[i] == "}":
            scanner_obj.tokens.append(
                Token("right_brace", "", scanner_obj.line_num))
            i += 1

        # Identifying assignment or equal token
        elif source_code[i] == "=":
            if source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("equal", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("assignment", "", scanner_obj.line_num))
                i += 1

        # Identifying plus equal, increment or plus token
        elif source_code[i] == "+":
            if source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("plus_equal", "", scanner_obj.line_num))
                i += 2
            elif source_code[i + 1] == "+":
                scanner_obj.tokens.append(
                    Token("increment", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("plus", "", scanner_obj.line_num))
                i += 1

        # Identifying minus equal, decrement or minus token
        elif source_code[i] == "-":
            if source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("minus_equal", "", scanner_obj.line_num))
                i += 2
            elif source_code[i + 1] == "-":
                scanner_obj.tokens.append(
                    Token("decrement", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("minus", "", scanner_obj.line_num))
                i += 1

        # Identifying multiply equal or multiply token
        elif source_code[i] == "*":
            if source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("multiply_equal", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("multiply", "", scanner_obj.line_num))
                i += 1

        # Identifying single line comment token
        elif source_code[i] == "#":
            i += 1
            while source_code[i] != "\n":
                comment_str += str(source_code[i])
                i += 1
            scanner_obj.tokens.append(
                Token("single_line_comment", comment_str,
                      scanner_obj.line_num))
            comment_str = ""

        # Identifying multi line comment, divide_equal, integer_divide, divide token
        elif source_code[i] == "/":
            if source_code[i + 1] == "*":
                i += 2
                while source_code[i] != "*" and source_code[i + 1] != "/":
                    comment_str += str(source_code[i])
                    i += 1
                scanner_obj.tokens.append(
                    Token("multi_line_comment", comment_str,
                          scanner_obj.line_num))
                comment_str = ""
            elif source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("divide_equal", "", scanner_obj.line_num))
                i += 2

            elif source_code[i + 1] == "/":
                scanner_obj.tokens.append(
                    Token("integer_divide", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("divide", "", scanner_obj.line_num))
                i += 1

        # Identifying modulus equal or modulus token
        elif source_code[i] == "%":
            if source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("modulus_equal", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("modulus", "", scanner_obj.line_num))
                i += 1

        # Identifying comma token
        elif source_code[i] == ",":
            scanner_obj.tokens.append(Token("comma", "", scanner_obj.line_num))
            i += 1

        # Identifying not_equal token
        elif source_code[i] == "!" and source_code[i + 1] == "=":
            scanner_obj.tokens.append(
                Token("not_equal", "", scanner_obj.line_num))
            i += 2

        # Identifying greater_than or greater than equal token
        elif source_code[i] == ">":
            if source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("greater_than_equal", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("greater_than", "", scanner_obj.line_num))
                i += 1

        # Identifying less than or less than equal to token
        elif source_code[i] == "<":
            if source_code[i + 1] == "=":
                scanner_obj.tokens.append(
                    Token("less_than_equal", "", scanner_obj.line_num))
                i += 2
            else:
                scanner_obj.tokens.append(
                    Token("less_than", "", scanner_obj.line_num))
                i += 1

        # Identifying the token left_bracket
        elif source_code[i] == "[":
            scanner_obj.tokens.append(
                Token("token_left_bracket", "", scanner_obj.line_num))
            i += 1

        # Identifying the token right_bracket
        elif source_code[i] == "]":
            scanner_obj.tokens.append(
                Token("token_right_bracket", "", scanner_obj.line_num))
            i += 1

        # If nothing is matched then increment the index
        else:
            i += 1

    # If indentLevel is not 0 then generate unindent tokens until indentLevel is zero
    if scanner_obj.indentLevel > 0:
        while scanner_obj.indentLevel != 0:
            token = Token("unindent", "", scanner_obj.line_num)
            scanner_obj.tokens.append(token)

            scanner_obj.indentLevel -= 1

    # Return the generated tokens
    return scanner_obj.tokens
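
A hedged usage sketch for the scanner above (the outer loop stops on a NUL character, so the source string must end with "\0"; Scanner, SymbolTable and the helper functions are assumed to be importable from the same project):

# Illustrative only: tokenize a tiny snippet terminated by NUL.
table = SymbolTable()
tokens = scanner("var a = 1 + 2\n\0", table)
for tok in tokens:
    print(tok)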