Code example #1
def ml_parse_value(gmls, default=None):
    """ Parse a value in a macro-language string. """
    c = util.skip(gmls, ml_whitepace)
    sgn = -1 if c == '-' else 1
    if not c:
        raise error.RunError(error.IFC)
    if c in ('+', '-'):
        gmls.read(1)
        c = util.peek(gmls)
        # don't allow default if sign is given
        default = None
    if c == '=':
        gmls.read(1)
        c = util.peek(gmls)
        if len(c) == 0:
            raise error.RunError(error.IFC)
        elif ord(c) > 8:
            name = util.get_var_name(gmls)
            indices = ml_parse_indices(gmls)
            step = var.get_var_or_array(name, indices)
            util.require_read(gmls, (';',), err=error.IFC)
        else:
            # varptr$
            step = get_value_for_varptrstr(gmls.read(3))
    elif c in representation.ascii_digits:
        step = ml_parse_const(gmls)
    elif default is not None:
        step = default
    else:
        raise error.RunError(error.IFC)
    if sgn == -1:
        step = vartypes.number_neg(step)
    return step
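These pcbasic snippets all lean on the stream helpers util.peek and util.skip. As a point of reference, here is a minimal sketch of what such helpers might look like (a hypothetical reimplementation; the actual pcbasic util module may differ in detail):

def peek(stream, n=1):
    """ Return the next n characters without advancing the stream. """
    pos = stream.tell()
    s = stream.read(n)
    stream.seek(pos)
    return s

def skip(stream, skip_range):
    """ Consume characters in skip_range; return, but do not consume, the next one. """
    while True:
        c = peek(stream)
        if not c or c not in skip_range:
            return c
        stream.read(1)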
Code example #2
File: draw_and_play.py Project: nestormh/pcbasic
def ml_parse_value(gmls, default=None):
    """ Parse a value in a macro-language string. """
    c = util.skip(gmls, ml_whitepace)
    sgn = -1 if c == '-' else 1
    if c in ('+', '-'):
        gmls.read(1)
        c = util.peek(gmls)
        # don't allow default if sign is given
        default = None
    if c == '=':
        gmls.read(1)
        c = util.peek(gmls)
        if len(c) == 0:
            raise error.RunError(error.IFC)
        elif ord(c) > 8:
            name = util.get_var_name(gmls)
            indices = ml_parse_indices(gmls)
            step = var.get_var_or_array(name, indices)
            util.require_read(gmls, (';',), err=error.IFC)
        else:
            # varptr$
            step = get_value_for_varptrstr(gmls.read(3))
    elif c and c in string.digits:
        step = ml_parse_const(gmls)
    elif default is not None:
        step = default
    else:
        raise error.RunError(error.IFC)
    if sgn == -1:
        step = vartypes.number_neg(step)
    return step
Code example #3
File: tokenise.py Project: nestormh/pcbasic
def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text. """
    litstring, comment = False, False
    textpos = 0
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number has returned -1 and left us at: .. 00 | _00_ 00 1A
        # stream ends or end of file sequence \x00\x00\x1A
        return -1, '', 0
    elif current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    output = bytearray(str(current_line))
    # write one extra whitespace character after line number
    # unless first char is TAB
    if util.peek(ins) != '\t':
        output += bytearray(' ')
    # detokenise tokens until end of line
    while True:
        s = ins.read(1)
        if not textpos and ins.tell() >= bytepos:
            textpos = len(output)
        if s in tk.end_line:
            # \x00 ends lines and comments when listed,
            # if not inside a number constant
            # stream ended or end of line
            break
        elif s == '"':
            # start of literal string, passed verbatim
            # until a closing quote or EOL comes by
            # however number codes are *printed* as the corresponding numbers,
            # even inside comments & literals
            output += s
            litstring = not litstring
        elif s in tk.number:
            ins.seek(-1, 1)
            representation.detokenise_number(ins, output)
        elif s in tk.linenum:
            # 0D: line pointer (unsigned int) - this token should not be here;
            #     interpret as line number and carry on
            # 0E: line number (unsigned int)
            output += representation.uint_to_str(bytearray(ins.read(2)))
        elif comment or litstring or ('\x20' <= s <= '\x7E'):
            # honest ASCII
            output += s
        elif s == '\x0A':
            # LF becomes LF CR
            output += '\x0A\x0D'
        elif s <= '\x09':
            # controls that do not double as tokens
            output += s
        else:
            ins.seek(-1, 1)
            comment = detokenise_keyword(ins, output)
    return current_line, output, textpos
Code example #4
File: tokenise.py Project: boriel/pcbasic
def tokenise_word(ins, outs):
    """ Convert a keyword to tokenised form. """
    word = ''
    while True: 
        c = ins.read(1).upper()
        word += c
        # special cases 'GO     TO' -> 'GOTO', 'GO SUB' -> 'GOSUB'    
        if word == 'GO':
            pos = ins.tell()
            # GO SUB allows 1 space
            if util.peek(ins, 4) == ' SUB':
                word = 'GOSUB'
                ins.read(4)
            else:
                # GOTO allows any number of spaces
                nxt = util.skip(ins, whitespace)
                if ins.read(2) == 'TO':
                    word = 'GOTO'
                else:
                    ins.seek(pos)
            if word in ('GOTO', 'GOSUB'):
                nxt = util.peek(ins).upper()
                if nxt in name_chars:
                    ins.seek(pos)
                    word = 'GO'
        if word in keyword_to_token:
            # ignore if part of a longer name, except FN, SPC(, TAB(, USR
            if word not in ('FN', 'SPC(', 'TAB(', 'USR'):
                nxt = util.peek(ins).upper()
                if nxt in name_chars:  
                    continue
            token = keyword_to_token[word]
            # handle special case ELSE -> :ELSE
            if word == 'ELSE':
                outs.write(':' + token)
            # handle special case WHILE -> WHILE+
            elif word == 'WHILE':
                outs.write(token + tk.O_PLUS)
            else:
                outs.write(token)
            break
        # allowed names: letter + (letters, numbers, .)
        elif c not in name_chars:
            if c != '':
                word = word[:-1]
                ins.seek(-1, 1)
            outs.write(word)
            break
    return word
Code example #5
def tokenise_word(ins, outs):
    """ Convert a keyword to tokenised form. """
    word = ''
    while True:
        c = ins.read(1).upper()
        word += c
        # special cases 'GO     TO' -> 'GOTO', 'GO SUB' -> 'GOSUB'
        if word == 'GO':
            pos = ins.tell()
            # GO SUB allows 1 space
            if util.peek(ins, 4) == ' SUB':
                word = 'GOSUB'
                ins.read(4)
            else:
                # GOTO allows any number of spaces
                nxt = util.skip(ins, ascii_whitespace)
                if ins.read(2) == 'TO':
                    word = 'GOTO'
                else:
                    ins.seek(pos)
            if word in ('GOTO', 'GOSUB'):
                nxt = util.peek(ins).upper()
                if nxt and nxt in name_chars:
                    ins.seek(pos)
                    word = 'GO'
        if word in keyword_to_token:
            # ignore if part of a longer name, except FN, SPC(, TAB(, USR
            if word not in ('FN', 'SPC(', 'TAB(', 'USR'):
                nxt = util.peek(ins).upper()
                if nxt and nxt in name_chars:
                    continue
            token = keyword_to_token[word]
            # handle special case ELSE -> :ELSE
            if word == 'ELSE':
                outs.write(':' + token)
            # handle special case WHILE -> WHILE+
            elif word == 'WHILE':
                outs.write(token + tk.O_PLUS)
            else:
                outs.write(token)
            break
        # allowed names: letter + (letters, numbers, .)
        elif not c:
            outs.write(word)
            break
        elif c not in name_chars:
            word = word[:-1]
            ins.seek(-1, 1)
            outs.write(word)
            break
    return word
Code example #6
File: representation.py Project: nestormh/pcbasic
def tokenise_oct(ins, outs):
    """ Convert octal expression in Python string to number token. """
    # O is optional, could also be &777 instead of &O777
    if util.peek(ins).upper() == 'O':
        ins.read(1)
    word = ''
    while True:
        c = util.peek(ins)
        if not c or c not in string.octdigits:
            break
        else:
            word += ins.read(1)
    val = int(word, 8) if word else 0
    outs.write(tk.T_OCT + str(vartypes.value_to_uint(val)))
Code example #7
File: representation.py Project: nony05/pcbasic
def tokenise_oct(ins, outs):
    """ Convert octal expression in Python string to number token. """
    # O is optional, could also be &777 instead of &O777
    if util.peek(ins).upper() == 'O':
        ins.read(1)
    word = ''
    while True:
        c = util.peek(ins).upper()
        if not c or c not in ascii_octits:
            break
        else:
            word += ins.read(1).upper()
    val = int(word, 8) if word else 0
    outs.write(tk.T_OCT + str(vartypes.value_to_uint(val)))
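Both octal tokenisers reduce to the same numeric core: the optional O after & is skipped, octal digits are collected greedily, and an empty digit string yields 0, so a bare & or &O tokenises as zero. That core is plain Python and easy to check in isolation:

for word in ('777', '17', ''):
    val = int(word, 8) if word else 0
    print(word or '<empty>', val)  # 777 -> 511, 17 -> 15, <empty> -> 0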
Code example #8
def auto_step():
    """ Generate an AUTO line number and wait for input. """
    numstr = str(state.basic_state.auto_linenum)
    console.write(numstr)
    if state.basic_state.auto_linenum in state.basic_state.line_numbers:
        console.write('*')
        line = bytearray(console.wait_screenline(from_start=True))
        if line[:len(numstr)+1] == numstr+'*':
            line[len(numstr)] = ' '
    else:
        console.write(' ')
        line = bytearray(console.wait_screenline(from_start=True))
    # run or store it; don't clear lines or raise undefined line number
    state.basic_state.direct_line = tokenise.tokenise_line(line)
    c = util.peek(state.basic_state.direct_line)
    if c == '\0':
        # check for lines starting with numbers (6553 6) and empty lines
        empty, scanline = program.check_number_start(state.basic_state.direct_line)
        if not empty:
            program.store_line(state.basic_state.direct_line)
            reset.clear()
        state.basic_state.auto_linenum = scanline + state.basic_state.auto_increment
    elif c != '':
        # it is a command, go and execute
        state.basic_state.execute_mode = True
Code example #9
File: interpreter.py Project: gilsim12/pcbasic
def auto_step(self):
    """ Generate an AUTO line number and wait for input. """
    numstr = str(state.basic_state.auto_linenum)
    console.write(numstr)
    if state.basic_state.auto_linenum in state.basic_state.line_numbers:
        console.write('*')
        line = bytearray(console.wait_screenline(from_start=True))
        if line[:len(numstr)+1] == numstr+'*':
            line[len(numstr)] = ' '
    else:
        console.write(' ')
        line = bytearray(console.wait_screenline(from_start=True))
    # run or store it; don't clear lines or raise undefined line number
    state.basic_state.direct_line = tokenise.tokenise_line(line)
    c = util.peek(state.basic_state.direct_line)
    if c == '\0':
        # check for lines starting with numbers (6553 6) and empty lines
        empty, scanline = program.check_number_start(state.basic_state.direct_line)
        if not empty:
            program.store_line(state.basic_state.direct_line)
            reset.clear()
        state.basic_state.auto_linenum = scanline + state.basic_state.auto_increment
    elif c != '':
        # it is a command, go and execute
        state.basic_state.parse_mode = True
Code example #10
    def _users():
        for sample in partition_by(samples, firstitem):
            first, sample = peek(sample)
            pids = HASHED_MAP.get(first[0])
            if not pids:
                # skip samples in diet_data that are missing in
                # taxonomy data
                msg = "Unable to find subject %s in map.txt" % (first[0])
                print >> sys.stderr, msg
                continue
            idxs = list(sorted(int(p.split('.', 1)[1]) for p in pids))

            user = models.User(pids[0].split('.', 1)[0], db=db, load=True)
            user.state[models.User.DIET_KEY] = {
                "instances": list(),
                "averages": None
            }
            idxs = set(idxs)
            user_avg = dict([(k, [0, 0, 0, 0, 0]) for k in fields])
            for i, instance in enumerate(sample):
                d = dict(zip(fields, instance[1:]))
                if i in idxs:
                    user.state[models.User.DIET_KEY]['instances'].append(d)
                update_with(user_avg, d, _update_count)
                update_with(global_avg, d, _update_count)
            user.state[models.User.DIET_KEY]["averages"] = user_avg
            yield user
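The peek used by this generator is not the stream helper from the pcbasic examples but an iterator utility: it returns the first item together with an iterator over the full sequence. Hypothetical implementations of the two helpers this code assumes (the project's actual versions may differ):

from itertools import chain, groupby

def peek(iterable):
    """ Return (first_item, iterator), with the first item pushed back on. """
    it = iter(iterable)
    first = next(it)
    return first, chain([first], it)

def partition_by(iterable, keyfunc):
    """ Yield runs of consecutive items that share the same key. """
    for _, group in groupby(iterable, keyfunc):
        yield group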
Code example #11
File: tokenise.py Project: boriel/pcbasic
def tokenise_jump_number(ins, outs):
    """ Convert an ascii line number pointer to tokenised form. """
    word = tokenise_uint(ins)
    if word != '':
        outs.write(tk.T_UINT + word)
    elif util.peek(ins) == '.':
        ins.read(1)
        outs.write('.')
Code example #12
File: tokenise.py Project: gilsim12/pcbasic
def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text. """
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number has returned -1 and left us at: .. 00 | _00_ 00 1A
        # stream ends or end of file sequence \x00\x00\x1A
        return -1, '', 0
    elif current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    linum = bytearray(str(current_line))
    # write one extra whitespace character after line number
    # unless first char is TAB
    if util.peek(ins) != '\t':
        linum += bytearray(' ')
    line, textpos = detokenise_compound_statement(ins, bytepos)
    return current_line, linum + line, textpos + len(linum) + 1
Code example #13
def save_all(users, binsize=1000):
    first, users = peek(iter(users))
    db = first.db
    for chunk in partition(users, binsize):
        batch = leveldb.WriteBatch()
        for user in takewhile(operator.truth, chunk):
            user.save()
        db.Write(batch, sync=True)
Code example #14
File: representation.py Project: Yungzuck/pcbasic
def tokenise_oct(ins, outs):
    """ Convert octal expression in Python string to number token. """
    # O is optional, could also be &777 instead of &O777
    if util.peek(ins).upper() == 'O':
        ins.read(1)
    word = ''
    while True:
        c = util.peek(ins)
        # oct literals may be interrupted by whitespace
        if c and c in number_whitespace:
            ins.read(1)
        elif not c or c not in string.octdigits:
            break
        else:
            word += ins.read(1)
    val = int(word, 8) if word else 0
    outs.write(tk.T_OCT + str(vartypes.integer_to_bytes(vartypes.int_to_integer_unsigned(val))))
Code example #15
File: tokenise.py Project: Yungzuck/pcbasic
def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text. """
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number has returned -1 and left us at: .. 00 | _00_ 00 1A
        # stream ends or end of file sequence \x00\x00\x1A
        return -1, '', 0
    elif current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    linum = bytearray(str(current_line))
    # write one extra whitespace character after line number
    # unless first char is TAB
    if util.peek(ins) != '\t':
        linum += bytearray(' ')
    line, textpos = detokenise_compound_statement(ins, bytepos)
    return current_line, linum + line, textpos + len(linum) + 1
Code example #16
File: tokenise.py Project: boriel/pcbasic
def tokenise_data(ins, outs):
    """ Pass DATA as is, till end of statement, except for literals. """
    while True:
        outs.write(ascii_read_to(ins, ('', '\r', '\0', ':', '"')))
        if util.peek(ins) == '"':
            # string literal in DATA
            tokenise_literal(ins, outs)
        else:
            break
Code example #17
File: rules.py Project: senegrom/GridPuzzle
    def __init__(self,
                 gsz: util.GridSizeContainer,
                 cells: Optional[Iterable[IdxType]] = None,
                 cell_creator: Optional[CellCreatorType] = None):

        self.cells: ArrayType
        self._rows: int = gsz.rows
        self._cols: int = gsz.cols
        self._max_elem: int = gsz.max_elem
        if cells is not None:
            first, cells = util.peek(cells)
        else:
            first, cells = util.peek(cell_creator(self))

        rc = self._rows * self._cols
        if not isinstance(first, numbers.Integral):
            cells = (keyx + keyy * gsz.rows for keyx, keyy in cells
                     if 0 <= keyx < self._rows and 0 <= keyy < self._cols)
        self.cells = array('i', (cell for cell in cells if 0 <= cell < rc))
        self.len_cells: int = len(self.cells)
Code example #18
File: representation.py Project: nestormh/pcbasic
def tokenise_hex(ins, outs):
    """ Convert hex expression in Python string to number token. """
    ins.read(1)
    word = ''
    while True:
        c = util.peek(ins)
        if not c or c not in string.hexdigits:
            break
        else:
            word += ins.read(1)
    val = int(word, 16) if word else 0
    outs.write(tk.T_HEX + str(vartypes.value_to_uint(val)))
Code example #19
File: representation.py Project: nony05/pcbasic
def tokenise_hex(ins, outs):
    """ Convert hex expression in Python string to number token. """
    ins.read(1)
    word = ''
    while True:
        c = util.peek(ins).upper()
        if not c or c not in ascii_hexits:
            break
        else:
            word += ins.read(1).upper()
    val = int(word, 16) if word else 0
    outs.write(tk.T_HEX + str(vartypes.value_to_uint(val)))
Code example #20
File: flow.py Project: Yungzuck/pcbasic
def read_entry():
    """ READ a unit of DATA. """
    current = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(state.basic_state.data_pos)
    if util.peek(state.basic_state.bytecode) in tk.end_statement:
        # initialise - find first DATA
        util.skip_to(state.basic_state.bytecode, ('\x84', ))  # DATA
    if state.basic_state.bytecode.read(1) not in ('\x84', ','):
        raise error.RunError(error.OUT_OF_DATA)
    vals, word, literal = '', '', False
    while True:
        # read next char; omit leading whitespace
        if not literal and vals == '':
            c = util.skip_white(state.basic_state.bytecode)
        else:
            c = util.peek(state.basic_state.bytecode)
        # parse char
        if c == '' or (not literal and c == ',') or (
                c in tk.end_line or (not literal and c in tk.end_statement)):
            break
        elif c == '"':
            state.basic_state.bytecode.read(1)
            literal = not literal
            if not literal:
                util.require(state.basic_state.bytecode,
                             tk.end_statement + (',', ))
        else:
            state.basic_state.bytecode.read(1)
            if literal:
                vals += c
            else:
                word += c
            # omit trailing whitespace
            if c not in tk.whitespace:
                vals += word
                word = ''
    state.basic_state.data_pos = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(current)
    return vals
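Note the save/restore pattern here: read_entry remembers the interpreter's current bytecode position, seeks to the stored DATA pointer, consumes exactly one unit, then records the new DATA pointer and seeks back, so a READ never disturbs the statement being executed.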
Code example #21
File: flow.py Project: boriel/pcbasic
def read_entry():
    """ READ a unit of DATA. """
    current = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(state.basic_state.data_pos)
    if util.peek(state.basic_state.bytecode) in util.end_statement:
        # initialise - find first DATA
        util.skip_to(state.basic_state.bytecode, ('\x84',))  # DATA
    if state.basic_state.bytecode.read(1) not in ('\x84', ','):
        # out of DATA
        raise error.RunError(4)
    vals, word, literal = '', '', False
    while True:
        # read next char; omit leading whitespace
        if not literal and vals == '':    
            c = util.skip_white(state.basic_state.bytecode)
        else:
            c = util.peek(state.basic_state.bytecode)
        # parse char
        if c == '' or (not literal and c == ',') or (c in util.end_line or (not literal and c in util.end_statement)):
            break
        elif c == '"':
            state.basic_state.bytecode.read(1)
            literal = not literal
            if not literal:
                util.require(state.basic_state.bytecode, util.end_statement+(',',))
        else:        
            state.basic_state.bytecode.read(1)
            if literal:
                vals += c
            else:
                word += c
            # omit trailing whitespace                        
            if c not in util.whitespace:    
                vals += word
                word = ''
    state.basic_state.data_pos = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(current)
    return vals
Code example #22
File: representation.py Project: Yungzuck/pcbasic
def tokenise_hex(ins, outs):
    """ Convert hex expression in Python string to number token. """
    # pass the H in &H
    ins.read(1)
    word = ''
    while True:
        c = util.peek(ins)
        # hex literals must not be interrupted by whitespace
        if not c or c not in string.hexdigits:
            break
        else:
            word += ins.read(1)
    val = int(word, 16) if word else 0
    outs.write(tk.T_HEX + str(vartypes.integer_to_bytes(vartypes.int_to_integer_unsigned(val))))
Code example #23
File: representation.py Project: Yungzuck/pcbasic
def tokenise_number(ins, outs):
    """ Convert Python-string number representation to number token. """
    c = util.peek(ins)
    if not c:
        return
    elif c == '&':
        # handle hex or oct constants
        ins.read(1)
        if util.peek(ins).upper() == 'H':
            # hex constant
            tokenise_hex(ins, outs)
        else:
            # octal constant
            tokenise_oct(ins, outs)
    elif c in string.digits + '.+-':
        # handle other numbers
        # note GW passes signs separately as a token
        # and only stores positive numbers in the program
        tokenise_dec(ins, outs)
    else:
        # why is this here?
        # this looks wrong but hasn't hurt so far
        ins.seek(-1, 1)
Code example #24
def store_line(line):
    """ Store a program line or schedule a command line for execution. """
    if not line:
        return True
    state.basic_state.direct_line = tokenise.tokenise_line(line)
    c = util.peek(state.basic_state.direct_line)
    if c == '\0':
        # check for lines starting with numbers (6553 6) and empty lines
        program.check_number_start(state.basic_state.direct_line)
        program.store_line(state.basic_state.direct_line)
        reset.clear()
    elif c != '':
        # it is a command, go and execute
        state.basic_state.execute_mode = True
    return not state.basic_state.execute_mode
Code example #25
File: interpreter.py Project: gilsim12/pcbasic
def store_line(self, line):
    """ Store a program line or schedule a command line for execution. """
    if not line:
        return True
    state.basic_state.direct_line = tokenise.tokenise_line(line)
    c = util.peek(state.basic_state.direct_line)
    if c == '\0':
        # check for lines starting with numbers (6553 6) and empty lines
        program.check_number_start(state.basic_state.direct_line)
        program.store_line(state.basic_state.direct_line)
        reset.clear()
    elif c != '':
        # it is a command, go and execute
        state.basic_state.parse_mode = True
    return not state.basic_state.parse_mode
Code example #26
def convert_infix_to_postfix(token_list: List[str]):
    # Reference: https://www.geeksforgeeks.org/stack-set-2-infix-to-postfix/
    result = []
    operator_stack = []
    for token in token_list:
        if is_operand(token):
            result.append(token)
        elif is_opening_bracket(token):
            operator_stack.append(token)
        elif is_closing_bracket(token):
            while len(operator_stack) > 0 and peek(
                    operator_stack) != get_corresponding_opening_bracket(
                        token):
                result.append(operator_stack.pop())
            # discard opening bracket
            if len(operator_stack) > 0:
                operator_stack.pop()
            else:
                raise ExpressionSyntaxError(
                    "Cannot find corresponding opening bracket of: " + token)
        elif is_operator(token):
            while len(operator_stack) > 0 and is_operator(peek(operator_stack)) and\
                    (get_precedence(token) < get_precedence(peek(operator_stack))
                     or get_precedence(token) == get_precedence(peek(operator_stack)) and is_left_associative(token)):
                result.append(operator_stack.pop())
            operator_stack.append(token)
        else:
            raise ExpressionSyntaxError("Token is not supported: " + token)

    while len(operator_stack) > 0:
        if is_operator(peek(operator_stack)):
            result.append(operator_stack.pop())
        else:
            raise ExpressionSyntaxError("Invalid expression")

    return result
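In this snippet peek is a third variant again: the classic top-of-stack accessor for a Python list, presumably something as small as (hypothetical):

def peek(stack):
    """ Return the top element of a list used as a stack, without popping. """
    return stack[-1]

With the usual precedence rules this converts, for example, the token list ['2', '+', '3', '*', '4'] into the postfix sequence ['2', '3', '4', '*', '+'].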
Code example #27
def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text. """
    litstring, comment = False, False
    textpos = 0
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number has returned -1 and left us at: .. 00 | _00_ 00 1A
        # stream ends or end of file sequence \x00\x00\x1A
        return -1, '', 0
    elif current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    # write one extra whitespace character after line number
    output = bytearray(str(current_line) + ' ')
    # detokenise tokens until end of line
    while True:
        s = ins.read(1)
        if not textpos and ins.tell() >= bytepos:
            textpos = len(output)
        if s in tk.end_line:
            # \x00 ends lines and comments when listed,
            # if not inside a number constant
            # stream ended or end of line
            break
        elif s == '"':
            # start of literal string, passed verbatim
            # until a closing quote or EOL comes by
            # however number codes are *printed* as the corresponding numbers,
            # even inside comments & literals
            output += s
            litstring = not litstring
        elif s in tk.number:
            ins.seek(-1, 1)
            representation.detokenise_number(ins, output)
        elif s in tk.linenum:
            # 0D: line pointer (unsigned int) - this token should not be here;
            #     interpret as line number and carry on
            # 0E: line number (unsigned int)
            output += representation.uint_to_str(bytearray(ins.read(2)))
        elif comment or litstring or ('\x20' <= s <= '\x7e'):
            # honest ASCII
            output += s
        else:
            ins.seek(-1, 1)
            comment = detokenise_keyword(ins, output)
    return current_line, output, textpos
Code example #28
File: print_and_input.py Project: nestormh/pcbasic
def get_number_tokens(fors):
    """ Get consecutive number-related formatting tokens. """
    word, digits_before, decimals = '', 0, 0
    # + comes first
    leading_plus = (util.peek(fors) == '+')
    if leading_plus:
        word += fors.read(1)
    # $ and * combinations
    c = util.peek(fors)
    if c in ('$', '*'):
        word += fors.read(2)
        if word[-1] != c:
            fors.seek(-len(word), 1)
            return '', 0, 0
        if c == '*':
            digits_before += 2
            if util.peek(fors) == '$':
                word += fors.read(1)
        else:
            digits_before += 1
    # number field
    c = util.peek(fors)
    dot = (c == '.')
    if dot:
        word += fors.read(1)
    if c in ('.', '#'):
        while True:
            c = util.peek(fors)
            if not dot and c == '.':
                word += fors.read(1)
                dot = True
            elif c == '#' or (not dot and c == ','):
                word += fors.read(1)
                if dot:
                    decimals += 1
                else:
                    digits_before += 1
            else:
                break
    if digits_before + decimals == 0:
        fors.seek(-len(word), 1)
        return '', 0, 0
    # post characters
    if util.peek(fors, 4) == '^^^^':
        word += fors.read(4)
    if not leading_plus and util.peek(fors) in ('-', '+'):
        word += fors.read(1)
    return word, digits_before, decimals
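Tracing get_number_tokens on the field '$$##.##' makes the counting concrete: the '$$' pair reserves one leading digit position, the two '#' before the point add two more, and the two after it count as decimals, so the call returns ('$$##.##', 3, 2).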
Code example #29
File: representation.py Project: boriel/pcbasic
def get_string_tokens(fors):
    """ Get consecutive string-related formatting tokens. """
    word = ''
    c = util.peek(fors)
    if c in ('!', '&'):
        word += fors.read(1)
    elif c == '\\':
        word += fors.read(1)
        # count the width of the \ \ token;
        # only spaces allowed and closing \ is necessary
        while True:
            c = fors.read(1)
            word += c
            if c == '\\':
                break
            elif c != ' ':  # can be empty as well
                fors.seek(-len(word), 1)
                return ''
    return word
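For reference, the PRINT USING string fields recognised here are: '!' (first character only), '&' (the whole string), and a backslash pair whose field width is the number of enclosed spaces plus two; any non-space character between the backslashes invalidates the field, which is why the code rewinds and returns ''.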
Code example #30
def _users():
    for chunk in partition_by(metadata, getpid):
        first, chunk = peek(chunk)
        pid, instance_num = first[-1].split(".")
        user = models.User(pid, db=db, load=True)
        user_avg = dict()
        user.state[models.User.TAXA_KEY] = {
            "instances": list(),
            "averages": None
        }
        for sampleid, otu_fname, hashed in chunk:
            phy_dist = phylum_abd(otu_fname)
            user.state[models.User.TAXA_KEY]["instances"].append(phy_dist)
            update_with(user_avg, phy_dist, add, missingval=0)
            update_with(global_phylum, phy_dist, add, missingval=0)
        user.state[models.User.TAXA_KEY]["averages"] = avg_by_vals(
            user_avg)
        yield user
Code example #31
def update_db_diet(diet_fname):
    first, samples = peek(process_diet.load_samples(diet_fname))
    fields = first._fields[1:]
    global_avg = dict([(k, [0, 0, 0, 0, 0]) for k in fields])

    def _update_count(current, new):
        current[new] += 1
        return current

    def _users():
        for sample in partition_by(samples, firstitem):
            first, sample = peek(sample)
            pids = HASHED_MAP.get(first[0])
            if not pids:
                # skip samples in diet_data that are missing in
                # taxonomy data
                msg = "Unable to find subject %s in map.txt" % (first[0])
                print >> sys.stderr, msg
                continue
            idxs = list(sorted(int(p.split('.', 1)[1]) for p in pids))

            user = models.User(pids[0].split('.', 1)[0], db=db, load=True)
            user.state[models.User.DIET_KEY] = {
                "instances": list(),
                "averages": None
            }
            idxs = set(idxs)
            user_avg = dict([(k, [0, 0, 0, 0, 0]) for k in fields])
            for i, instance in enumerate(sample):
                d = dict(zip(fields, instance[1:]))
                if i in idxs:
                    user.state[models.User.DIET_KEY]['instances'].append(d)
                update_with(user_avg, d, _update_count)
                update_with(global_avg, d, _update_count)
            user.state[models.User.DIET_KEY]["averages"] = user_avg
            yield user

    models.save_all(_users())
    global_user = models.User(models.GLOBAL_PID, db=db, load=True)
    global_user.state[models.User.DIET_KEY] = global_avg
    global_user.state[models.User.MTIME_KEY] = NOW
    global_user.save()
Code example #32
File: tokenise.py Project: boriel/pcbasic
def tokenise_line_number(ins, outs):
    """ Convert an ascii line number to tokenised start-of-line. """
    linenum = tokenise_uint(ins)
    if linenum != '':
        # terminates last line and fills up the first char in the buffer
        # (that would be the magic number when written to file)
        # in direct mode, we'll know to expect a line number if the output
        # starts with a  00
        outs.write('\0')
        # write line number. first two bytes are for internal use
        # & can be anything nonzero; we use this.
        outs.write('\xC0\xDE' + linenum)
        # ignore single whitespace after line number, if any,
        # unless line number is zero (as does GW)
        if util.peek(ins) == ' ' and linenum != '\0\0':
            ins.read(1)
    else:
        # direct line; internally, we need an anchor for the program pointer,
        # so we encode a ':'
        outs.write(':')
Code example #33
    def _users():
        for userchunk in partition_by(metadata, getpid):
            first, rest = peek(userchunk)
            rest = list(rest)
            pid = getpid(first)
            user = models.User(pid, db=db, load=True)
            if pid in f_userpcoa:
                user.state[
                    models.User.filtered.PCOA_USER_KEY] = f_userpcoa[pid]
            else:
                user.state[models.User.PCOA_USER_KEY] = userpcoa[pid]

            instances = [k[-1] for k in rest]
            if not all(k in f_samplepcoa for k in instances):
                user.state[models.User.PCOA_SAMPLE_KEY] = [
                    samplepcoa[k] for k in instances
                ]
            else:
                user.state[models.User.filtered.PCOA_SAMPLE_KEY] = [
                    f_samplepcoa[k] for k in instances
                ]
            yield user
Code example #34
def convert_to_token_list(expression):
    # TODO: consider handling sign +/-
    result = []
    for i in range(len(expression)):
        character = expression[i]
        if check_is_part_of_number(character) and len(result) > 0:
            last_token = peek(result)
            if check_is_a_number(last_token):
                result[len(result) - 1] = last_token + character
            else:
                result.append(character)
        else:
            result.append(character)

    # replace - sign with neg operator
    for i in range(len(result)):
        if i == 0 or i > 0 and (is_opening_bracket(result[i-1]) or is_operator(result[i-1])):
            if result[i] == "-":
                result[i] = "neg"
            elif result[i] == "+":
                result[i] = "pos"

    return result
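The helper functions are not shown; with minimal stand-in definitions (hypothetical, assuming single-character operators) the tokeniser can be exercised end to end:

def check_is_part_of_number(ch):
    return ch.isdigit() or ch == '.'

def check_is_a_number(token):
    return token.replace('.', '', 1).isdigit()

def peek(seq):
    return seq[-1]

def is_opening_bracket(token):
    return token in '([{'

def is_operator(token):
    return token in '+-*/^'

convert_to_token_list('12+(-3)')  # -> ['12', '+', '(', 'neg', '3', ')']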
Code example #35
File: representation.py Project: Yungzuck/pcbasic
def tokenise_dec(ins, outs):
    """ Convert decimal expression in Python string to number token. """
    have_exp = False
    have_point = False
    word = ''
    kill = False
    while True:
        c = ins.read(1).upper()
        if not c:
            break
        elif c in '\x1c\x1d\x1f':
            # ASCII separator chars invariably lead to zero result
            kill = True
        elif c == '.' and not have_point and not have_exp:
            have_point = True
            word += c
        elif c in 'ED' and not have_exp:
            # there's a special exception for number followed by EL or EQ
            # presumably meant to protect ELSE and maybe EQV ?
            if c == 'E' and util.peek(ins).upper() in ('L', 'Q'):
                ins.seek(-1, 1)
                break
            else:
                have_exp = True
                word += c
        elif c in '-+' and (not word or word[-1] in 'ED'):
            # must be first token or in exponent
            word += c
        elif c in string.digits:
            word += c
        elif c in number_whitespace:
            # we'll remove this later but need to keep it for now
            # so we can reposition the stream on removing trailing whitespace
            word += c
        elif c in '!#' and not have_exp:
            word += c
            break
        elif c == '%':
            # swallow a %, but break parsing
            break
        else:
            ins.seek(-1, 1)
            break
    # ascii separators encountered: zero output
    if kill:
        word = '0'
    # don't claim trailing whitespace
    while len(word) > 0 and (word[-1] in number_whitespace):
        word = word[:-1]
        ins.seek(-1, 1)  # even if c==''
    # remove all internal whitespace
    trimword = ''
    for c in word:
        if c not in number_whitespace:
            trimword += c
    word = trimword
    # write out the numbers
    if len(word) == 1 and word in string.digits:
        # digit
        outs.write(chr(0x11 + str_to_int(word)))
    elif (not (have_exp or have_point or word[-1] in '!#')
          and str_to_int(word) <= 0x7fff and str_to_int(word) >= -0x8000):
        if str_to_int(word) <= 0xff and str_to_int(word) >= 0:
            # one-byte constant
            outs.write(tk.T_BYTE + chr(str_to_int(word)))
        else:
            # two-byte constant
            outs.write(tk.T_INT + str(
                vartypes.integer_to_bytes(
                    vartypes.int_to_integer_signed(str_to_int(word)))))
    else:
        mbf = str(str_to_float(word).to_bytes())
        if len(mbf) == 4:
            outs.write(tk.T_SINGLE + mbf)
        else:
            outs.write(tk.T_DOUBLE + mbf)
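As the final branch shows, tokenised numbers come in five encodings: a single digit d becomes the one-byte token chr(0x11 + d), small non-negative integers become tk.T_BYTE plus one byte, other 16-bit integers tk.T_INT plus two bytes, and everything else tk.T_SINGLE or tk.T_DOUBLE followed by a four- or eight-byte MBF float.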
Code example #36
def tokenise_line(line):
    """ Convert an ascii program line to tokenised form. """
    ins = StringIO(line)
    outs = StringIO()
    # skip whitespace at start of line
    d = util.skip(ins, ascii_whitespace)
    if d == '':
        # empty line at EOF
        return outs
    # read the line number
    tokenise_line_number(ins, outs)
    # expect line number
    allow_jumpnum = False
    # expect number (6553 6 -> the 6 is encoded as \x17)
    allow_number = True
    # flag for SPC( or TAB( as numbers can follow the closing bracket
    spc_or_tab = False
    # parse through elements of line
    while True:
        # peek next character, convert to uppercase
        c = util.peek(ins).upper()
        # anything after NUL is ignored till EOL
        if c == '\0':
            ins.read(1)
            ascii_read_to(ins, ('', '\r'))
            break
        # end of line
        elif c in ('', '\r'):
            break
        # handle whitespace
        elif c in ascii_whitespace:
            ins.read(1)
            outs.write(c)
        # handle string literals
        elif util.peek(ins) == '"':
            tokenise_literal(ins, outs)
        # handle jump numbers
        elif allow_number and allow_jumpnum and c in ascii_digits + '.':
            tokenise_jump_number(ins, outs)
        # handle numbers
        # numbers following var names with no operator or token in between
        # should not be parsed, eg OPTION BASE 1
        # note we don't include leading signs, encoded as unary operators
        # number starting with . or & are always parsed
        elif c in ('&', '.') or (allow_number and not allow_jumpnum
                                 and c in ascii_digits):
            representation.tokenise_number(ins, outs)
        # operator keywords ('+', '-', '=', '/', '\\', '^', '*', '<', '>'):
        elif c in ascii_operators:
            ins.read(1)
            # operators don't affect line number mode - can do line number
            # arithmetic and RENUM will do the strangest things
            # this allows for 'LIST 100-200' etc.
            outs.write(keyword_to_token[c])
            allow_number = True
        # special case ' -> :REM'
        elif c == "'":
            ins.read(1)
            outs.write(':' + tk.REM + tk.O_REM)
            tokenise_rem(ins, outs)
        # special case ? -> PRINT
        elif c == '?':
            ins.read(1)
            outs.write(tk.PRINT)
            allow_number = True
        # keywords & variable names
        elif c in ascii_uppercase:
            word = tokenise_word(ins, outs)
            # handle non-parsing modes
            if (word in ('REM', "'")
                    or (word == 'DEBUG' and word in keyword_to_token)):
                tokenise_rem(ins, outs)
            elif word == "DATA":
                tokenise_data(ins, outs)
            else:
                allow_jumpnum = (word in linenum_words)
                # numbers can follow tokenised keywords
                # (which does not include the word 'AS')
                allow_number = (word in keyword_to_token)
                if word in ('SPC(', 'TAB('):
                    spc_or_tab = True
        else:
            ins.read(1)
            if c in (',', '#', ';'):
                # can separate numbers as well as jumpnums
                allow_number = True
            elif c in ('(', '['):
                allow_jumpnum, allow_number = False, True
            elif c == ')' and spc_or_tab:
                spc_or_tab = False
                allow_jumpnum, allow_number = False, True
            else:
                allow_jumpnum, allow_number = False, False
            # replace all other nonprinting chars by spaces;
            # the CP437 HOUSE glyph (0x7f) is allowed.
            outs.write(c if ord(c) >= 32 and ord(c) <= 127 else ' ')
    outs.seek(0)
    return outs
Code example #37
def eof(self):
    """ Check for end of file EOF. """
    # for EOF(i)
    if self.mode in ('A', 'O'):
        return False
    return (util.peek(self.fhandle) in ('', '\x1a'))
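The '\x1a' tested for here is the DOS Ctrl-Z end-of-file marker; since util.peek does not consume it, the EOF check can be repeated without moving the file pointer.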
Code example #38
File: expressions.py Project: gilsim12/pcbasic
def parse_expression(ins, allow_empty=False):
    """ Compute the value of the expression at the current code pointer. """
    stack = deque()
    units = deque()
    d = ''
    missing_error = error.MISSING_OPERAND
    # see https://en.wikipedia.org/wiki/Shunting-yard_algorithm
    while True:
        last = d
        d = util.skip_white(ins)
        # two-byte function tokens
        if d in tk.twobyte:
            d = util.peek(ins, n=2)
        if d == tk.NOT and not (last in operators or last == ''):
            # unary NOT ends expression except after another operator or at start
            break
        elif d in operators:
            ins.read(len(d))
            # get combined operators such as >=
            if d in combinable:
                nxt = util.skip_white(ins)
                if nxt in combinable:
                    d += ins.read(len(nxt))
            if last in operators or last == '' or d == tk.NOT:
                # also if last is ( but that leads to recursive call and last == ''
                nargs = 1
                # zero operands for a binary operator is always syntax error
                # because it will be seen as an illegal unary
                if d not in unary:
                    raise error.RunError(error.STX)
            else:
                nargs = 2
                _evaluate_stack(stack, units, operators[d], error.STX)
            stack.append((d, nargs))
        elif not (last in operators or last == ''):
            # repeated unit ends expression
            # repeated literals or variables or non-keywords like 'AS'
            break
        elif d == '(':
            units.append(parse_bracket(ins))
        elif d and d in string.ascii_letters:
            # variable name
            name, indices = parse_variable(ins)
            units.append(var.get_variable(name, indices))
        elif d in functions:
            # apply functions
            ins.read(len(d))
            try:
                units.append(functions[d](ins))
            except (ValueError, ArithmeticError) as e:
                units.append(_handle_math_error(e))
        elif d in tk.end_statement:
            break
        elif d in tk.end_expression or d in tk.keyword:
            # missing operand inside brackets or before comma is syntax error
            missing_error = error.STX
            break
        else:
            # literal
            units.append(parse_literal(ins))
    # empty expression is a syntax error (inside brackets)
    # or Missing Operand (in an assignment)
    # or not an error (in print and many functions)
    if units or stack:
        _evaluate_stack(stack, units, 0, missing_error)
        return units[0]
    elif allow_empty:
        return None
    else:
        raise error.RunError(missing_error)
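parse_expression defers to an _evaluate_stack helper that is not shown. A plausible sketch, assuming operators maps each operator token to its precedence and apply_operator (a hypothetical name) performs the actual operation:

def _evaluate_stack(stack, units, precedence, missing_err):
    """ Pop and apply pending operators of at least the given precedence (hypothetical sketch). """
    while stack and operators[stack[-1][0]] >= precedence:
        op, nargs = stack.pop()
        if len(units) < nargs:
            raise error.RunError(missing_err)
        args = [units.pop() for _ in range(nargs)][::-1]
        units.append(apply_operator(op, *args))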
Code example #39
File: representation.py Project: boriel/pcbasic
def tokenise_number(ins, outs):
    """ Convert Python-string number representation to number token. """
    c = util.peek(ins)
    # handle hex or oct constants
    if c == '&':
        ins.read(1)
        nxt = util.peek(ins).upper()
        if nxt == 'H': # hex constant
            ins.read(1)
            word = ''
            while True: 
                if not util.peek(ins).upper() in ascii_hexits:
                    break
                else:
                    word += ins.read(1).upper()
            val = int(word, 16) if word else 0
            outs.write('\x0C' + str(vartypes.value_to_uint(val)))
        else: # nxt == 'O': # octal constant
            if nxt == 'O':
                ins.read(1)
            word = ''    
            while True: 
                if not util.peek(ins).upper() in ascii_octits:
                    break
                else:
                    word += ins.read(1).upper()
            val = int(word, 8) if word else 0
            outs.write('\x0B' + str(vartypes.value_to_uint(val)))
    # handle other numbers
    # note GW passes signs separately as a token and only stores positive numbers in the program        
    elif c in ascii_digits or c == '.' or c in ('+', '-'):
        have_exp = False
        have_point = False
        word = ''
        while True: 
            c = ins.read(1).upper()
            if c == '.' and not have_point and not have_exp:
                have_point = True
                word += c
            elif c in ('E', 'D') and not have_exp:    
                have_exp = True
                word += c
            elif c in ('-', '+') and word == '':
                # must be first token
                word += c
            elif c in ('+', '-') and word[-1] in ('E', 'D'):
                word += c
            elif c in ascii_digits:
                word += c
            elif c in whitespace:
                # we'll remove this later but need to keep it for now so we
                # can reposition the stream on removing trailing whitespace
                word += c
            elif c in ('!', '#') and not have_exp:
                word += c
                break
            elif c == '%':
                # swallow a %, but break parsing
                break    
            else:
                if c != '':
                    ins.seek(-1,1)
                break
        # don't claim trailing whitespace, don't end in D or E
        while len(word) > 0 and (word[-1] in whitespace + ('D', 'E')):
            if word[-1] in ('D', 'E'):
                have_exp = False
            word = word[:-1]
            ins.seek(-1, 1)  # even if c==''
        # remove all internal whitespace
        trimword = ''
        for c in word:
            if c not in whitespace:
                trimword += c
        word = trimword
        # write out the numbers
        if len(word) == 1 and word in ascii_digits:
            # digit
            outs.write(chr(0x11 + int(word)))
        elif (not (have_exp or have_point or word[-1] in ('!', '#'))
              and int(word) <= 0x7fff and int(word) >= -0x8000):
            if int(word) <= 0xff and int(word) >= 0:
                # one-byte constant
                outs.write('\x0f' + chr(int(word)))
            else:
                # two-byte constant
                outs.write('\x1c' + str(vartypes.value_to_sint(int(word))))
        else:
            mbf = str(from_str(word).to_bytes())
            if len(mbf) == 4:
                # single
                outs.write('\x1d' + mbf)
            else:
                # double
                outs.write('\x1f' + mbf)
    elif c != '':
        ins.seek(-1, 1)
Code example #40
File: expressions.py Project: Yungzuck/pcbasic
def parse_expression(ins, allow_empty=False):
    """ Compute the value of the expression at the current code pointer. """
    stack = deque()
    units = deque()
    d = ''
    missing_error = error.MISSING_OPERAND
    # see https://en.wikipedia.org/wiki/Shunting-yard_algorithm
    while True:
        last = d
        d = util.skip_white(ins)
        # two-byte function tokens
        if d in tk.twobyte:
            d = util.peek(ins, n=2)
        if d == tk.NOT and not (last in operators or last == ''):
            # unary NOT ends expression except after another operator or at start
            break
        elif d in operators:
            ins.read(len(d))
            # get combined operators such as >=
            if d in combinable:
                nxt = util.skip_white(ins)
                if nxt in combinable:
                    d += ins.read(len(nxt))
            if last in operators or last == '' or d == tk.NOT:
                # a last of '(' would also qualify, but '(' triggers a
                # recursive parse_bracket call in which last == ''
                nargs = 1
                # zero operands for a binary operator is always syntax error
                # because it will be seen as an illegal unary
                if d not in unary:
                    raise error.RunError(error.STX)
            else:
                nargs = 2
                _evaluate_stack(stack, units, operators[d], error.STX)
            stack.append((d, nargs))
        elif not (last in operators or last == ''):
            # repeated unit ends expression
            # repeated literals or variables or non-keywords like 'AS'
            break
        elif d == '(':
            units.append(parse_bracket(ins))
        elif d and d in string.ascii_letters:
            # variable name
            name, indices = parse_variable(ins)
            units.append(var.get_variable(name, indices))
        elif d in functions:
            # apply functions
            ins.read(len(d))
            try:
                units.append(functions[d](ins))
            except (ValueError, ArithmeticError) as e:
                units.append(_handle_math_error(e))
        elif d in tk.end_statement:
            break
        elif d in tk.end_expression or d in tk.keyword:
            # missing operand inside brackets or before comma is syntax error
            missing_error = error.STX
            break
        else:
            # literal
            units.append(parse_literal(ins))
    # empty expression is a syntax error (inside brackets)
    # or Missing Operand (in an assignment)
    # or not an error (in print and many functions)
    if units or stack:
        _evaluate_stack(stack, units, 0, missing_error)
        return units[0]
    elif allow_empty:
        return None
    else:
        raise error.RunError(missing_error)
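
The loop above, together with _evaluate_stack, is a variant of the shunting-yard algorithm linked in the comment. A toy numeric version with a made-up two-level precedence table (not PC-BASIC's actual operator set) shows the mechanics:

# Toy shunting-yard evaluator with a made-up precedence table;
# PC-BASIC's real tables live in the operators/unary dicts used above.
from collections import deque

PREC = {'+': 1, '-': 1, '*': 2, '/': 2}
APPLY = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
         '*': lambda a, b: a * b, '/': lambda a, b: a / b}

def evaluate(tokens):
    """ Evaluate e.g. [2, '+', 3, '*', 4] respecting precedence. """
    stack, units = deque(), deque([tokens[0]])
    for op, num in zip(tokens[1::2], tokens[2::2]):
        # pop operators of equal or higher precedence before pushing
        while stack and PREC[stack[-1]] >= PREC[op]:
            rhs, lhs = units.pop(), units.pop()
            units.append(APPLY[stack.pop()](lhs, rhs))
        stack.append(op)
        units.append(num)
    while stack:
        rhs, lhs = units.pop(), units.pop()
        units.append(APPLY[stack.pop()](lhs, rhs))
    return units[0]

assert evaluate([2, '+', 3, '*', 4]) == 14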
Code example #50
0
File: tokenise.py Project: boriel/pcbasic
def detokenise_keyword(ins, output):
    """ Convert a one- or two-byte keyword token to ascii. """
    # try for single-byte token or two-byte token
    # if no match, first char is passed unchanged
    s = ins.read(1)
    try:
        keyword = token_to_keyword[s]
    except KeyError:
        s += util.peek(ins)
        try:
            keyword = token_to_keyword[s]
            ins.read(1)
        except KeyError:
            output += s[0]
            return False
    # when we're here, s is an actual keyword token.
    # number followed by token is separated by a space 
    if (output and chr(output[-1]) in ascii_digits and s not in tk.operator):
        output += ' '
    output += keyword
    comment = False
    if keyword == "'":
        comment = True
    elif keyword == "REM":
        nxt = ins.read(1)
        if nxt == '':
            pass
        elif nxt == tk.O_REM: # ' 
            # if next char is token('), we have the special value REM' 
            # -- replaced by ' below.
            output += "'"
        else:
            # otherwise, it's part of the comment or an EOL or whatever, 
            # pass back to stream so it can be processed
            ins.seek(-1, 1)
        comment = True
    # check for special cases
    #   [:REM']   ->  [']
    if len(output) > 4 and str(output[-5:]) == ":REM'":
        output[:] = output[:-5] + "'"
    #   [WHILE+]  ->  [WHILE]
    elif len(output) > 5 and str(output[-6:]) == "WHILE+":
        output[:] = output[:-1]
    #   [:ELSE]  ->  [ELSE]
    # note that anything before ELSE gets cut off, 
    # e.g. if we have 1ELSE instead of :ELSE it also becomes ELSE
    elif len(output) > 4 and str(output[-4:]) == "ELSE":
        if (len(output) > 5 and chr(output[-5]) == ':' and 
                    chr(output[-6]) in ascii_digits):
            output[:] = output[:-5] + " ELSE" 
        else:
            output[:] = output[:-5] + "ELSE"
    # token followed by token or number is separated by a space, 
    # except operator tokens and SPC(, TAB(, FN, USR
    nxt = util.peek(ins)
    if (not comment and 
            nxt.upper() not in util.end_line + tk.operator + 
                                (tk.O_REM, '"', ',', ' ', ':', '(', ')', '$', 
                                 '%', '!', '#', '_', '@', '~', '|', '`') and
            s not in tk.operator + tk.with_bracket + 
                      (tk.USR, tk.FN)): 
        # excluding TAB( SPC( and FN. \xD9 is ', \xD1 is FN, \xD0 is USR.
        output += ' '
    return comment
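
The one-or-two-byte lookup at the top of this function is a common variable-length token pattern. Here is a self-contained sketch with a hypothetical token table; the real token_to_keyword mapping is defined elsewhere in tokenise.py:

# Sketch of the one-or-two-byte token lookup, with a made-up table.
from StringIO import StringIO

TOKENS = {'\x91': 'PRINT', '\xff\x81': 'SQR'}  # hypothetical entries

def read_keyword(ins):
    """ Return the keyword at the stream head, or None without consuming. """
    s = ins.read(1)
    if s in TOKENS:
        return TOKENS[s]
    s += ins.read(1)
    if s in TOKENS:
        return TOKENS[s]
    ins.seek(-len(s), 1)  # no match: rewind everything we read
    return None

assert read_keyword(StringIO('\xff\x81')) == 'SQR'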
Code example #51
0
File: tokenise.py Project: boriel/pcbasic
def tokenise_line(line):
    """ Convert an ascii program line to tokenised form. """
    ins = StringIO(line)
    outs = StringIO()          
    # skip whitespace at start of line
    d = util.skip(ins, whitespace)
    if d == '':
        # empty line at EOF
        return outs
    # read the line number
    tokenise_line_number(ins, outs)
    # expect line number
    allow_jumpnum = False
    # expect number (6553 6 -> the 6 is encoded as \x17)
    allow_number = True
    # flag for SPC( or TAB( as numbers can follow the closing bracket
    spc_or_tab = False
    # parse through elements of line
    while True: 
        # peek next character, convert to uppercase
        c = util.peek(ins).upper()
        # anything after NUL is ignored till EOL
        if c == '\0':
            ins.read(1)
            ascii_read_to(ins, ('', '\r'))
            break
        # end of line    
        elif c in ('', '\r'):
            break
        # handle whitespace
        elif c in whitespace:
            ins.read(1)
            outs.write(c)
        # handle string literals    
        elif util.peek(ins) == '"':
            tokenise_literal(ins, outs)
        # handle jump numbers
        elif allow_number and allow_jumpnum and c in ascii_digits + ('.',):
            tokenise_jump_number(ins, outs) 
        # handle numbers
        # numbers following var names with no operator or token in between 
        # should not be parsed, eg OPTION BASE 1
        # note we don't include leading signs, encoded as unary operators
        # number starting with . or & are always parsed
        elif c in ('&', '.') or (allow_number and 
                                  not allow_jumpnum and c in ascii_digits):
            representation.tokenise_number(ins, outs)
        # operator keywords ('+', '-', '=', '/', '\\', '^', '*', '<', '>'):    
        elif c in ascii_operators: 
            ins.read(1)
            # operators don't affect line number mode - can do line number 
            # arithmetic and RENUM will do the strangest things
            # this allows for 'LIST 100-200' etc.
            outs.write(keyword_to_token[c])    
            allow_number = True
        # special case ' -> :REM'
        elif c == "'":
            ins.read(1)
            outs.write(':' + tk.REM + tk.O_REM)
            tokenise_rem(ins, outs)
        # special case ? -> PRINT 
        elif c == '?':
            ins.read(1)
            outs.write(tk.PRINT)
            allow_number = True
        # keywords & variable names       
        elif c in ascii_uppercase:
            word = tokenise_word(ins, outs)
            # handle non-parsing modes
            if (word in ('REM', "'") or 
                            (word == 'DEBUG' and word in keyword_to_token)):
                tokenise_rem(ins, outs)
            elif word == "DATA":    
                tokenise_data(ins, outs)
            else:    
                allow_jumpnum = (word in linenum_words)
                # numbers can follow tokenised keywords 
                # (which does not include the word 'AS')
                allow_number = (word in keyword_to_token)
                if word in ('SPC(', 'TAB('):
                    spc_or_tab = True
        else:
            ins.read(1)
            if c in (',', '#', ';'):
                # can separate numbers as well as jumpnums
                allow_number = True
            elif c in ('(', '['):
                allow_jumpnum, allow_number = False, True
            elif c == ')' and spc_or_tab:
                spc_or_tab = False
                allow_jumpnum, allow_number = False, True
            else:
                allow_jumpnum, allow_number = False, False
            # replace control characters and non-ASCII bytes by spaces;
            # note that 0x7f (DEL) is let through
            outs.write(c if 32 <= ord(c) <= 127 else ' ')
    outs.seek(0)
    return outs
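
The allow_number/allow_jumpnum flags implement a small state machine: a bare number is kept as a line-number reference only when a GOTO-like keyword precedes it. A stripped-down, hypothetical illustration of just that rule (not the module's API):

# Toy version of the jump-number rule used above: a numeric word is a
# line-number reference only if a GOTO-like keyword precedes it.
LINENUM_WORDS = ('GOTO', 'GOSUB', 'THEN', 'ELSE')  # assumed subset

def classify_numbers(words):
    """ Label each numeric word as 'jumpnum' or 'number'. """
    allow_jumpnum, labels = False, []
    for w in words:
        if w.isdigit():
            labels.append('jumpnum' if allow_jumpnum else 'number')
        else:
            allow_jumpnum = w in LINENUM_WORDS
    return labels

assert classify_numbers(['GOTO', '100']) == ['jumpnum']
assert classify_numbers(['BASE', '1']) == ['number']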
Code example #52
0
File: tokenise.py Project: boriel/pcbasic
def tokenise_literal(ins, outs):
    """ Pass a string literal. """
    outs.write(ins.read(1))
    outs.write(ascii_read_to(ins, ('', '\r', '\0', '"')))
    if util.peek(ins) == '"':
        outs.write(ins.read(1))
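
A quick self-contained check of the literal passer's behaviour; peek and read_to below are stand-ins written to the assumed semantics of util.peek and ascii_read_to:

# Self-contained check of the literal passer; peek and read_to are
# stand-ins for util.peek and ascii_read_to (assumed semantics).
from StringIO import StringIO

def peek(ins):
    c = ins.read(1)
    if c:
        ins.seek(-1, 1)
    return c

def read_to(ins, stop):
    out = ''
    while peek(ins) not in stop:
        out += ins.read(1)
    return out

ins, outs = StringIO('"HELLO" REST'), StringIO()
outs.write(ins.read(1))                    # opening quote
outs.write(read_to(ins, ('', '\r', '\0', '"')))
if peek(ins) == '"':
    outs.write(ins.read(1))                # closing quote, if present
assert outs.getvalue() == '"HELLO"'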
Code example #54
0
def end_of_file(self):
    """ Check for end of file - for internal use. """
    return util.peek(self.fhandle) in ('', '\x1a')
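
The check hinges on util.peek looking ahead without consuming input. A minimal sketch under the assumption that it reads one character and seeks back:

# Sketch of the EOF check for a plain file object; '\x1a' (Ctrl-Z) is
# the classic DOS end-of-file marker.
def peek(fhandle):
    c = fhandle.read(1)
    if c:
        fhandle.seek(-1, 1)
    return c

def end_of_file(fhandle):
    return peek(fhandle) in ('', '\x1a')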
Code example #55
0
File: task3.py Project: elgator/linprog
model += pulp.lpSum(x) == pulp.lpSum(purchases), "Closed sum"

# Target: max profit with a penalty for holding inventory
# Note: the penalty coefficient matters; try 0.1 instead of 100
# AG - I would go with 0.1 (any value lower than the price of the product)
# as the cost of holding inventory. With 100, the penalty is 100 times the
# product price: selling 1 product after a day earns $1 but costs $100 in
# holding, so the model would simply drive inventory to zero.
cost_of_holding_inventory = 1
# EP: examples of cost_of_holding_inventory values
# cost_of_holding_inventory = 0.1
# production: [1.5, 5.0, 0.0, 5.0]
# cost_of_holding_inventory = 2
# production: [1.0, 5.0, 0.0, 5.0]
model += pulp.lpSum([
    purchases[d] for d in days
]) - cost_of_holding_inventory * pulp.lpSum([inventory[d] for d in days])

feasibility = model.solve()
print("Status:", pulp.LpStatus[feasibility])

print("production:", peek(x))
print("total:", sum(peek(x)))
print("purchases:", peek(purchases))
print("total:", sum(peek(purchases)))
for accepted, order in zip(peek(accept), orders):
    print(order, "accepted" if accepted else "rejected")
print("Objective value:", model.objective.value())
assert model.objective.value() == 11
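
To see the holding-cost penalty in isolation, here is a minimal self-contained sketch in the same spirit; the demand figures are invented and are not the task3.py data:

# Minimal PuLP sketch of the holding-cost idea; made-up numbers only.
import pulp

days = range(3)
demand = [2, 0, 4]                       # hypothetical demand per day
produce = pulp.LpVariable.dicts('produce', days, lowBound=0, upBound=5)

model = pulp.LpProblem('toy_inventory', pulp.LpMinimize)
# end-of-day inventory: running production minus running demand
inventory = [pulp.lpSum([produce[t] - demand[t] for t in days[:d + 1]])
             for d in days]
for inv in inventory:
    model += inv >= 0                    # never sell from empty stock
model += pulp.lpSum(inventory)           # penalise every unit held overnight

model.solve()
# penalising inventory forces just-in-time production: [2.0, 0.0, 4.0]
print("production:", [pulp.value(produce[d]) for d in days])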