Ejemplo n.º 1
0
def parse(move, getUnits=False, defaultUnit='in'):
  '''For Move, Copy, and Replicate: evaluate the user input, grab any x,y
  values and (optionally) a trailing unit, convert the coordinates, and
  return the parsed location(s).

  Parameters:
    move        -- a string like "1,2" or "1,2in", or a list of such strings
    getUnits    -- when True, also parse a trailing unit (in/mil/mm) and
                   convert each coordinate via convertUnits()
    defaultUnit -- unit assumed when none is given (default 'in')

  Returns an [x, y] list for a single input, or a list of such lists for a
  list input.  Calls error() when an input string does not parse.

  BUG FIXES vs original: (1) g.group('units') was read even when the
  pattern had no 'units' group (getUnits=False), raising IndexError;
  (2) units were converted twice (a map() immediately followed by an
  equivalent list comprehension); (3) lazy map objects leaked out under
  Python 3 -- results are now materialized lists.
  '''
  if isinstance(move, str):
    move = [move]
  # optional trailing unit group; empty pattern when units are not requested
  units = r'(?P<units>in|mil|mm|[NoUnitSpecified]?)' if getUnits else r''
  out = []
  for m in move:
    # remove all of the white space
    m = re.sub(r'\s', '', m).strip()
    g = re.match(r'(?P<x>-?\d*\.?\d*)\,(?P<y>-?\d*\.?\d*)' + units, m, re.I)
    if not g:
      error('Argument Parse Failed on [%s] failed! Check the arguments' % (m))

    # default to the caller-supplied unit when no unit was captured;
    # only touch the 'units' group when it exists in the pattern
    if getUnits and g.group('units'):
      unit = g.group('units')
    else:
      unit = defaultUnit

    # Ok prepare them for output -- convert exactly once
    item = [float(g.group('x')), float(g.group('y'))]
    if getUnits:
      item = [convertUnits(v, unit) for v in item]
    out.append(item)
  return (out[0] if len(out) == 1 else out)
Ejemplo n.º 2
0
 def save(self, filename=None, pdf=False):
   '''Write the drawing to disk as PDF or EPS.

   An explicit *filename* argument now takes precedence over
   self.filename (the original silently ignored the argument whenever
   self.filename was set).  Names ending in .pdf are written with
   writePDFfile; anything else is written as EPS.  The *pdf* flag is kept
   for interface compatibility but is unused.
   '''
   if filename is None and self.filename is None: error("nowhere to save")
   # BUG FIX: prefer the explicit argument, fall back to self.filename
   fname = filename if filename is not None else self.filename
   puts(colored.green('Writing : %s' % fname))
   # endswith() avoids matching ".pdf" in the middle of a name
   if fname.endswith('.pdf'):
     self.c.writePDFfile(fname)
   else:
     self.c.writeEPSfile(fname)
Ejemplo n.º 3
0
 def save(self, filename=None):
   '''Write the accumulated PostScript to *filename* (or self.filename).

   An explicit *filename* argument now takes precedence over
   self.filename (the original silently ignored the argument whenever
   self.filename was set).  The final bounding box is folded into the
   EPS header before the file is written.
   '''
   if filename is None and self.filename is None: error("nowhere to save")
   # BUG FIX: prefer the explicit argument, fall back to self.filename
   fname = filename if filename is not None else self.filename
   puts(colored.green('Writing : %s' % fname))
   # add the final bounding box to the postscript header
   self.apply_bounding_box()
   # combine everything together
   final_eps = self.eps_header + "\n" + self.box + "\n" + self.ps + "\ngrestore\n"
   with open(fname, "w") as eps_file:
     eps_file.write(final_eps)
Ejemplo n.º 4
0
  def __init__(self, symbol):
    '''Resolve the contract id and industry for *symbol*.

    Looks in the on-disk cache first and downloads on a miss.  Sets
    self.symbol, self.conid and self.industry.  On failure the symbol is
    added to the skip list and an Exception is raised, chained to the
    underlying cause (the original discarded the traceback with a bare
    ``raise Exception(e)``).
    '''
    self.symbol = symbol

    # Config: cache directories used by the lookup/download helpers
    cfg = Config()
    self.cfg = {
      'dir_conids': cfg['paths']['conids'],
      'dir_contracts': cfg['paths']['contracts'],
      'dir_day': cfg['paths']['day'],
      'dir_quotes': cfg['paths']['quotes'],
    }

    try:
      # Get conid from cache; the cache stores the literal string 'None'
      # for a known miss, hence the string comparison
      self.conid = self.disk_find_by('symbol', self.symbol)
      if self.conid == 'None':
        raise Exception('Not found in cache')
    except Exception:
      # Not found in cache, download
      try:
        print('%s: Down conid' % self.symbol)
        self.conid = self.down_conid()
        if self.conid == 'None':
          raise Exception('Unable to down conid')
        print('conid', self.conid)
      except Exception as e:
        print('%s: Could not down conid: %s' % (self.symbol, e))
        self.add_to_skip_list()
        # BUG FIX: chain so the original traceback is preserved
        raise Exception(e) from e
    try:
      # Get industry from cache
      contract = self.get_contract()
      self.industry = contract['industry']
    except Exception as e:
      error('Could not get contract from cache or download')
      # Add to bad list
      self.add_to_skip_list()
      raise Exception(e) from e
Ejemplo n.º 5
0
def parse(move, getUnits=False, defaultUnit='in'):
    '''For Move, Copy, and Replicate: evaluate the user input, grab any x,y
    values and (optionally) a trailing unit, convert the coordinates, and
    return the parsed location(s).

    Parameters:
      move        -- a string like "1,2" or "1,2in", or a list of such strings
      getUnits    -- when True, also parse a trailing unit (in/mil/mm) and
                     convert each coordinate via convertUnits()
      defaultUnit -- unit assumed when none is given (default 'in')

    Returns an [x, y] list for a single input, or a list of such lists for
    a list input.  Calls error() when an input string does not parse.

    BUG FIXES vs original: (1) g.group('units') was read even when the
    pattern had no 'units' group (getUnits=False), raising IndexError;
    (2) units were converted twice (a map() immediately followed by an
    equivalent list comprehension); (3) lazy map objects leaked out under
    Python 3 -- results are now materialized lists.
    '''
    if isinstance(move, str):
        move = [move]
    # optional trailing unit group; empty pattern when units not requested
    units = r'(?P<units>in|mil|mm|[NoUnitSpecified]?)' if getUnits else r''
    out = []
    for m in move:
        # remove all of the white space
        m = re.sub(r'\s', '', m).strip()
        g = re.match(r'(?P<x>-?\d*\.?\d*)\,(?P<y>-?\d*\.?\d*)' + units, m,
                     re.I)
        if not g:
            error('Argument Parse Failed on [%s] failed! Check the arguments' %
                  (m))

        # default to the caller-supplied unit when no unit was captured;
        # only touch the 'units' group when it exists in the pattern
        if getUnits and g.group('units'):
            unit = g.group('units')
        else:
            unit = defaultUnit

        # Ok prepare them for output -- convert exactly once
        item = [float(g.group('x')), float(g.group('y'))]
        if getUnits:
            item = [convertUnits(v, unit) for v in item]
        out.append(item)
    return (out[0] if len(out) == 1 else out)
Ejemplo n.º 6
0
from lib.lexical_analyzer import LexicalAnalyzer
from lib.parser import Parser
from lib.prettytable import PrettyTable

__author__ = 'Nicholas Pickering'

filename = 0  # placeholder; overwritten by the CLI argument below

#   Start Main Program
# print("Recursive Descent Parser")
# print("Written by Nicholas Pickering")

# NOTE(review): relies on sys/util/glob being imported earlier in the file
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    util.error("No test directory specified... Exiting...", True)

#   Read in file for processing...
files = glob.glob("data/" + str(sys.argv[1]) + "/*.txt")
no_errors = True
for filename in files:

    file = open(filename, "r")
    # NOTE(review): open() raises on failure, so this guard never fires
    if not file:
        util.error("File could not be loaded... Exiting...", True)

    lexical_analyzer = LexicalAnalyzer()
    tokens = lexical_analyzer.process_file(file)

    # tokens are consumed from the end, so reverse for stack-style popping
    if len(tokens) > 0:
        tokens.reverse()
Ejemplo n.º 7
0
def process(file):
    """Assembler first pass: scan fixed-column SIC/XE source lines from
    *file*, assign memory locations, record symbols and literals, and
    build lst records.

    Returns {"lst": <lst records>, "success": <bool, False if any error
    was recorded>}.

    Fixes over the original:
      * literal comparisons used ``is`` with strings and ints
        (e.g. ``operation.name is "START"``) -- identity, not equality,
        which only worked by CPython interning accident; now ``==``
      * blank lines are skipped before indexing line[0], which raised
        IndexError on a truly empty line
      * ``location`` is initialized so a file whose first line is not
        START fails cleanly instead of raising NameError
    """
    lst = []
    lines_counted = 0
    literal_stack_hex = []
    literal_stack_char = []
    success = True
    location = 0  # current assembly address; set properly by START

    #
    #   Process Input File
    #   We iterate over each line to collect information about it, to determine its relevance in the final object code
    #
    for line in file.readlines():
        line = line.replace("\n", "")

        # Check for blank lines, if so skip (must precede line[0] below)
        if "".join(line.split()) == "":
            continue

        # Check if line is a comment, if so skip
        if line[0] == '.':
            util.add_lst_record(str(lines_counted+1), '        ',
                                '', line, {"flag": "-comm"}, lst)
            lines_counted += 1
            continue

        # break up the line appropriately (fixed source columns)
        label = line[:7].replace(" ", "")

        extended = False
        if len(line) > 8:
            extended = line[9] == "+"

        sic = False
        if len(line) > 8:
            sic = line[9] == "*"

        mneumonic = line[10:16].replace(" ", "")

        addressing = ""
        if len(line) > 17:
            addressing = line[18]

        operand = line[19:28].replace(" ", "")

        indexed = False
        if ",X" in operand:
            operand = operand.replace(",X", "")
            indexed = True

        # lookup operation for format size
        operation = util.lookup_operation(mneumonic)

        meta = {
            "label": label,
            "mneumonic": mneumonic,
            "operation": operation,
            "operand": operand,
            "indexed": indexed,
            "extended": extended,
            "sic": sic,
            "addressing": addressing
        }

        #
        #   Add Line Items to lst File
        #   Based on information pulled from processed file, we generate memory locations for each line item
        #
        lst_record_added = False
        if not operation:
            util.add_lst_record(str(lines_counted+1), '     ',
                                '', line, {"flag": "-notop"}, lst)
            continue

        # Handle START case
        if operation.name == "START":
            if lines_counted == 0:
                location = int(operand, 16)
            else:
                util.error("START must be the first line called")
                continue

        # Handle LTORG / END literal organization
        elif operation.name in ["LTORG", "END"]:
            literal_counter = 0

            util.add_lst_record(str(lines_counted+1), str(hex(location)),
                                '', line, meta, lst)
            lst_record_added = True

            #
            #   Process Literal Stacks
            #   As we process our file, we collect literals to be allocated at LTORG or END checkpoints
            #
            #   Here, we allocate the memory and store the symbols necessary to process literals.
            #

            # Process Character Literals
            if len(literal_stack_char) > 0:
                while len(literal_stack_char):
                    literal = literal_stack_char.pop()
                    operand = "C'" + literal + "'"
                    source = "=" + operand + "\t  BYTE\t  " + operand + "\t\t.literal organization"
                    write_response = symbol_table.write_symbol(operand, location)
                    # character literals occupy one byte per character
                    operation_size = len(literal)

                    meta = {
                        "label": source,
                        "mneumonic": "BYTE",
                        "operation": util.lookup_operation("BYTE"),
                        "operand": operand,
                        "indexed": indexed,
                        "extended": extended,
                        "sic": sic,
                        "addressing": addressing,
                        "flag": "-litch"
                    }
                    util.add_lst_record("+" + str(literal_counter+1) + "+", str(hex(location)),
                                        '', source, meta, lst)

                    if write_response['success'] is not True:
                        util.add_lst_error(str(lines_counted+1), write_response['message'], lst)
                        success = False
                        operation_size = 0

                    location += operation_size
                    literal_counter += 1

            # Process Hex Literals
            if len(literal_stack_hex) > 0:
                while len(literal_stack_hex):
                    literal = literal_stack_hex.pop()
                    operand = "X'" + literal + "'"
                    source = "=" + operand + "\t  BYTE\t  " + operand + "\t.literal organization"
                    write_response = symbol_table.write_symbol(operand, location)
                    # hex literals occupy one byte per two hex digits
                    operation_size = int(len(literal)/2)

                    meta = {
                        "label": source,
                        "mneumonic": "BYTE",
                        "operation": util.lookup_operation("BYTE"),
                        "operand": operand,
                        "indexed": indexed,
                        "extended": extended,
                        "sic": sic,
                        "addressing": addressing,
                        "flag": "-lithx"
                    }
                    util.add_lst_record("+" + str(literal_counter+1) + "+", str(hex(location)),
                                        '', source, meta, lst)

                    if write_response['success'] is not True:
                        operation_size = 0
                        util.add_lst_error(str(lines_counted+1), write_response['message'], lst)
                        success = False

                    # if hex literal is invalid, anticipate no memory increase
                    if len(literal) % 2 != 0:
                        operation_size = 0

                    location += operation_size
                    literal_counter += 1

            lines_counted += 1

        # operation is valid, if label exists add it to the symbol table

        #
        #   Store Label in Symbol Table
        #   If an operation is deemed valid and the source line contains a label for the instruction,
        #       we add the label to the symbol table.
        #
        if len(label) > 0:

            # check for a label to store
            write_response = symbol_table.write_symbol(label, location)

            if write_response['success'] is not True:
                util.add_lst_error(str(lines_counted+1), write_response['message'], lst)
                success = False

        # add line to lst record, if it hasn't already been added
        if not lst_record_added:
            util.add_lst_record(str(lines_counted+1), str(hex(location)),
                                '', line, meta, lst)

            #
            #   Determine Memory Location of Next Operation
            #   We increment our current memory location per the current operation's size
            #
            #   Here, we must also process dynamically sized operations such as BYTE operations, and operations
            #       with several formats (extended operations).
            #
            operation_size = 0
            if operation.opcode is None:
                pass
            elif len(operation.format_list) == 0:
                # operation_size must be calculated
                if extended:
                    util.error("Operation is marked as extended, but extended version is not available...")
                    continue
                else:
                    if operation.name == "RESW":
                        operation_size = int(operand) * 3
                    elif operation.name == "RESB":
                        operation_size = int(operand)
                    elif operation.name == "BYTE":
                        literal = operand[1:].replace("'", '')
                        if operand[:1] == 'X':
                            if len(literal) % 2 == 0:
                                operation_size = int(len(literal)/2)
                            else:
                                util.add_lst_error(str(lines_counted+1),
                                                   "Odd number of X bytes found in operand field", lst)
                                success = False
                        elif operand[:1] == 'C':
                            operation_size = len(literal)
                        else:
                            util.error("Malformed operand: " + operand[:1] + "...")
            else:
                operation_size = operation.format_list[0]
                if extended:
                    if len(operation.format_list) > 1:
                        operation_size = operation.format_list[1]
                    else:
                        operation_size = 0
                        util.error("Operation is marked as extended, but extended version is not available...")

            location += operation_size

            #
            #   Process Literals to Stacks
            #   If an operation's operand consists of a literal, we must store it temporarily on a stack and
            #       process its memory location later.
            #
            if addressing == '=':
                if len(operand) > 0:
                    literal = operand[1:].replace("'", '')
                    if operand[0] == 'C':
                        literal_stack_char.append(literal)
                    elif operand[0] == 'X':
                        literal_stack_hex.append(literal)

            # done processing a line of code to be assembled, increment lines counted
            lines_counted += 1

    return {
        "lst": lst,
        "success": success
    }
Ejemplo n.º 8
0
from lib import util, astar
from shapely.geometry import Polygon, LineString

__author__ = 'Nicholas Pickering'

filename = 0  # placeholder; overwritten by the CLI argument below

#   Start Main Program
print("A* Search Simulation")
print("Written by Nicholas Pickering")

#   Read in file for processing...
# NOTE(review): relies on sys being imported earlier in the file
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    util.error("No filename specified... Exiting...", True)

file = open(filename, "r")
# NOTE(review): open() raises on failure, so this guard never fires
if not file:
    util.error("File could not be loaded... Exiting...", True)

#
#   Process File for Input Map
#   Load in Polygons and other states
#

start = None
goal = None
line_number = 0
for line in file.readlines():
Ejemplo n.º 9
0
from lib.parser import Parser
from lib.prettytable import PrettyTable

__author__ = 'Nicholas Pickering'

filename = 0  # placeholder; overwritten by the CLI argument below

#   Start Main Program
# print("Recursive Descent Parser")
# print("Written by Nicholas Pickering")

#   Read in file for processing...
# NOTE(review): relies on sys/util/LexicalAnalyzer being imported earlier
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    util.error("No filename specified... Exiting...", True)

file = open(filename, "r")
# NOTE(review): open() raises on failure, so this guard never fires
if not file:
    util.error("File could not be loaded... Exiting...", True)

lexical_analyzer = LexicalAnalyzer()
tokens = lexical_analyzer.process_file(file)

# tokens are consumed from the end, so reverse for stack-style popping
if len(tokens) > 0:
    tokens.reverse()

parser = Parser(tokens)
parse_result = parser.parse()

table = PrettyTable(["i", "opcode", "operand1", "operand2", "result"])
Ejemplo n.º 10
0

number_of_page_requests = 0
number_of_pages = 0
number_of_experiments = 100

#   Start Main Program
print("Page Replacement Policy Simulation")
print("Written by Nicholas Pickering")

#   Read in file for processing...
# NOTE(review): relies on sys/util being imported earlier in the file
if len(sys.argv) == 3:
    number_of_page_requests = int(sys.argv[1])
    number_of_pages = int(sys.argv[2])
else:
    util.error("Incorrect number of arguments... Exiting...", True)

#
#   Process Input
#

print("Number of Page Requests: ", number_of_page_requests)
print("Number of Pages: ", number_of_pages)

# sweep frame counts from 2 up to the maximum number of pages (inclusive)
for page_frame_count in range(2, number_of_pages+1):

    # test out algorithms with number of page frames leading up to max page frames
    print("Starting Tests for Page Frame Count: ", page_frame_count)

    page_faults_fifo_total = 0
    page_faults_lru_total = 0
Ejemplo n.º 11
0
    # TODO: get_winners_lt_perc does not actually use perc_increase
    winner_symbols = get_winners_lt_perc(price_max, perc_increase)
    if debug:
        print('Got %i winner symbols' % len(winner_symbols))
except Exception as e:
    print('ERROR: Could not get winners:', e)
    exit(1)
# Populate data
out = {}
for symbol, price in winner_symbols.items():
    # industry is resolved below; None marks "not yet resolved"
    out[symbol] = {
        'price': price,
        'industry': None,
    }

try:
    # Get industry from symbols
    for symbol, price in winner_symbols.items():
        try:
            out[symbol].update({'industry': Company(symbol).industry})
        except Exception as e:
            # per-symbol failure: report and keep going with the rest
            error('ERROR: Could not get industry', e)
except Exception as e:
    error('ERROR: Could not get industries', e)
    exit(1)

# Final print
print(len(out))
print(out['REFR'])
#print(json.dumps(out))
location = 0
filename = ""
time_quantum = 0

generate_obj = False

#   Start Main Program
print("Uniprocessor Scheduling Algorithm Simulation")
print("Written by Nicholas Pickering")

#   Read in file for processing...
# NOTE(review): relies on sys/util being imported earlier in the file
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    util.error("No filename specified... Exiting...", True)

file = open(filename, "r")
# NOTE(review): open() raises on failure, so this guard never fires
if not file:
    util.error("File could not be loaded... Exiting...", True)

#   Validate Time Quantum
if len(sys.argv) > 2 and int(sys.argv[2]) > 0:
    time_quantum = int(sys.argv[2])
else:
    util.error("Invalid Time Quantum (second argument)... Exiting...", True)

#
#   Process Input
#
jobs = util.load_file(file)
Ejemplo n.º 13
0
from lib.lexical_analyzer import LexicalAnalyzer
from lib.parser import Parser
from lib.prettytable import PrettyTable

__author__ = 'Nicholas Pickering'

filename = 0  # placeholder; overwritten by the CLI argument below

#   Start Main Program
# print("Recursive Descent Parser")
# print("Written by Nicholas Pickering")

# NOTE(review): relies on sys/util/glob being imported earlier in the file
if len(sys.argv) > 1:
    filename = sys.argv[1]
else:
    util.error("No test directory specified... Exiting...", True)

#   Read in file for processing...
files = glob.glob("data/" + str(sys.argv[1]) + "/*.txt")
no_errors = True
for filename in files:

    file = open(filename, "r")
    # NOTE(review): open() raises on failure, so this guard never fires
    if not file:
        util.error("File could not be loaded... Exiting...", True)

    lexical_analyzer = LexicalAnalyzer()
    tokens = lexical_analyzer.process_file(file)

    # tokens are consumed from the end, so reverse for stack-style popping
    if len(tokens) > 0:
        tokens.reverse()