Exemple #1
0
def assemble(source, nop_count=0, format=OutputFormat.XILINX):
    """Assemble *source* into a list of formatted machine-word strings.

    A 'thread_finished' instruction followed by *nop_count* nops is always
    appended, and the separator comma is stripped from the final entry.
    """
    tail = ['thread_finished'] + ['nop'] * nop_count
    program = []

    for tokens in scan(source) + scan(tail):
        kind = get_instruction_format(tokens[0])

        if kind == 'i':
            encoded = parse_i_instruction(tokens)
        elif kind == 'r':
            encoded = parse_r_instruction(tokens)
        else:
            encoded = 0

        if tokens[-2]:  # Mask
            encoded += (1 << 31)

        if format == OutputFormat.XILINX:
            program.append('X"{:08x}", -- {}'.format(encoded, tokens[-1]))
        if format == OutputFormat.CPU:
            program.append('0x{:08x}, // {}'.format(encoded, tokens[-1]))

    # The final line must not carry a trailing separator comma.
    program[-1] = program[-1].replace(',', '', 1)
    return program
Exemple #2
0
def read(bot):
    """Dispatch one incoming IRC event on *bot*.

    Routes prefix-command messages to admin/common handlers, answers CTCP
    requests, handles INVITEs, manages NickServ identification, and reports
    4xx/5xx server numerics. Side effects: updates the module-global Bot
    and bot.previous['user'].
    """
    global Bot
    Bot = bot

    if bot.remote['nick'] and bot.remote['nick'] != bot.nick:
        if bot.remote['message'].startswith(bot.prefix):
            bot._debug("Command received: %s" % bot.remote['message'])
            args = bot.remote['message'][1:].rstrip().split(" ")
            command = args[0].lower()

            if bot.remote['nick'].lower() not in bot.inv['banned']:
                if command in library['admin']:
                    bot._debug('This is an admin-only command.')
                    # Only hosts listed in the config's admin_hostnames may run these.
                    can_do = bot.remote['host'] in [host.strip() for host in bot.config.get(bot.network, 'admin_hostnames').split(',')]
                    #can_do = can_do or bot.remote['nick'] in [nick.strip() for nick in bot.config.get(bot.network, 'admin').split(',')]
                    if can_do:
                        bot.previous['user'] = bot.remote['sendee']
                        pub.sendMessage("func.admin.%s" % library['admin'][command], bot=bot, args=args)
                    else:
                        if bot.voice:
                            reply(bot.remote['sendee'], "%s: Can't do that, noob." % bot.remote['nick'])
                elif bot.voice and command in library['common']:
                    bot._debug('This is a common command.')
                    pub.sendMessage("func.common.%s" % library['common'][command], bot=bot, args=args)
                    bot.previous['user'] = bot.remote['sendee']

        elif bot.remote['message'].startswith("\x01") and bot.remote['message'].endswith("\x01"):
            # CTCP request: \x01TYPE args\x01 (ACTION, i.e. /me, is ignored).
            type = bot.remote['message'][1:-1].split()[0]
            args = bot.remote['message'][1:-1].split()[1:]
            if type != "ACTION":
                ctcp(type, args)

        elif bot.remote['mid'] == "INVITE" and bot.remote['nick'].lower() not in bot.inv['banned']:
            join([bot.remote['mid'], bot.remote['message']])

        else:
            # Possibly a NickServ prompt: log in, or mark ourselves identified.
            if bot.init['registered'] and not bot.init['identified']:
                if bot.remote['nick'] == "NickServ":
                    if "registered" in bot.remote['message']:
                        bot._login()
                    elif re.search("(accepted|identified)", bot.remote['message']):
                        bot.init['identified'] = True
                        __import__('time').sleep(3)
                        autojoin()

            if bot.voice:
                #start scanning messages for certain data
                try: scanner.scan(bot)
                except (__import__('urllib2').URLError, __import__('socket').timeout): util.answer(bot, "fetch: response timeout exceeded.")

    else:
        # Server numerics 4xx/5xx are errors; 462 ("already registered") is ignored.
        if (bot.remote['mid'].startswith("4") or bot.remote['mid'].startswith("5")) and bot.remote['mid'] != "462":
            if bot.config.get(bot.network, 'traceback_notice_channel') == "yes" or bot.previous['user'][0] != "#":
                # BUG FIX: was `self.admin`, but there is no `self` in this
                # module-level function -- fall back to the bot's admin.
                sendto = bot.previous['user'] or bot.admin
            else:
                sendto = bot.admin

            reply(sendto, "Message from %s: Error #%s: %s" % (bot.remote['server'], bot.remote['mid'], bot.remote['message']))
        if not bot.init['joined'] and not bot.init['registered']:
            autojoin()
Exemple #3
0
    def __init__(self, path):
        """Build the widget for *path*: start a scan and set up the UI.

        path: filesystem path to display and register with the tagger manager.
        """
        # Kick off a scan; event_scanFinished is passed as the completion
        # callback (presumably invoked asynchronously -- confirm).
        scanner.scan(fileTagger.taggerManager, path, self.event_scanFinished)
        self.path = path
        self.basename = os.path.basename(path)
        self.__loadUI()
        self.pathLabel.setText(self.basename)
        self.pathLabel.setToolTip(self.path)  # full path shown on hover

        self.resource = fileTagger.taggerManager.registerTagger(self.path)
        self.__setTagLabel()
        # Result cache; filled elsewhere (likely by the scan callback -- confirm).
        self.resultList = {}
def extract_data(window):
    """
    Populate *window* from its tree widget: scan the tree into the category
    and feature boxes, reload categories, then refresh dependent fields.

    * Parameter: window (Qt mainWindow object)
    * Execute method set_categories_status
    * Execute method find_feature
    * Execute method set_main_fields
    """

    scan(window.tree, window.category_box, window.feature_box)
    window.load_category()
    set_categories_status(window)
    find_feature(window)
    set_main_fields(window)
Exemple #5
0
def main():
    """CLI entry point: parse arguments and start the Picasa-to-SmugMug scan."""
    setup_logger()

    # ArgumentParser(version=...) was deprecated in argparse and removed in
    # Python 3 -- declare an explicit --version argument instead.
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--version', action='version', version='Picasa2Smugmug 0.1')
    arg_parser.add_argument('--path', required=True, help='Directory to start scanning from')
    arg_parser.add_argument('--nickname', required=True, help='Nickname of SmugMug account')
    # arg_parser.add_argument('--action',
    #                         choices=['download', 'upload', 'sync'],
    #                         default='upload',
    #                         help='Action to perform')
    args = vars(arg_parser.parse_args())

    scan(base_dir=args['path'], nickname=args['nickname'], reset=False)
Exemple #6
0
def scan_thread():
    '''image scanning thread: debayer, downsample and scan queued frames'''
    state = mpstate.camera_state

    # Poll the unload event so the thread exits promptly when asked to stop.
    while not state.unload.wait(0.02):
        try:
            # keep the queue size below 100, so we don't run out of memory;
            # when backlogged, one extra get() deliberately drops a frame
            if state.scan_queue.qsize() > 100:
                (frame_time,im) = state.scan_queue.get(timeout=0.2)
            (frame_time,im) = state.scan_queue.get(timeout=0.2)
        except Queue.Empty:
            continue

        t1 = time.time()
        # Full-resolution debayer target, plus a 640x480 copy for scanning.
        im_full = numpy.zeros((960,1280,3),dtype='uint8')
        im_640 = numpy.zeros((480,640,3),dtype='uint8')
        scanner.debayer_full(im, im_full)
        scanner.downsample(im_full, im_640)
        regions = scanner.scan(im_640)
        t2 = time.time()
        state.scan_fps = 1.0 / (t2-t1)
        state.scan_count += 1

        state.region_count += len(regions)
        # Drop results rather than let the transmit queue grow without bound.
        if state.transmit_queue.qsize() < 100:
            state.transmit_queue.put((frame_time, regions, im_full, im_640))
Exemple #7
0
def output_quote(bot, quotes, regexp =  False):
    """Return one randomly chosen quote from *quotes*, formatted for output.

    bot: passed through to scanner.scan for extra annotation of the output.
    quotes: an ORM query iterated via .naive() (presumably peewee -- confirm).
    regexp: optional pattern string; when given, only quotes whose message
            matches are eligible.  False (not None) means "no filter".
    """
    import scanner

    ids = []
    if regexp != False:
        regexp = re.compile(regexp, re.L | re.M | re.U)
    # Collect ids of all candidate quotes, filtered by the regexp if any.
    for q in quotes.naive():
        if regexp != False and regexp.search(q.message) == None:
            continue
        ids.append(q.id)

    if len(ids) == 0:
        return "No matching quotes were found."

    quote = Quote.get(Quote.id == choice(ids))

    # "timestamp | <nick> message", or "timestamp | * nick message" for actions.
    fmt = "%s | "
    if quote.action:
        fmt += "* %s"
    else:
        fmt += "<%s>"
    fmt += " %s"

    output = fmt % (str(datetime.datetime.fromtimestamp(int(quote.time))), quote.nick, quote.message)
    output = output.encode('utf8')
    # scanner.scan may return extra text for the quote; '' when nothing found.
    result = scanner.scan(bot, output) or ''

    return '\n'.join([output, result])
Exemple #8
0
def parse() :
    """reads the input program and builds an operator tree for it,

       input: the program to be analyzed, entered from the console as a string
              P ::=  D ; C
       output: the operator tree,  PTREE ::=  [ DLIST, CLIST ]
    """
    global wordlist
    import scanner   # import and link to scanner module
    print "Type program; OK to do it on multiple lines; terminate with  !"
    print "  as the first symbol on a line by itself:"
    print
    text = ""
    line = raw_input("" )
    # Accumulate console lines until one starting with "!" terminates input.
    while line[0] != "!" :
        text = text + " " + line
        line = raw_input("" )

    print text
    wordlist = scanner.scan(text)   # initialize parser with program's words
    print wordlist
    getNextword()
    # assert: invariant for nextword and wordlist holds true here
    # Parse declarations first, then commands, per the grammar  P ::= D ; C
    dtree = parseDECLIST()
    ctree = parseCMDLIST()
    tree =[dtree, ctree]
    # assert: tree holds the entire operator tree for  text
    #print tree
    if nextword != EOF :
       error("there are extra words")
    return tree
Exemple #9
0
 def update(self):
     """Rescan the root and rebuild the file table if anything changed."""
     self.updating=True
     newRows=scanner.scan(self.root,self.dataDict)
     modified=False
     # A shrink always counts as modified; otherwise compare each new row
     # against the cached dataDict entry (rows are keyed on column 0).
     if len(newRows)<len(self.dataDict):
         modified=True
     else:
         for row in newRows:
             if row[0] in self.dataDict:
                 if row!=self.dataDict.get(row[0]):
                     modified=True
                     break
             else:
                 modified=True
                 break

     if modified:
         self.dataDict={}
         self.fileTable.setRowCount(len(newRows))
         # Per-column item flags: col 0 checkable, col 2 editable; all enabled.
         flags=[QtCore.Qt.ItemIsUserCheckable,0,QtCore.Qt.ItemIsEditable]
         for j in xrange(0,3):
             flags[j]=flags[j]|QtCore.Qt.ItemIsEnabled
         for i in xrange(0,len(newRows)):
             row=newRows[i]
             for j in xrange(0,3):
                 item=QtGui.QTableWidgetItem(row[j])
                 item.setFlags(flags[j])
                 if j==0:
                     item.setCheckState(QtCore.Qt.Unchecked)
                 self.fileTable.setItem(i,j,item)
             self.dataDict[row[0]]=row
         self.fileTable.resizeColumnToContents(0)
         self.fileTable.resizeColumnToContents(1)
     self.updating=False
Exemple #10
0
    def test_finds_moved_files(self):
        """A file whose mtime/digest match a known backup file but whose
        path changed must be reported as moved (not added+removed)."""
        db = file_db.FileDB(os.path.join(self.tmpdir, 'test.db'))
        backup_dir = os.path.join(self.tmpdir, 'to_backup')
        os.mkdir(backup_dir)
        backup_id = db.add_backup('local', 'test_backup', backup_dir)

        loose_file_local_path = 'loose-file.txt'
        loose_file_moved_path = 'loose-file-moved.txt'
        loose_file_mtime = 42.123
        loose_file_digest = b'\xe8\x0bP\x17\t\x89P\xfcX\xaa\xd8<\x8c\x14\x97\x8e'
        # Expected change entry: the new path plus the old path it moved from.
        loose_file_entry = scanner.ScanResult.FileEntry(backup_id, loose_file_moved_path, old_local_path=loose_file_local_path, update_time=loose_file_mtime, digest=loose_file_digest)

        # The database knows the file under its ORIGINAL path...
        backup_file_id = db.add_backup_file(backup_id, '', loose_file_mtime, loose_file_digest)
        db.add_local_file(backup_id, loose_file_local_path, backup_file_id)

        # ...but on disk it exists only under the MOVED path.
        loose_file_path = '%s/%s' % (backup_dir, loose_file_moved_path)
        with open(loose_file_path, 'w') as loose:
            loose.write('abcdef')
        os.utime(loose_file_path, (0, loose_file_mtime))

        changes = scanner.scan('local', 'test_backup', db)

        self.assertCountEqual(changes.added, [])
        self.assertCountEqual(changes.modified, [])
        self.assertCountEqual(changes.moved, [ loose_file_entry ])
        self.assertCountEqual(changes.removed, [])
Exemple #11
0
def search(bot, args):
	"""Google-search command handler.

	args[0] is the command name; an optional leading "cr=XX" argument
	restricts results to a two-letter country code.  Returns the first
	result URL (with a YouTube title appended when applicable), or a
	usage/error string.
	"""
	if len(args) >= 2:
		title = ""

		if args[1].startswith("cr="):
			expr = __import__('re').search('cr=([a-zA-Z]{2})$', args[1])
			if expr:
				country = expr.group(1).upper()
				if country == "CN":
					return "google.cn? hah."
			else:
				return "Invalid country code."
			terms = ' '.join(args[2:])
		else:
			country = ""
			terms = ' '.join(args[1:])
		result = urllib2.urlopen("http://ajax.googleapis.com/ajax/services/search/web?v=1.0&safe=off&q=%s&gl=%s" % (urllib2.quote(terms), country), timeout = 5)
		jsondata = json.load(result)
		try:
			url = jsondata['responseData']['results'][0]['unescapedUrl'].encode('utf-8')
			# YouTube links additionally get their title fetched via the scanner.
			if url.startswith("http://www.youtube.com/") or url.startswith("https://www.youtube.com/"):
				import scanner
				title = "\n" + scanner.scan(bot, url)
			if country:
				return "From %s only: %s%s" % (country, url, title)
			else:
				return "%s%s" % (url, title)
		except IndexError:
			return "Your search did not return any results."
	else:
		return "Usage: !%s [cr=<2-letter country code>] <query>" % args[0]
Exemple #12
0
def parse() :
    """reads the input program, initializes the  nextword and wordlist,
       and builds an operator tree

       input: the program to be analyzed, entered from the console as a string
       output: the operator tree
    """
    global wordlist
    import scanner   # import and link to scanner module
    print "Type program; OK to do it on multiple lines; terminate with  !"
    print "  as the first symbol on a line by itself:"
    print
    text = ""
    line = raw_input("" )
    # Accumulate console lines until one starting with "!" terminates input.
    while line[0] != "!" :
        text = text + " " + line
        line = raw_input("" )

    wordlist = scanner.scan(text)   # initialize parser with program's words
    getNextword()
    # assert: invariant for nextword and wordlist holds true here

    tree = parsePROGRAM()
    # assert: tree holds the entire operator tree for  text
    #print "The parsed program is:"
    #print tree
    #print
    if nextword != EOF :
       error("there are extra words")
    return tree
Exemple #13
0
def scan_thread():
    '''image scanning thread: downsample queued frames and scan for regions'''
    state = mpstate.camera_state

    # Poll the unload event so the thread exits promptly when asked to stop.
    while not state.unload.wait(0.02):
        try:
            # keep the queue size bounded, so we don't run out of memory;
            # when backlogged (>25 queued) one extra get() drops a frame
            if state.scan_queue.qsize() > 25:
                (frame_time,im) = state.scan_queue.get(timeout=0.2)
            (frame_time,im) = state.scan_queue.get(timeout=0.2)
        except Queue.Empty:
            continue

        t1 = time.time()
        #im_full = numpy.zeros((600,800,3),dtype='uint8')
        im_640 = numpy.zeros((480,640,3),dtype='uint8')
        #scanner.debayer_full(im, im_full)
        #scanner.downsample(im_full, im_640)
        #cv.SaveImage("/tmp/downsampled.jpg",cv.fromarray(im))
        scanner.downsample(im, im_640)
        #cv.SaveImage("/tmp/downsampled.jpg",cv.fromarray(im))
        #cv.SaveImage("/tmp/downsampled_640.jpg",cv.fromarray(im_640))
        regions = cuav_region.RegionsConvert(scanner.scan(im_640))
        t2 = time.time()
        state.scan_fps = 1.0 / (t2-t1)
        state.scan_count += 1
        print regions
        #regions = cuav_region.filter_regions(im, regions, min_score=state.minscore)
        # min_score=0 keeps every region (debug setting -- confirm intended).
        regions = cuav_region.filter_regions(im, regions, min_score=0)
        #regions = cuav_region.filter_regions(im_full, regions, min_score=state.minscore)
        print regions
        state.region_count += len(regions)
        # Drop results rather than let the transmit queue grow without bound.
        if state.transmit_queue.qsize() < 50:
        #if state.transmit_queue.qsize() < 2:
            state.transmit_queue.put((frame_time, regions, im, im_640))
Exemple #14
0
def gc():
    """Garbage-collect database records and snapshot files.

    Builds the set of dirpaths reported by the scanner, then removes
    Library rows, Movie/Person/Company documents and snapshot files that
    are no longer referenced.  Prints a per-collection removal count.
    """
    libmap = set()    # dirpaths reported by the scanner
    collmap = {
            'Movie': set(),
            'Person': set(),
            'Company': set(),
            }
    snapmap = set()   # snapshot filenames still referenced by Library rows

    result = scanner.scan()
    for dirpath, title, year in result:
        libmap.add(dirpath)

        item = db.Library.find_one({ 'dirpath':  dirpath })
        if not item:
            continue

        collmap['Movie'].add(item['ID'])

        movie = db.Movie.find_one({ 'ID': item['ID'] })
        if not movie:
            continue

        # Collect every {type, ID} reference nested in the movie document.
        for key in movie.keys():
            if type(movie[key]) is list:
                for i in movie[key]:
                    if type(i) is dict and 'type' in i and 'ID' in i:
                        collmap[i['type']].add(i['ID'])
            elif type(movie[key]) is dict and 'type' in movie[key] and 'ID' in movie[key]:
                collmap[movie[key]['type']].add(movie[key]['ID'])

    # Drop Library rows whose dirpath was not seen; remember live snapshots.
    count = 0
    for i in db.Library.find({}, { 'dirpath': 1, 'file': 1 }):
        if i['dirpath'] not in libmap:
            db.Library.remove({ 'dirpath': i['dirpath'] })
            count += 1
        for f in i['file']:
            if 'snapshot' in f:
                snapmap.add(f['snapshot'])

    print '[GC] Library: %d' % count

    # Drop unreferenced documents from each collection.
    for coll in collmap:
        count = 0
        for i in db[coll].find({}, { 'ID': 1 }):
            if i['ID'] not in collmap[coll]:
                db[coll].remove({ 'ID': i['ID'] })
                count += 1
        print '[GC] %s: %d' % (coll, count)

    # Finally delete snapshot files nothing references any more.
    count = 0
    snapdir = os.path.join(config.STATIC_DIR, 'snapshot')
    for entry in os.listdir(snapdir):
        snapfile = os.path.join(snapdir, entry)
        if entry not in snapmap and os.path.isfile(snapfile):
            os.remove(snapfile)
            count += 1
    print '[GC] Snapshot: %d' % count
Exemple #15
0
def determine_namespaces(source):
    """Return a list of the namespaces used in the data at this
    source.

    Talk to local scanner, or use the web to talk to the source's
    scanner?  Nah, in any case use our own scanner, which may talk to
    the source's scanner.
    """
    # list() materializes the scan results directly; the previous
    # [x for x in ...] comprehension was a redundant element-wise copy.
    return list(scanner.scan(source))
Exemple #16
0
def loop_scanning():
    """Poll the scanner once a minute while the scanner_work flag is set."""
    while scanner_work != False:
        found = scanner.scan(False)
        if found != False:
            # A new job arrived: display it and play the notification sound.
            write_text(found['title'], found['href'])
            mixer.music.play()
        sleep(60)
Exemple #17
0
    def test_works_when_backup_dir_missing(self):
        """Scanning a backup whose directory was never created yields no changes."""
        database = file_db.FileDB(os.path.join(self.tmpdir, 'test.db'))
        missing_dir = os.path.join(self.tmpdir, 'to_backup')
        database.add_backup('local', 'test_backup', missing_dir)

        result = scanner.scan('local', 'test_backup', database)

        # Every change bucket must be empty.
        for bucket in (result.added, result.modified, result.moved, result.removed):
            self.assertFalse(bucket)
Exemple #18
0
	def parseJava(self, javaFile, javapFiles):
		"""Correlate javap disassembly with the Java source file.

		javaFile: path to the .java source (opened and tokenized here).
		javapFiles: iterable of javap output file objects, read in order.
		Returns a dict with class names, per-method refs, annotated source
		lines, a line table, the chosen class_name and a has_main flag.
		"""
		#initialization junk
		sourceData = {}
		sourceData['class_names'] = []
		sourceData['constants'] = []
		sourceData['method_refs'] = collections.defaultdict(list)
		sourceData['lines'] = []
		sourceData['line_table'] = collections.defaultdict(list)
		sourceData['has_main'] = False

		for javapFile in javapFiles:
			#get the main class name
			for line in javapFile:
				m = re.match(r"^.*(?:class|interface) ([^\s<>]+).*?$", line)
				if m:
					class_name = m.group(1)
					sourceData['class_names'].append(class_name)
					break

			for line in javapFile:
				#get constant pool
				# NOTE(review): if no "Constant pool:" line exists, `constants`
				# stays unbound for the readInstructions call below -- confirm.
				if line == "Constant pool:\n":
					constants = readConstantPool(javapFile)
					break

			prev_line_num = 0
			for line in javapFile:
				#get the instruction list for a given method
				m = re.match(method_declaration_re,line)
				if m:
					(method_name, m_types) = (m.group(1), m.group(2))
					#prev_line_num = len(sourceData['line_table'])
					instructions = readInstructions(javapFile, constants)
					(sourceData['line_table'], first_line_read,last_line_read) = readLineTable(javapFile, instructions, sourceData['line_table'])
					if prev_line_num > first_line_read:
						prev_line_num = 0
					sourceData['method_refs'][method_name].append((class_name, m_types, prev_line_num, first_line_read))
					prev_line_num = last_line_read
		if 'main' in sourceData['method_refs']:
			sourceData['has_main'] = True
		#get the info for each line in the source files
		line_tokens = []
		for line_num, toks in enumerate(scan(open(javaFile))):
			line = []
			for tok in toks:
				annotate_token(tok, line_num+1, sourceData)
				line.append(tok)
			line_tokens.append(line)
		sourceData['lines'] = line_tokens
		# BUG FIX: `len(...) is 0` tested identity, not equality (fragile and
		# a SyntaxWarning on modern CPython) -- use == instead.
		if len(sourceData['class_names']) == 0:
			sourceData['class_name'] = 'Unknown'
		else:
			sourceData['class_name'] = min(sourceData['class_names'], key=len)
		return sourceData
Exemple #19
0
 def _setup(self):
     """Capture the scan state and compute the set of files that need running."""
     if self.state is None:
         # Prefer an already-computed global state; otherwise scan fresh.
         self.state = global_state if global_state is not None else scanner.scan()
     self.start_time = time.time()
     affected, bad = self.state.affected, self.state.bad
     self.files_to_run = set(affected) | set(bad)
     if len(affected):
         info("changed files: %s" % (get_paths(*affected),))
     if len(bad):
         info("bad files: %s" % (get_paths(*bad),))
Exemple #20
0
 def highlight(self, line):
     """Re-apply syntax-highlight tags to one line of the text widget."""
     start, end = '%d.0' % line, '%d.end' % line
     line_txt = self.get(start, end)
     # Clear all highlight tags on the line before re-tagging.
     for tag in ('keyword', 'string', 'error'):
         self.tag_remove(tag, start, end)
     for token in scanner.scan(line_txt + '\n'):
         if token.type in scanner.keywords:
             tag = 'keyword'
         elif token.type in ('string', 'error'):
             tag = token.type
         else:
             continue
         self.tag_add(tag, '%d.%d' % (line, token.start), '%d.%d' % (line, token.end))
Exemple #21
0
def setup():
	"""Find a bluetooth OBD adapter, connect, and configure the ELM interface."""
	adapters = scanner.scan("OBD")
	if len( adapters ) == 0:
		print "[!]\tNo adapters were found that have 'OBD' in their name.\nExiting..."
	else:
		# Use the first adapter found; expose it module-wide.
		global adapter
		adapter = OBD( type="bluetooth", addr=adapters[0]['addr'], name=adapters[0]['name'], baud=BAUD )
		adapter.bind()
		adapter.connect()
		# ELM-style AT commands: ate0/atl0/ath0 presumably disable echo,
		# linefeeds and headers -- confirm against the adapter docs.
		print(SendOBD("ate0"))
		print(SendOBD("atl0"))
		print(SendOBD("ath0"))
Exemple #22
0
def main():
    """Print shell commands that would create missing .bigodb.nfo files.

    For every scanned library entry whose .nfo lacks an IMDb id, emit an
    `echo ... > .bigodb.nfo` line.  Output only -- nothing is written here.
    """
    db = pymongo.Connection().bigodb
    result = scanner.scan()
    for dirpath, title, year in result:
        item = db.Library.find_one({ 'dirpath': dirpath })
        if not item:
            continue

        nfo = nfoutil.get_nfo(dirpath)
        imdb_id = nfoutil.extract_imdb_id(nfo)

        if not imdb_id:
            print 'echo "http://www.imdb.com/title/tt%s/" > %s/.bigodb.nfo' % (item['ID'], dirpath)
Exemple #23
0
def scan_thread():
    """thread for scanning for Joe"""
    # Running totals for scan timing (accumulated but not reported here).
    total_time = 0
    count = 0
    while True:
        frame_time, im = state.scan_queue.get()
        t0 = time.time()
        # NOTE(review): regions is overwritten each iteration without being
        # consumed -- confirm results are meant to be used/transmitted.
        regions = scanner.scan(im)
        t1 = time.time()
        total_time += t1 - t0
        count += 1
        # Discard the next opts.scan_skip frames to limit the scan rate.
        for i in range(opts.scan_skip):
            frame_time, im = state.scan_queue.get()
def get_latest_tag():
    """Scan buffer lines backwards from the cursor for the most recent tag.

    Returns whatever scan() yields for the first matching line, or None
    when no line up to the cursor matches.
    """
    cl, cc = vim.current.window.cursor
    cb = vim.current.buffer
    # doesn't want </tag>
    for lineno in reversed(xrange(cl)):
        line = cb[lineno]
        # first line begins from current col
        if is_current(lineno):
            line = line[0 : cc + 1]
        result = scan(line, lineno)
        # Idiom fix: `result is not None` rather than `not result is None`.
        if result is not None:
            return result
    return None
Exemple #25
0
def bluetooth():
	"""Scan for a bluetooth OBD-II adapter and connect to the first match."""
	adapters = scanner.scan( "OBD" )

	if len( adapters ) == 0:
		print "[!]\tNo adapters were found that have 'OBD' in their name.\nExiting..."

	# Adapters were found.
	else:
		# Grab the first adapter returned.
		# adapter = OBD( adapters[0]['addr'], adapters[0]['name'], BAUD )
		adapter = OBD( type="bluetooth", addr=adapters[0]['addr'], name=adapters[0]['name'], baud=BAUD )
		adapter.bind()
		adapter.connect()
Exemple #26
0
def get_tokens(filename, options):
    """Scan *filename* and return its token list.

    During the scanner-only stage the tokens are also printed; stdlib
    files are skipped unless explicitly requested via print_stdlib.
    """
    with open(filename, 'r') as f:
        tokens = scanner.scan(f.read())

    if options.stage == 'scanner':
        # If stdlib files were not included, we print regardless.
        # Otherwise, print only if it's not an stdlib file, unless overridden
        # by JooscOptions.print_stdlib.
        # Idiom fix: truth-test the flags instead of comparing to True/False.
        # NOTE(review): `opts` (module-level?) vs the `options` parameter
        # looks suspicious -- confirm it is not a typo.
        if not options.include_stdlib or filename not in opts.stdlib_files or \
                options.print_stdlib:
            print("Tokens returned from scanner for %s:\n" % filename,
                pprint.pformat(tokens))

    return tokens
def bluetooth():
	"""Bluetooth OBD-II Range Test

	This method manages all range testing of a bluetooth OBD-II adapter.
	Assumes module-level BAUD, csvfile, FileManager, plotter, getColumns
	and test() are available; results are written to csvfile and plotted.
	"""

	# Scan for all adapters.
	adapters = scanner.scan( "OBD" )

	# No adapters were found.
	if len( adapters ) == 0:
		print "[!]\tNo adapters were found that have 'OBD' in their name.\nExiting..."

	# Adapters were found.
	else:
		# Grab the first adapter returned.
		# adapter = OBD( adapters[0]['addr'], adapters[0]['name'], BAUD )
		adapter = OBD( type="bluetooth", addr=adapters[0]['addr'], name=adapters[0]['name'], baud=BAUD )
		adapter.bind()
		adapter.connect()

		# Setup the file manager.
		fm = FileManager()

		# Write header to CSV file.
		fm.writeCSV( csvfile, [ "Iteration", "RX/TX Time" ] )

		# Save the starting time.
		starttime = datetime.now()

		###
		# Run the range test.
		###
		test( adapter, fm )

		# Get the time when testing completes.
		finishtime = datetime.now()

		# Create a plot of the values.
		columns = getColumns( fm.readCSV( csvfile ) )

		# Create plot.  Row 0 is the CSV header, hence the [1:] slices.
		figurename = plotter.generateLinePlot( columns["Iteration"][1:len(columns["Iteration"])], columns["RX/TX Time"][1:len(columns["RX/TX Time"])], "Bluetooth Range Test", "Iteration", "(RX - TX) Time [sec]", ("rangetest_" + finishtime.strftime( "%H_%M_%S" )), "png" )

		# Write ending results.
		print "\tTime to completion: " + str( finishtime - starttime )
		print "\tCSV File: " + csvfile
		print "\tPlot Image: " + figurename
Exemple #28
0
    def run(self):
        """Background worker: rescan the library and persist it to disk."""
        global scan_running, library, image_dict
        # Runs on the worker thread; all UI updates go through InvokePostMessage.
        scanned_lib, scanned_images = scanner.scan(
            self.basepath, lambda msg: self.InvokePostMessage(msg))
        library = scanned_lib
        image_dict = scanned_images
        self.InvokePostMessage("Library scan complete", "libraryUpdate")
        # Persist the freshly built db to file.
        try:
            with open("astarael.db", "wb") as f:
                pickle.dump((library, image_dict), f)
        except Exception as ex:
            self.InvokePostMessage("Error writing to database: " + str(ex))
        else:
            self.InvokePostMessage("Library written to database")

        scan_running = False
Exemple #29
0
    def test_finds_modified_files(self):
        """Only the file whose CONTENT (digest) changed is reported as
        modified; a file merely touched (new mtime, same digest) is not."""
        db = file_db.FileDB(os.path.join(self.tmpdir, 'test.db'))
        backup_dir = os.path.join(self.tmpdir, 'to_backup')
        os.mkdir(backup_dir)
        backup_id = db.add_backup('local', 'test_backup', backup_dir)

        # File 1: same bytes, newer mtime -> must NOT appear in changes.
        loose_file_local_path = 'loose-file-touched-but-contents-same.txt'
        loose_file_mtime = 42.123
        loose_file_mtime_updated = loose_file_mtime + 3
        loose_file_digest = b'\xe8\x0bP\x17\t\x89P\xfcX\xaa\xd8<\x8c\x14\x97\x8e'
        # NOTE(review): loose_file_entry is built but never asserted on below
        # -- confirm that is intentional.
        loose_file_entry = scanner.ScanResult.FileEntry(backup_id, loose_file_local_path, update_time=loose_file_mtime_updated, digest=loose_file_digest)

        backup_file_id = db.add_backup_file(backup_id, '', loose_file_mtime, loose_file_digest)
        db.add_local_file(backup_id, loose_file_local_path, backup_file_id)

        loose_file_path = '%s/%s' % (backup_dir, loose_file_local_path)
        with open(loose_file_path, 'w') as loose:
            loose.write('abcdef')
        os.utime(loose_file_path, (0, loose_file_mtime_updated))

        os.mkdir('%s/empty-subdir' % backup_dir)

        os.mkdir('%s/subdir' % backup_dir)

        # File 2: contents (and therefore digest) changed -> expected in modified.
        file_in_subdir_local_path = 'subdir/file-in-subdir-contents-changed.txt'
        file_in_subdir_path = '%s/%s' % (backup_dir, file_in_subdir_local_path)
        file_in_subdir_mtime = 84.246
        file_in_subdir_mtime_updated = 84.246 + 3
        file_in_subdir_digest = b'&\xe1b\xd0\xb5paA\xbd\xb9T\x90\n\xeb\xe8\x04'
        file_in_subdir_digest_updated = b'\xd2\xec\x9b2\xd6tAM\x11\x125>\xa8\x80w\x9b'
        file_in_subdir_entry = scanner.ScanResult.FileEntry(backup_id, file_in_subdir_local_path, update_time=file_in_subdir_mtime_updated, digest=file_in_subdir_digest_updated)

        backup_file_id = db.add_backup_file(backup_id, '', file_in_subdir_mtime, file_in_subdir_digest)
        db.add_local_file(backup_id, file_in_subdir_local_path, backup_file_id)

        with open(file_in_subdir_path, 'w') as file_in_subdir:
            file_in_subdir.write('ghijklmnop')
        os.utime(file_in_subdir_path, (0, file_in_subdir_mtime_updated))

        changes = scanner.scan('local', 'test_backup', db)

        self.assertCountEqual(changes.added, [])
        self.assertCountEqual(changes.modified, [ file_in_subdir_entry ])
        self.assertCountEqual(changes.moved, [])
        self.assertCountEqual(changes.removed, [])
Exemple #30
0
def main():
    """Scan rootdir for files, parse their definitions, then analyze and fix.

    The hard-coded `fix_methods` flag below selects between the
    method-declaration fixer and the fix2 pipeline; only the else-branch
    currently runs.
    """
    p = argparse.ArgumentParser()
    p.add_argument('rootdir', metavar='rootdir', nargs='?', default='.', help='the directory to scan (optional: default is the current directory)')
    args = p.parse_args()

    root = os.path.abspath(args.rootdir)

    sanity.ensure(root)

    files = scanner.scan(root)
    defs = list(parser.parse(root, files))

    # Developer toggle -- flip to re-enable the method-declaration fixer.
    fix_methods = False
    if fix_methods:
        results = analyzer.analyze(root, files, defs)
        fixer.fix_method_declarations(root, defs, results)
    else:
        a2 = analyzer.analyze2(root, files, defs)
        fixer.fix2(root, defs, files, a2)
Exemple #31
0
import os
import klopfer
import scanner
from sys import argv

if argv[1] == '--debug':
    image_path = argv[2]
    scanner = scanner.Scanner(image_path)
    informations = scanner.scan()
    for information in informations:
        print 'data: ' + information.data + ' center_x: ' + str(
            information.center_x)

else:
    import_dir = argv[1]
    export_dir = argv[2]

    if not os.path.isdir(import_dir):
        print 'Klopfer says: import directory is not a directory'

    if not os.path.isdir(export_dir):
        print 'Klopfer says: export directory is not a directory'

    klopfer = klopfer.Klopfer(import_dir, export_dir)
    try:
        klopfer.run()
    except Exception as error:
        print error
        try:
            klopfer.remove_image()
        except:
Exemple #32
0
from scanner import Scanner as scan

# Instantiate the Scanner (aliased above as `scan`) as the installer object.
installer = scan()
Exemple #33
0
from code_generator import CodeGenerator
from environment import Environment
from scanner import join_tokens, scan
from analyzer import Parser
#from environment import environment
import os

# Pipeline: read one expression, tokenize it, parse it into a tree, flatten
# the tree to a postorder instruction list, then evaluate that list.
expr = raw_input('expression: ')
tokens = join_tokens(scan(expr))
print("STEP 1")
print("Generated token list: ")
print(tokens)
parser = Parser(tokens)
tree = parser.parse()
print("STEP 2")
print("Parsing token list to binary tree")
print(tree)
code_generator = CodeGenerator(orders_list=[])
code_generator.postorder(tree)
code_generator.orders_list.append('end')  # terminator sentinel for the program
print("STEP 3")
print("Postorder")
print(code_generator.orders_list)
environment = Environment(code_generator.orders_list)
result = environment.count()
print("RESULT: ")
print(result)
Exemple #34
0
from scanner import scan
from utils import *
from takePicture import *
import cv2

# Capture step (presumably grabs a camera frame to ./images -- confirm),
# then run the document scanner on the saved image and post-process it.
video_capture()

cv2.namedWindow('original document', cv2.WINDOW_NORMAL)
cv2.resizeWindow('original document', 1200, 1200)

cv2.namedWindow('processed document', cv2.WINDOW_NORMAL)
cv2.resizeWindow('processed document', 1200, 1200)

# cv2.namedWindow('resized document', cv2.WINDOW_NORMAL)
# cv2.resizeWindow('resized document', 1200, 1200)

img = cv2.imread("./images/image0.jpg")
doc = scan(img)

# res = resize(doc)

cv2.imshow("original document", img)
kernel = np.ones((3, 3), np.uint8)  # np presumably re-exported by utils -- confirm

# Erode-then-dilate (morphological open) to clean up speckle noise.
doc = cv2.erode(doc, kernel, iterations=1)
doc = cv2.dilate(doc, kernel, iterations=1)
cv2.imshow("processed document", doc)

cv2.imwrite("./processed/processed3.jpg", doc)

cv2.waitKey(0)
for test_case in sorted(os.listdir(test_dir)):
    if args.to_test and i >= args.to_test:
        break
    if i < args.from_test - 1:
        i += 1
        continue
    test_case_dir = os.path.join(test_dir, test_case)
    output_dir = os.path.join(script_dir, "output")
    test_files = list(os.listdir(test_case_dir))
    input_file = os.path.join(test_case_dir,
                              test_files.pop(test_files.index("input.txt")))

    try:
        if test_case.startswith("TS"):
            scan(input_file)
        else:
            parse(input_file)
    except Exception as e:
        # raise e
        print("Execution failed:", str(e))
        fail = True
    else:
        fail = False

    if not fail:
        if test_case.startswith("TXX"):
            if os.name == "nt":
                tester_file = os.path.join(script_dir, "interpreter",
                                           "tester_Windows.exe")
            elif os.name == "posix":
Exemple #36
0
            if tag == 'replace':
                for t in expected[i1:i2]:
                    print('-', pretty_token(t))
                for t in actual[j1:j2]:
                    print('+', pretty_token(t))


cwd = pathlib.Path.cwd()

# Build the scanner regexp from the keyword/token definition files.
# NOTE(review): these two handles (and path.open() below) are never
# closed explicitly.
keywords, tokens = open('keywords.json'), open('tokens.json')
regexp = scanner.Regexp.from_files(keywords, tokens)
for path in (cwd / 'examples').glob('*.b'):
    filename = path.relative_to(cwd / 'examples')
    expected_filename = (cwd / 'tests' / filename).with_suffix('.out')
    content = path.open().read()
    # `tokens` is rebound here, shadowing the tokens.json handle above.
    tokens = [reify_token(t) for t in scanner.scan(0, regexp, content)]
    if sys.argv[1] == 'load':
        # Compare freshly scanned tokens against the pickled expectation.
        with open(expected_filename, 'rb') as f:
            expected = pickle.load(f)
        sm = difflib.SequenceMatcher(a=expected, b=tokens, autojunk=False)
        if USE_GROUPS:
            opcodes = list(sm.get_grouped_opcodes())
            if opcodes != []:
                # NOTE(review): f-string has no placeholder -- the file
                # name was probably meant to be interpolated; confirm.
                print(f'different result for (unknown)')
                print_diff_groups(opcodes, expected, tokens)
                break
        else:
            opcodes = list(sm.get_opcodes())
            # Identical sequences yield a single full-length 'equal' op.
            if opcodes != [('equal', 0, len(tokens), 0, len(tokens))]:
                print(f'different result for (unknown)')
                print_diff(opcodes, expected, tokens)
Exemple #37
0
	def scan_library(self):
		"""Re-index the library and report how many songs were found.

		Delegates to the module-level scan(); returns its result dict
		(expected to carry at least 'count' and 'time' keys).
		"""
		stats = scan(self)
		elapsed = str(stats['time']).split('.')[0]
		print('Indexed {} songs in {}'.format(stats['count'], elapsed))
		return stats
Exemple #38
0
import loader, scanner, updater
import threading

# Load initial data.
loader.load()

# threads = list()
# threads.append(threading.Thread(target=scanner.scan))
# threads.append(threading.Thread(target=updater.update))

# Run the scanner synchronously (the threaded variant above is disabled).
scanner.scan()
Exemple #39
0
def main(args):
    """Run the MIDI->CSV->MIDI processing pipeline with progress bars.

    Steps: convert .mid to CSV, clean, split channels, drop empty
    channels, split tracks, convert back to MIDI, and (unless args.keep)
    remove the intermediary CSV artifacts.

    Args:
        args: parsed CLI namespace; reads input_dir, output_dir, keep.
    """
    with tqdm(total=(6 if args.keep else 7), unit="step") as bar:

        def run_step(message, get_files, worker):
            # One pipeline step: announce it, gather its file list,
            # fan `worker` out over the shared pool, echo any error
            # strings the workers return, and tick the step bar.
            # (The original repeated this loop seven times verbatim.)
            tqdm.write(message)
            files = get_files()
            for e in tqdm(worker_pool.imap_unordered(worker, files),
                          total=len(files),
                          unit="files"):
                if e:
                    tqdm.write(e)
            bar.update(1)

        run_step("Converting input data...",
                 lambda: [(args, f) for f in scan(args.input_dir, "*.mid")],
                 midi_to_csv)
        run_step("Cleaning input data...",
                 lambda: scan(args.output_dir, "**/*_full.csv"),
                 clean)
        run_step("Splitting channels...",
                 lambda: scan(args.output_dir, "**/*_full.csv"),
                 extract_channels)
        run_step("Removing empty channels...",
                 lambda: scan(args.output_dir, "**/channel_*.csv", True),
                 check_channel)
        run_step("Splitting tracks...",
                 lambda: scan(args.output_dir, "**/channel_*.csv", True),
                 extract_tracks)
        run_step("Converting output data...",
                 lambda: (scan(args.output_dir, "**/channel_*.csv", True)
                          + scan(args.output_dir, "**/track_*.csv", True)),
                 csv_to_midi)

        if not args.keep:
            run_step("Removing intermediary artifacts...",
                     lambda: [f["path"]
                              for f in scan(args.output_dir, "**/*.csv", True)],
                     remove)

        tqdm.write("Finished processing")
Exemple #40
0
def scan_loop(sharedList):
    """Run one scan pass, announcing the start on stdout.

    sharedList is handed straight to scanner.scan -- presumably mutated
    in place with results; TODO confirm scanner.scan's contract.
    """
    print "Scan starting"
    scanner.scan(sharedList)
Exemple #41
0
#!/usr/bin/env python
import socket
import sys
import listener
import scanner

# Print a nice banner with information on which host we are about to scan
print "_" * 60
print " "
print "Pentest.me port scanner and regular listener"
print "_" * 60

# Dispatch on the first CLI argument. NOTE(review): the bare except at
# the bottom also swallows unrelated failures (DNS errors, scanner
# crashes) and reports them all as a usage problem.
try:
    if (sys.argv[1] == "--scan-me"):
        # Scan our own externally resolvable address.
        ip = socket.gethostbyname(socket.getfqdn())
        scanner.scan(ip)

    if (sys.argv[1] == "--url"):
        #then sys.argv[2] == http://...
        ip = socket.gethostbyname(sys.argv[2])
        scanner.scan(ip)

    if (sys.argv[1] == "--listen-me"):
        print "Start Listener"
        # NOTE(review): bare module reference is a no-op -- presumably a
        # listener.<something>() call was intended here; confirm.
        listener

except:
    print 'using : \n pentestme.py --scan-me\n pentestme.py --url http://scan-to-port.com\n pentestme.py --listen-me '
    sys.exit()
Exemple #42
0
def main():
    """Scan a host and report known vulnerabilities per service.

    Parses CLI arguments, runs a port scan through the project scanner,
    queries threat intelligence for each discovered product/version,
    then writes the findings to args.FILE or to stdout.
    """
    # Parse arguments
    args = parse_args()

    # Conduct Scan: host is mandatory, port list optional.
    if (args.host is not None):
        if args.ports is not None:
            scan = scanner.scan(args.host, args.ports)
        else:
            scan = scanner.scan(args.host)
    else:
        print("Please enter a valid IP")
        return

    # Begin searching for vulnerabilities -- drill into the nmap-style
    # result structure.
    scan = scan['scan']

    # resultDict maps "<host>_<port>_<product version>" to the list of
    # vulnerabilities reported by threat intelligence.
    resultDict = {}
    for hkey, host in scan.items():
        for pkey, port in host['tcp'].items():
            svname = port['product'] + " " + port["version"]
            if (args.dumb is True):
                # Dont query TI
                # TODO
                pass
            else:
                # Do query TI. (local renamed from `id`, which shadowed
                # the builtin)
                entry_id = str(hkey) + "_" + str(pkey) + "_" + svname
                resultDict[entry_id] = queryTI(svname)
                # TODO: also run the explicit per-port tests stored in
                # the DB (see dbhandler.getTests) once they are vetted.

    # Clean results without vulnerabilities.
    resultDict = {k: v for k, v in resultDict.items() if len(v) > 0}

    if args.FILE is not None:
        # Join every vulnerability per service into one string.
        # Bug fix: the previous loop reassigned resultDict[k] for each
        # vuln, so only the LAST vulnerability survived into the file.
        for k, v in resultDict.items():
            resultDict[k] = "\n".join(str(vuln) for vuln in v)

        outputResults(args.FILE, resultDict)
    else:
        # output to stdout
        for k, v in resultDict.items():
            print("\nHost_Port_Service: " + k)
            for vuln in v:
                print(str(vuln) + "\n")
Exemple #43
0
def handler(event, context):
    """AWS Lambda entry point: ingest an uploaded S3 text file into RDS.

    Downloads the triggering object into /tmp, connects to MySQL, then
    reads every .TXT file found in /tmp line by line, dispatching each
    record to a type-specific insert (regType0..regType9) keyed on the
    record's first character, and logs per-type counters.

    event: S3 event payload (uses Records[0].s3.object.key).
    context: Lambda context object (unused).
    """
    BUCKET_NAME = 'thefelpsbucket01'
    file_obj = event['Records'][0]
    logger.info("FILE_OBJ: " + str(file_obj))
    mykey = str(file_obj['s3']['object']['key'])
    logger.info("MYKEY: " + mykey)
    s3 = boto3.resource('s3')
    try:
        s3.Bucket(BUCKET_NAME).download_file(mykey, '/tmp/' + mykey)
        logger.info("File downloaded from bucket to /tmp/")
        BASE_DIR = '/tmp/'
        txtfiles = []
        for file in os.listdir(BASE_DIR):
            if file.endswith(".TXT"):
                txtfiles.append(file)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            # NOTE(review): BASE_DIR and txtfiles stay unbound on this
            # path, so the code below would raise NameError -- confirm
            # whether an early return was intended here.
            logger.info("The object does not exist.")
        else:
            raise
    """
    DB connect and config
    """
    try:
        conn = pymysql.connect(rds_host,
                               user=name,
                               passwd=password,
                               db=db_name,
                               connect_timeout=5)
    except:
        logger.error(
            "ERROR: Unexpected error: Could not connect to MySql instance.")
        sys.exit()
    logger.info("SUCCESS: Connection to RDS mysql instance succeeded")

    # NOTE(review): this overwrites the /tmp listing built above --
    # presumably scanner.scan() returns an equivalent file list; verify.
    txtfiles = scanner.scan()
    """
    Loop through all files within '/tmp/'
    """
    for txtfile in txtfiles:
        filename = txtfile
        with open(os.path.join(BASE_DIR, filename), encoding='latin1') as file:
            data = file.readlines()
            # Per-record-type counters, logged at the end of each file.
            tipo0 = tipo1 = tipo2 = tipo3 = tipo4 = tipo5 = tipo9 = 0
            """
            This function fetches content from mysql RDS instance
            """
            item_count = 0
            with conn.cursor() as cur:
                # Create each record-type table, treating any failure as
                # "table already exists". NOTE(review): the first log
                # message says "Type 1" for the type-0 table, and the
                # bare excepts also hide real SQL errors.
                try:
                    cur.execute(db.create_type_0())
                except:
                    logger.info("Skipping creation of table Type 1")
                try:
                    cur.execute(db.create_type_1())
                except:
                    logger.info("Skipping creation of table Type 1")
                try:
                    cur.execute(db.create_type_2())
                except:
                    logger.info("Skipping creation of table Type 2")
                try:
                    cur.execute(db.create_type_3())
                except:
                    logger.info("Skipping creation of table Type 3")
                try:
                    cur.execute(db.create_type_4())
                except:
                    logger.info("Skipping creation of table Type 4")
                try:
                    cur.execute(db.create_type_5())
                except:
                    logger.info("Skipping creation of table Type 5")
                try:
                    cur.execute(db.create_type_9())
                except:
                    logger.info("Skipping creation of table Type 9")
                # Dispatch each line on its first character. Types 0-2
                # commit per row; types 3-5 and 9 rely on the commit
                # after this loop.
                for i in range(len(data)):
                    if data[i][0:1] == '0':
                        tipo0 += 1
                        regType0(data[i], cur)
                        conn.commit()
                    elif data[i][0:1] == '1':
                        tipo1 += 1
                        regType1(data[i], cur)
                        conn.commit()
                    elif data[i][0:1] == '2':
                        tipo2 += 1
                        regType2(data[i], cur)
                        conn.commit()
                    elif data[i][0:1] == '3':
                        tipo3 += 1
                        regType3(data[i], cur)

                    elif data[i][0:1] == '4':
                        tipo4 += 1
                        regType4(data[i], cur)

                    elif data[i][0:1] == '5':
                        tipo5 += 1
                        regType5(data[i], cur)

                    elif data[i][0:1] == '9':
                        tipo9 += 1
                        regType9(data[i], cur)
                logger.info("\n**Register Counter**")
                logger.info("tipo_0: " + str(tipo0))
                logger.info("tipo_1: " + str(tipo1))
                logger.info("tipo_2: " + str(tipo2))
                logger.info("tipo_3: " + str(tipo3))
                logger.info("tipo_4: " + str(tipo4))
                logger.info("tipo_5: " + str(tipo5))
                logger.info("tipo_9: " + str(tipo9))
                logger.info("*******************")
            conn.commit()
    # NOTE(review): sits outside the for loop, so only the last
    # processed file is deleted -- confirm whether per-file removal
    # was intended.
    os.remove(os.path.join(BASE_DIR, filename))
Exemple #44
0
import scanner
import codegen
import Parser
import sys
import os

if __name__ == '__main__':
    # Usage: <prog> EXPRESSION -- tokenize the expression, convert it to
    # RPN, generate code into ./a.out and execute it with python3.
    if len(sys.argv) == 1:
        print("No argument")
        sys.exit()
    exp = sys.argv[1]
    Parse_ch = scanner.scan(exp)
    RPN = ''.join(Parser.expr(Parse_ch))
    codegen.gen(RPN)
    os.system("python3 ./a.out")
# Scan the source file for tokens. The shape of `tokens` depends on the
# project scanner -- presumably a sequence of token strings; confirm.
from scanner import scan

tokens = scan('input.txt')

# Stack for parsing
class Stack:
    data = []

    def push(self, item):
        self.data.append(item)

    def pop(self):
        return self.data.pop()

    def empty(self):
        return len(self.data) == 0

    def top(self):
        return self.data[-1]


stack = Stack()
stack.push('pgm')  # seed with the grammar's start symbol

# Pointer for iterating over the token list during parsing.
pointer = 0

# Import grammar definitions used by the parse loop.
from grammar import Grammar
Exemple #46
0
            response.headers.get('server')
        ]
        if exists(definition_path):
            log.warning('Definition {name} already exists'.format(name=name))
            exit()
        # Save definition
        f = file(definition_path, 'w')
        f.write(json.dumps(template, indent=4))
        print template
        exit()

    ###########################################################################
    # Scan
    #
    log.debug('Scanning...')
    hosts = scan(args.hosts, args.port, args.fast)
    if not hosts:
        log.debug(
            'No hosts found with port {port} open.'.format(port=args.port))
        exit()

    ###########################################################################
    # Fingerprint
    #

    # Load definitions DB
    definitions_db = {}
    for definition_path in glob('definitions/*.json'):
        try:
            definitions_db[basename(definition_path[:-5])] = json.loads(
                open(definition_path).read())
Exemple #47
0
import argparse
import sys
import socket
import scanner
from scanner import scan


# Command-line interface: a port interval, a target host and a scan
# mode are all required.
parser = argparse.ArgumentParser(prog='rainfall')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
parser.add_argument('-p', '--ports', nargs=2, required=True, help='Port interval to scan')
parser.add_argument('-t', '--target', required=True, help='Target host')
parser.add_argument('-m', '--mode', nargs=1, required=True, help='scan mode: 1-syn, 2-xmas, 3-fin, 4-null, 5-ack ')


args = parser.parse_args()

# Validate the port interval. Bug fix: ValueError is now caught too, so
# a non-numeric port prints the usage error instead of a raw traceback
# (the original only handled AssertionError).
try:
    beginPort = int(args.ports[0])
    endPort = int(args.ports[1])
    assert beginPort > 0 and endPort > 0 and beginPort <= endPort
except (AssertionError, ValueError):
    print "[ERROR] Port range is invalid - startPort must be <= endPort, both of which > 0"
    sys.exit()


target = args.target
mode = args.mode

# Delegate the actual scan; mode[0] is the numeric scan-mode selector.
scan(target, beginPort, endPort, int(mode[0]))
Exemple #48
0
def parse(inputStr):
    """Parse a propositional-logic expression string into an AST.

    Tokenizes inputStr with scanner.scan, then applies Dijkstra's
    shunting-yard algorithm with two stacks: `operators` (which also
    holds open parentheses and negations) and `operands`.

    To deal with the ~ operator, we push it onto the operator stack;
    when we read an operand, we repeatedly pop off negations until none
    remain (handled inside addOperand).

    Returns a dict with "ast" (the root node) and "variables" (from the
    scanner). Any syntax problem is reported through parseError.
    """
    scanResult = scanner.scan(inputStr)
    tokens = scanResult["tokens"]

    operators = []
    operands = []

    # if true, we are expecting an operand, else, we need an operator
    needOperand = True

    # Scan through the tokens
    for currToken in tokens:
        if (needOperand):
            # if it is an operand, push it on the operand stack
            if isOperand(currToken):
                addOperand(wrapOperand(currToken), operands, operators)
                needOperand = False

            # if it is a parenthesis or negation, push it on operator stack.
            elif currToken["type"] == '(' or currToken["type"] == '~':
                operators.append(currToken)

            elif currToken["type"] == scanner.scannerConstantEOF:
                # if the operator stack is empty, the input was empty
                if len(operators) == 0:
                    parseError("", 0, 0)

                # if the operator stack has an ( on top, it is unmatched
                if topOf(operators)["type"] == '(':
                    parseError(
                        "Open parenthesis has no matching close parenthesis",
                        topOf(operators)["start"],
                        topOf(operators)["end"])

                # otherwise, it is an operator with no operand.
                parseError("This operator is missing an operand",
                           topOf(operators)["start"],
                           topOf(operators)["end"])

            else:
                # (fixed typo: message read "excepting")
                parseError("We are expecting a variable", currToken["start"],
                           currToken["end"])

        # We are expecting either an operator or a close parenthesis
        else:
            if (isBinaryOperator(currToken)
                ) or currToken["type"] == scanner.scannerConstantEOF:
                # While there are high priority operators at the top of stack, evaluate them first.
                while True:
                    if len(operators) == 0:
                        break

                    if topOf(operators)["type"] == '(':
                        break

                    if priorityOf(topOf(operators)) <= priorityOf(currToken):
                        break

                    # only if priority of top of operators is greater than current token,
                    # evaluate them first
                    operator = operators.pop()
                    rhs = operands.pop()
                    lhs = operands.pop()

                    addOperand(createOperatorNode(lhs, operator, rhs),
                               operands, operators)

                # push this operator onto the operators stack.
                operators.append(currToken)

                # after a binary operator we need another operand.
                needOperand = True

                if currToken["type"] == scanner.scannerConstantEOF:
                    break

            # If this is a close parenthesis, we pop operators from the stack and evaluate
            # them until an open parenthesis. Then still search for an operator
            elif currToken["type"] == ')':
                # keep popping operators until "("
                while True:
                    if len(operators) == 0:
                        # (fixed: the message was misspelled and
                        # inverted -- this fires when a ')' has no
                        # matching '(')
                        parseError("This ')' does not match any '('",
                                   currToken["start"], currToken["end"])

                    currOp = operators.pop()

                    if currOp["type"] == '(':
                        break

                    if currOp["type"] == '~':
                        parseError("Nothing is negated by this operator.",
                                   currToken["start"], currToken["end"])

                    # otherwise, it should be an operator, evaluate it.
                    rhs = operands.pop()
                    lhs = operands.pop()

                    addOperand(createOperatorNode(lhs, currOp, rhs), operands,
                               operators)

                # expose the parenthesized expression to any pending negations.
                expr = operands.pop()
                addOperand(expr, operands, operators)

            # Anything else is an error
            else:
                parseError(
                    "Expecting a close parenthesis or a binary operator here",
                    currToken["start"], currToken["end"])

    # Successfully parsed the input string
    # TODO: The operator stack should be empty. Check such errors.

    return {"ast": operands.pop(), "variables": scanResult["variables"]}
Exemple #49
0
        print('Initializing scanner')
        scanner = scanner.Scanner('rules/index.yar')

        print('Initializing outputs')
        csvOutput = outputs.CSV('logs/csv/')
        jsonOutput = outputs.JSON('logs/json/')

        while True:
            # Get gists
            print('Getting gists')
            gists = ep.scrape()

            print('Comparing to Yara rules')
            for gist in gists:
                results = scanner.scan(gist)

                #Outputs results if a match occurred
                if len(results) > 0:
                    print('Found match in gist {}'.format(gist['key']))
                    gist['rule'] = results

                    # Store record of gist in csv
                    csvOutput.store_data(
                        gist, 'gists.csv',
                        ['key', 'rule', 'user', 'full_url', 'scrape_url'])
                    # Store copy of gist
                    jsonOutput.store_data(gist, '{}.json'.format(gist['key']))

            print('Sleeping for 5 minutes')
            time.sleep(300)
Exemple #50
0
#!/usr/bin/env python
# Bootstrap Django outside of manage.py so the ORM is usable here.
import django
django.setup()

from scanner.scan import *
#scan_segment('hotels','0x8ea119A7Ef0Ac4c1a83a3BB6D1aa1a3afcAfDE8b')
# Run a full scan across all segments (scan() comes from scanner.scan).
scan()

#print(get_org('0x98Fa47CFA890b12465775c723C072376FC64eE1e','hotels'))
Exemple #51
0
# NOTE(review): `args` and `options` come from an option-parser setup
# that is not visible in this chunk.
if len(args) < 1:
    scan_pattern = '.*'  # default: run every scan
else:
    scan_pattern = args[0]

# Normalise the result-status filter into a list of status strings.
if options.show_all:
    options.show = ['pass', 'n/a', 'fail', 'err', '????']
else:
    options.show = [v.strip() for v in options.show.split(',')]

# The 'list' output type just enumerates available output methods.
if options.output_type == 'list':
    for method_name, method_ref in inspect.getmembers(output.Output,
                                                      inspect.ismethod):
        if not method_name.startswith('_'):
            print method_name
    sys.exit(1)

# Prefer scanning ourselves as a zip archive (frozen app); fall back to
# the 'scans' source directory if we are not a valid zip.
try:
    scanner = scanner.ScannerZip(sys.argv[0], debug=options.debug)
except zipfile.BadZipfile:
    scanner = scanner.ScannerSrc('scans', debug=options.debug)

results = scanner.scan(scan_pattern)
output = output.Output(results, show=options.show)
# Resolve the requested output method by name on the Output instance.
try:
    output_cb = getattr(output, options.output_type)
except AttributeError, e:
    sys.stderr.write("No such output type: %s\n" % options.output_type)
    sys.exit(1)
output_cb()
Exemple #52
0
            if nodeType == 'repeat':
                i = node.data[1]
                for _ in range(int(i)):
                    q.appendleft(node.data[2])
        # all moves are added to a turtleQueue which is then animated with python turtle
        executeCommands(turtleQueue)
        return


if __name__ == "__main__":
    # Load the LR parse table shipped with the project.
    file = open('./parsedata.txt', 'r')
    table = parser.readTable(file)
    file.close()

    # Tokenize the user-supplied CATANDMOUSE source file.
    file = open('./' + input('Enter file name: '), 'r')
    tokenTypes = scanner.scan(file)
    file.close()

    # Parse ('$' is the end-of-input marker expected by the LR routine).
    structures = parser.LRParseRoutine(
        tokenTypes + ['$'],
        table)  # None if error, otherwise should be an array of productions
    if structures == None:
        print('CATANDMOUSE program is not syntactically correct')
    # Animate the parsed program via the project's tree walker.
    else:
        tree = Project3.createTree(structures)
        Project3.traverseTree(tree)