Example #1
 def __init__(self,MODE):
     self.mode=MODE
     self.ruleset=Ruleset()
     self.chrono=0.0 # keeps track of total time elapsed
     self.frame_number=0 # frame counter
     self.paused=False
     self.display_fps = pyglet.clock.ClockDisplay()
     pyglet.window.Window.__init__(self,vsync = True)
     # set size depending on chosen mode
     if self.mode=='FULL':
         self.set_fullscreen(True)
         self.get_display_size()
     elif self.mode in ('EXPORT','PREVIEW'): # export or preview mode
         self.xmax = PREVIEW_SIZE[0]
         self.ymax = PREVIEW_SIZE[1]
         self.set_size(self.xmax,self.ymax)
     else:
         print 'error: undefined mode'
         exit()
     self.key_setup()
     self.print_keys()
     self.gl_setup()
     self.mouse_setup()
Example #2
def simplify(term: Term, rules: Ruleset = RULES_DNF) -> Term:
    """Repeatedly rewrite `term` with `rules` until a fixed point (or a cycle) is reached."""
    term = simplify_deep(term, rules)
    history = [term]
    while True:
        # every rewrite of `term` obtainable by unifying it with a rule's left-hand side
        rules_simp = {dest.apply_subs(unif)
                      for src, dest in rules.items()
                      for unif in find_unifications(term, src)}
        # distinct simplified candidates, smallest (fewest children) first
        potential = sorted((r for r in (simplify_deep(item)
                                        for item in itertools.chain([term], rules_simp))
                            if r and r != term),
                           key=lambda r: len(list(r.get_children())))
        for choice in potential:
            if choice in history:
                # best candidate was already visited: we are cycling, so stop
                return term
            term = choice
            history.append(choice)
            break
        else:
            # no candidate differs from the current term: fixed point reached
            break

    return simplify_deep(term) or term
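
A minimal usage sketch; `Term`, `RULES_DNF`, `simplify_deep`, and `find_unifications` are assumed to come from the surrounding rewriting module, and the `And`/`Or`/`Var` constructors below are hypothetical stand-ins for however that module builds terms:

    # hypothetical constructors -- the real module defines its own Term subclasses
    expr = Or(And(Var('a'), Var('a')), Var('b'))
    simplified = simplify(expr)   # rewrites with the default RULES_DNF ruleset
    # if the ruleset contains an idempotence rule for And, this could print
    # something like Or(Var('a'), Var('b'))
    print(simplified)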
Example #3
 def __init__(self,MODE):
     self.mode=MODE
     self.ruleset=Ruleset()
     self.chrono=0.0 # keeps track of total time elapsed
     self.frame_number=0 # frame counter
     self.paused=False
     self.fps_on=True
     self.fps_display = pyglet.clock.ClockDisplay()
     pyglet.window.Window.__init__(self,vsync = True)
     # set size depending on chosen mode
     if self.mode=='FULL':
         self.set_fullscreen(True)
         self.get_display_size()
     elif self.mode in ('EXPORT','PREVIEW'): # export or preview mode
         self.xmax = PREVIEW_SIZE[0]
         self.ymax = PREVIEW_SIZE[1]
         self.set_size(self.xmax,self.ymax)
     else:
         print 'error: undefined mode'
         exit()
     self.key_setup()
     self.print_keys()
     self.gl_setup()
     self.mouse_setup()
Example #4
def cli():
	parser = argparse.ArgumentParser(description='Check HTTPS rules for validity')
	parser.add_argument('checker_config', help='path to the checker config file')
	parser.add_argument('rule_files', nargs="*", default=[], help="Specific XML rule files")
	parser.add_argument('--json_file', default=None, help='write results in json file')
	args = parser.parse_args()

	config = SafeConfigParser()
	config.read(args.checker_config)
	
	logfile = config.get("log", "logfile")
	loglevel = convertLoglevel(config.get("log", "loglevel"))
	if logfile == "-":
		logging.basicConfig(stream=sys.stderr, level=loglevel,
			format="%(levelname)s %(message)s")
	else:
		logging.basicConfig(filename=logfile, level=loglevel,
			format="%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")
		
	autoDisable = False
	if config.has_option("rulesets", "auto_disable"):
		autoDisable = config.getboolean("rulesets", "auto_disable")
	# Test rules even if they have default_off=...
	includeDefaultOff = False
	if config.has_option("rulesets", "include_default_off"):
		includeDefaultOff = config.getboolean("rulesets", "include_default_off")
	ruledir = config.get("rulesets", "rulesdir")
	checkCoverage = False
	if config.has_option("rulesets", "check_coverage"):
		checkCoverage = config.getboolean("rulesets", "check_coverage")
	checkTargetValidity = False
	if config.has_option("rulesets", "check_target_validity"):
		checkTargetValidity = config.getboolean("rulesets", "check_target_validity")
	checkNonmatchGroups = False
	if config.has_option("rulesets", "check_nonmatch_groups"):
		checkNonmatchGroups = config.getboolean("rulesets", "check_nonmatch_groups")
	checkTestFormatting = False
	if config.has_option("rulesets", "check_test_formatting"):
		checkTestFormatting = config.getboolean("rulesets", "check_test_formatting")
	certdir = config.get("certificates", "basedir")
	if config.has_option("rulesets", "skiplist") and config.has_option("rulesets", "skipfield"):
		skiplist = config.get("rulesets", "skiplist")
		skipfield = config.get("rulesets", "skipfield")
		with open(skiplist) as f:
			f.readline() # skip the header line of the skiplist CSV
			for line in f:
				splitLine = line.split(",")
				fileHash = splitLine[0]
				if splitLine[int(skipfield)] == "1":
					skipdict[binascii.unhexlify(fileHash)] = 1

	threadCount = config.getint("http", "threads")
	httpEnabled = True
	if config.has_option("http", "enabled"):
		httpEnabled = config.getboolean("http", "enabled")
	
	#get all platform dirs, make sure "default" is among them
	certdirFiles = glob.glob(os.path.join(certdir, "*"))
	havePlatforms = set([os.path.basename(fname) for fname in certdirFiles if os.path.isdir(fname)])
	logging.debug("Loaded certificate platforms: %s", ",".join(havePlatforms))
	if "default" not in havePlatforms:
		raise RuntimeError("Platform 'default' is missing from certificate directories")
	
	metricName = config.get("thresholds", "metric")
	thresholdDistance = config.getfloat("thresholds", "max_distance")
	metricClass = getMetricClass(metricName)
	metric = metricClass()
	
	# Debugging options, graphviz dump
	dumpGraphvizTrie = False
	if config.has_option("debug", "dump_graphviz_trie"):
		dumpGraphvizTrie = config.getboolean("debug", "dump_graphviz_trie")
	if dumpGraphvizTrie:
		graphvizFile = config.get("debug", "graphviz_file")
		exitAfterDump = config.getboolean("debug", "exit_after_dump")
	
	if args.rule_files:
		xmlFnames = args.rule_files
	else:
		xmlFnames = glob.glob(os.path.join(ruledir, "*.xml"))
	trie = RuleTrie()
	
	rulesets = []
	coverageProblemsExist = False
	targetValidityProblemExist = False
	nonmatchGroupProblemsExist = False
	testFormattingProblemsExist = False
	for xmlFname in xmlFnames:
		logging.debug("Parsing %s", xmlFname)
		if skipFile(xmlFname):
			logging.debug("Skipping rule file '%s', matches skiplist." % xmlFname)
			continue

		try:
			ruleset = Ruleset(etree.parse(file(xmlFname)).getroot(), xmlFname)
		except Exception, e:
			logging.error("Exception parsing %s: %s" % (xmlFname, e))
			continue # a ruleset that failed to parse cannot be checked
		if ruleset.defaultOff and not includeDefaultOff:
			logging.debug("Skipping rule '%s', reason: %s", ruleset.name, ruleset.defaultOff)
			continue
		# Check whether ruleset coverage by tests was sufficient.
		if checkCoverage:
			logging.debug("Checking coverage for '%s'." % ruleset.name)
			problems = ruleset.getCoverageProblems()
			for problem in problems:
				coverageProblemsExist = True
				logging.error(problem)
		if checkTargetValidity:
			logging.debug("Checking target validity for '%s'." % ruleset.name)
			problems = ruleset.getTargetValidityProblems()
			for problem in problems:
				targetValidityProblemExist = True
				logging.error(problem)
		if checkNonmatchGroups:
			logging.debug("Checking non-match groups for '%s'." % ruleset.name)
			problems = ruleset.getNonmatchGroupProblems()
			for problem in problems:
				nonmatchGroupProblemsExist = True
				logging.error(problem)
		if checkTestFormatting:
			logging.debug("Checking test formatting for '%s'." % ruleset.name)
			problems = ruleset.getTestFormattingProblems()
			for problem in problems:
				testFormattingProblemsExist = True
				logging.error(problem)
		trie.addRuleset(ruleset)
		rulesets.append(ruleset)
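
For reference, the checker_config argument above points at a plain INI file; a minimal sketch assembled from the sections and options this function actually queries (all paths and values are illustrative, the check_* flags, url_list, and [debug] section are optional, and the metric name is whatever getMetricClass accepts):

    [log]
    logfile = -
    loglevel = INFO

    [rulesets]
    rulesdir = rules
    check_coverage = true

    [certificates]
    basedir = platform_certs

    [http]
    threads = 10

    [thresholds]
    metric = markup
    max_distance = 0.1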
Example #5
class Engine(pyglet.window.Window):

    def __init__(self,MODE):
        self.mode=MODE
        self.ruleset=Ruleset()
        self.chrono=0.0 # keeps track of total time elapsed
        self.frame_number=0 # frame counter
        self.paused=False
        self.display_fps = pyglet.clock.ClockDisplay()
        pyglet.window.Window.__init__(self,vsync = True)
        # set size depending on chosen mode
        if self.mode=='FULL':
            self.set_fullscreen(True)
            self.get_display_size()
        elif self.mode in ('EXPORT','PREVIEW'): # export or preview mode
            self.xmax = PREVIEW_SIZE[0]
            self.ymax = PREVIEW_SIZE[1]
            self.set_size(self.xmax,self.ymax)
        else:
            print 'error: undefined mode'
            exit()
        self.key_setup()
        self.print_keys()
        self.gl_setup()
        self.mouse_setup()
    #---key handling-----------------------------------------------------------
    def key_setup(self):
        self.key_actions = {
        key.ESCAPE: lambda: exit(),
        #key.PAGEUP: lambda: self.camera.zoom(2),
        #key.PAGEDOWN: lambda: self.camera.zoom(0.5),
        #key.LEFT: lambda: self.camera.pan(self.camera.scale, -1.5708),
        #key.RIGHT: lambda: self.camera.pan(self.camera.scale, 1.5708),
        #key.DOWN: lambda: self.camera.pan(self.camera.scale, 3.1416),
        #key.UP: lambda: self.camera.pan(self.camera.scale, 0),
        key.SPACE : lambda: self.toggle_pause(),
        key.I : lambda: self.save_a_frame(),
        }

    def print_keys(self):
        print "keys to try:", \
            [key.symbol_string(k) for k in self.key_actions.keys()]

    def on_key_press(self,symbol, modifiers): # overrides pyglet.window.Window's handler
        if symbol in self.key_actions:
            self.key_actions[symbol]()

    def toggle_pause(self):
        self.paused = not self.paused

    #---mouse handling---------------------------------------------------------
    def mouse_setup(self):
        self.set_mouse_visible(False)

    #---GL stuff---------------------------------------------------------------
    def gl_setup(self):
        # Set the window color; this will be transparent in saved images.
        glClearColor(*BGCOLOR)
        self.gl_clear()
        # use glOrtho and turn off depthwriting and depthtesting
        #glDisable(GL_DEPTH_TEST)
        #glDepthMask(GL_FALSE)
        # Normally glColor specifies values for use when lighting is *off*,
        # and glMaterial specifies values for use when lighting is *on*.
        # Disable lighting and color material features:
        #glDisable(GL_LIGHTING)
        #glDisable(GL_COLOR_MATERIAL)
        #glLoadIdentity() # reset transformation matrix
        # Most of this is already taken care of in Pyglet.
        #glMatrixMode(GL_PROJECTION)
        #glOrtho(0, self.xmax, 0, self.ymax, -1, 1)
        #glMatrixMode(GL_MODELVIEW)
        # Enable line anti-aliasing.
        #glEnable(GL_LINE_SMOOTH)
        # Enable alpha transparency.
        #glEnable(GL_BLEND)
        #glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, \
        #       GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
        #glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def gl_clear(self):
        glClear(GL_COLOR_BUFFER_BIT)
        #glClear(GL_DEPTH_BUFFER_BIT)
        #glClear(GL_STENCIL_BUFFER_BIT)
        glLoadIdentity()


    def get_display_size(self):
        platform = pyglet.window.get_platform()
        display = platform.get_default_display()
        screen = display.get_default_screen()
        self.xmax = screen.width
        self.ymax = screen.height

    #---EXPORT ----------------------------------------------------------------

    def save_a_frame(self):
        file_num=str(self.frame_number).zfill(5)
        file_t=str(self.chrono)
        filename="frame-"+file_num+'-@ '+file_t+'sec.png'
        pyglet.image.get_buffer_manager().get_color_buffer().save(filename)
        print 'image file written:', filename

    def export_loop(self,dt):
        constant_interval=1.0/PicPS
        if self.chrono<END_TIME:
            # update at a constant dt, regardless of real time
            # so that even if refresh is slow no frame should be missing
            # self.frame_draw(PicPS)
            self.update(constant_interval)
            self.frame_draw(dt)
            self.frame_number+=1
            self.save_a_frame()
        else:
            exit()

    #---FULLSCREEN AND WINDOWED-PREVIEW MODE loop------------------------------
    def frame_draw(self,dt):
        """Clear the current OpenGL context, reset the model/view matrix and
        invoke the `draw()` methods of the zulus
        """
        if not self.paused:
            self.gl_clear()
            zulus.paint_all()
            if DEBUG:
                self.display_fps.draw()

    def update(self,dt):
        if not self.paused:
            self.chrono+=dt
            self.ruleset.update(dt,self.chrono)
        else:
            pass

    #---run loop options-------------------------------------------------------

    def run(self):
        # schedule the pyglet draw loop at max framerate
        # and the update function at the same rate
        # (frame / time driven loop)

        if self.mode in ('FULL','PREVIEW'):
            clock.schedule_interval(self.frame_draw,1.0/FPS)
            clock.schedule_interval_soft(self.update, 1.0/FPS)
        elif self.mode == 'EXPORT': # export loop
            # try to run export method at final anim speed,
            clock.schedule_interval_soft(self.export_loop,1.0/PicPS)
            # anyway run preview at good rate
            clock.schedule_interval_soft(self.frame_draw, 1.0/FPS)

        pyglet.app.run()
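
A minimal driving sketch; the module-level names the class leans on (PREVIEW_SIZE, FPS, PicPS, END_TIME, BGCOLOR, DEBUG, and the zulus module) are assumed to be defined alongside it:

    engine = Engine('PREVIEW')   # or 'FULL' for fullscreen, 'EXPORT' to dump numbered PNG frames
    engine.run()                 # schedules the draw/update callbacks, then enters pyglet's event loop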
Example #6
	dumpGraphvizTrie = False
	if config.has_option("debug", "dump_graphviz_trie"):
		dumpGraphvizTrie = config.getboolean("debug", "dump_graphviz_trie")
	if dumpGraphvizTrie:
		graphvizFile = config.get("debug", "graphviz_file")
		exitAfterDump = config.getboolean("debug", "exit_after_dump")
	
	
	xmlFnames = glob.glob(os.path.join(ruledir, "*.xml"))
	trie = RuleTrie()
	
	# set of main pages to test
	mainPages = set(urlList)
	
	for xmlFname in xmlFnames:
		ruleset = Ruleset(etree.parse(file(xmlFname)).getroot(), xmlFname)
		if ruleset.defaultOff:
			logging.debug("Skipping rule '%s', reason: %s", ruleset.name, ruleset.defaultOff)
			continue
		#if list of URLs to test/scan was not defined, guess URLs from target elements
		if not urlList:
			for target in ruleset.uniqueTargetFQDNs():
				targetHTTPLangingPage = "http://%s/" % target
				if not ruleset.excludes(targetHTTPLangingPage):
					mainPages.add(targetHTTPLangingPage)
				else:
					logging.debug("Skipping landing page %s", targetHTTPLangingPage)
		trie.addRuleset(ruleset)
	
	# Trie is built now, dump it if it's set in config
	if dumpGraphvizTrie:
Example #7
def cli():
    parser = argparse.ArgumentParser(
        description='Check HTTPS rules for validity')
    parser.add_argument('checker_config',
                        help='path to the checker config file')
    parser.add_argument('rule_files',
                        nargs="*",
                        default=[],
                        help="Specific XML rule files")
    parser.add_argument('--json_file',
                        default=None,
                        help='write results in json file')
    args = parser.parse_args()

    config = SafeConfigParser()
    config.read(args.checker_config)

    logfile = config.get("log", "logfile")
    loglevel = convertLoglevel(config.get("log", "loglevel"))
    if logfile == "-":
        logging.basicConfig(stream=sys.stderr,
                            level=loglevel,
                            format="%(levelname)s %(message)s")
    else:
        logging.basicConfig(
            filename=logfile,
            level=loglevel,
            format=
            "%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")

    autoDisable = False
    if config.has_option("rulesets", "auto_disable"):
        autoDisable = config.getboolean("rulesets", "auto_disable")
    # Test rules even if they have default_off=...
    includeDefaultOff = False
    if config.has_option("rulesets", "include_default_off"):
        includeDefaultOff = config.getboolean("rulesets",
                                              "include_default_off")
    ruledir = config.get("rulesets", "rulesdir")
    checkCoverage = False
    if config.has_option("rulesets", "check_coverage"):
        checkCoverage = config.getboolean("rulesets", "check_coverage")
    checkTargetValidity = False
    if config.has_option("rulesets", "check_target_validity"):
        checkTargetValidity = config.getboolean("rulesets",
                                                "check_target_validity")
    checkNonmatchGroups = False
    if config.has_option("rulesets", "check_nonmatch_groups"):
        checkNonmatchGroups = config.getboolean("rulesets",
                                                "check_nonmatch_groups")
    checkTestFormatting = False
    if config.has_option("rulesets", "check_test_formatting"):
        checkTestFormatting = config.getboolean("rulesets",
                                                "check_test_formatting")
    certdir = config.get("certificates", "basedir")
    if config.has_option("rulesets", "skiplist") and config.has_option(
            "rulesets", "skipfield"):
        skiplist = config.get("rulesets", "skiplist")
        skipfield = config.get("rulesets", "skipfield")
        with open(skiplist) as f:
            f.readline()  # skip the header line of the skiplist CSV
            for line in f:
                splitLine = line.split(",")
                fileHash = splitLine[0]
                if splitLine[int(skipfield)] == "1":
                    skipdict[binascii.unhexlify(fileHash)] = 1

    threadCount = config.getint("http", "threads")
    httpEnabled = True
    if config.has_option("http", "enabled"):
        httpEnabled = config.getboolean("http", "enabled")

    metricName = config.get("thresholds", "metric")
    thresholdDistance = config.getfloat("thresholds", "max_distance")
    metricClass = getMetricClass(metricName)
    metric = metricClass()

    # Debugging options, graphviz dump
    dumpGraphvizTrie = False
    if config.has_option("debug", "dump_graphviz_trie"):
        dumpGraphvizTrie = config.getboolean("debug", "dump_graphviz_trie")
    if dumpGraphvizTrie:
        graphvizFile = config.get("debug", "graphviz_file")
        exitAfterDump = config.getboolean("debug", "exit_after_dump")

    if args.rule_files:
        xmlFnames = args.rule_files
    else:
        xmlFnames = glob.glob(os.path.join(ruledir, "*.xml"))
    trie = RuleTrie()

    rulesets = []
    coverageProblemsExist = False
    targetValidityProblemExist = False
    nonmatchGroupProblemsExist = False
    testFormattingProblemsExist = False
    for xmlFname in xmlFnames:
        logging.debug("Parsing {}".format(xmlFname))
        if skipFile(xmlFname):
            logging.debug(
                "Skipping rule file '{}', matches skiplist.".format(xmlFname))
            continue

        ruleset = Ruleset(
            etree.parse(open(xmlFname, "rb")).getroot(), xmlFname)
        if ruleset.defaultOff and not includeDefaultOff:
            logging.debug("Skipping rule '{}', reason: {}".format(
                ruleset.name, ruleset.defaultOff))
            continue
        # Check whether ruleset coverage by tests was sufficient.
        if checkCoverage:
            logging.debug("Checking coverage for '{}'.".format(ruleset.name))
            problems = ruleset.getCoverageProblems()
            for problem in problems:
                coverageProblemsExist = True
                logging.error(problem)
        if checkTargetValidity:
            logging.debug("Checking target validity for '{}'.".format(
                ruleset.name))
            problems = ruleset.getTargetValidityProblems()
            for problem in problems:
                targetValidityProblemExist = True
                logging.error(problem)
        if checkNonmatchGroups:
            logging.debug("Checking non-match groups for '{}'.".format(
                ruleset.name))
            problems = ruleset.getNonmatchGroupProblems()
            for problem in problems:
                nonmatchGroupProblemsExist = True
                logging.error(problem)
        if checkTestFormatting:
            logging.debug("Checking test formatting for '{}'.".format(
                ruleset.name))
            problems = ruleset.getTestFormattingProblems()
            for problem in problems:
                testFormattingProblemsExist = True
                logging.error(problem)
        trie.addRuleset(ruleset)
        rulesets.append(ruleset)

    # Trie is built now, dump it if it's set in config
    if dumpGraphvizTrie:
        logging.debug("Dumping graphviz ruleset trie")
        graph = trie.generateGraphizGraph()
        if graphvizFile == "-":
            graph.dot()
        else:
            with open(graphvizFile, "w") as gvFd:
                graph.dot(gvFd)
        if exitAfterDump:
            sys.exit(0)
    fetchOptions = http_client.FetchOptions(config)
    fetchers = list()

    # Ensure "default" is in the platform dirs
    if not os.path.isdir(os.path.join(certdir, "default")):
        raise RuntimeError(
            "Platform 'default' is missing from certificate directories")

    platforms = http_client.CertificatePlatforms(
        os.path.join(certdir, "default"))
    fetchers.append(
        http_client.HTTPFetcher("default", platforms, fetchOptions, trie))
    # fetches pages with unrewritten URLs
    fetcherPlain = http_client.HTTPFetcher("default", platforms, fetchOptions)

    urlList = []
    if config.has_option("http", "url_list"):
        with open(config.get("http", "url_list")) as urlFile:
            urlList = [line.rstrip() for line in urlFile.readlines()]

    if httpEnabled:
        taskQueue = queue.Queue(1000)
        resQueue = queue.Queue()
        startTime = time.time()
        testedUrlPairCount = 0
        config.getboolean("debug", "exit_after_dump")

        for i in range(threadCount):
            t = UrlComparisonThread(taskQueue, metric, thresholdDistance,
                                    autoDisable, resQueue)
            t.setDaemon(True)
            t.start()

        # set of main pages to test
        mainPages = set(urlList)
        # If list of URLs to test/scan was not defined, use the test URL extraction
        # methods built into the Ruleset implementation.
        if not urlList:
            for ruleset in rulesets:
                if ruleset.platform != "default" and os.path.isdir(
                        os.path.join(certdir, ruleset.platform)):
                    theseFetchers = copy.deepcopy(fetchers)
                    platforms.addPlatform(
                        ruleset.platform,
                        os.path.join(certdir, ruleset.platform))
                    theseFetchers.append(
                        http_client.HTTPFetcher(ruleset.platform, platforms,
                                                fetchOptions, trie))
                else:
                    theseFetchers = fetchers
                testUrls = []
                for test in ruleset.tests:
                    if not ruleset.excludes(test.url):
                        testedUrlPairCount += 1
                        testUrls.append(test.url)
                    else:
                        # TODO: We should fetch the non-rewritten exclusion URLs to make
                        # sure they still exist.
                        logging.debug("Skipping excluded URL {}".format(
                            test.url))
                task = ComparisonTask(testUrls, fetcherPlain, theseFetchers,
                                      ruleset)
                taskQueue.put(task)

        taskQueue.join()
        logging.info(
            "Finished in {:.2f} seconds. Loaded rulesets: {}, URL pairs: {}.".
            format(time.time() - startTime, len(xmlFnames),
                   testedUrlPairCount))
        if args.json_file:
            json_output(resQueue, args.json_file, problems)
    if checkCoverage and coverageProblemsExist:
        return 1  # exit with error code
    if checkTargetValidity and targetValidityProblemExist:
        return 1  # exit with error code
    if checkNonmatchGroups and nonmatchGroupProblemsExist:
        return 1  # exit with error code
    if checkTestFormatting and testFormattingProblemsExist:
        return 1  # exit with error code
    return 0  # exit with success
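
A sketch of how this checker might be wired up as a script entry point; the `__main__` guard is an assumption, not part of the original:

    import sys

    if __name__ == "__main__":
        # cli() returns 0 on success and 1 when an enabled check found problems,
        # so its return value maps directly onto the process exit status
        sys.exit(cli())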
Example #8
class Engine(pyglet.window.Window):

    def __init__(self,MODE):
        self.mode=MODE
        self.ruleset=Ruleset()
        self.chrono=0.0 # keeps track of total time elapsed
        self.frame_number=0 # frame counter
        self.paused=False
        self.fps_on=True
        self.fps_display = pyglet.clock.ClockDisplay()
        pyglet.window.Window.__init__(self,vsync = True)
        # set size depending on chosen mode
        if self.mode=='FULL':
            self.set_fullscreen(True)
            self.get_display_size()
        elif self.mode in ('EXPORT','PREVIEW'): # export or preview mode
            self.xmax = PREVIEW_SIZE[0]
            self.ymax = PREVIEW_SIZE[1]
            self.set_size(self.xmax,self.ymax)
        else:
            print 'error: undefined mode'
            exit()
        self.key_setup()
        self.print_keys()
        self.gl_setup()
        self.mouse_setup()
    #---key handling-----------------------------------------------------------
    def key_setup(self):
        self.key_actions = {
        key.ESCAPE: lambda: exit(),
        #key.PAGEUP: lambda: self.camera.zoom(2),
        #key.PAGEDOWN: lambda: self.camera.zoom(0.5),
        #key.LEFT: lambda: self.camera.pan(self.camera.scale, -1.5708),
        #key.RIGHT: lambda: self.camera.pan(self.camera.scale, 1.5708),
        #key.DOWN: lambda: self.camera.pan(self.camera.scale, 3.1416),
        #key.UP: lambda: self.camera.pan(self.camera.scale, 0),
        key.SPACE : lambda: self.toggle_pause(),
        key.F : lambda: self.toggle_fps_on(),
        key.I : lambda: self.save_a_frame(),
        }

    def print_keys(self):
        print "keys to try:", \
            [key.symbol_string(k) for k in self.key_actions.keys()]

    def on_key_press(self,symbol, modifiers): # overrides pyglet.window.Window's handler
        if symbol in self.key_actions:
            self.key_actions[symbol]()

    def toggle_pause(self):
        self.paused = not self.paused

    def toggle_fps_on(self):
        self.fps_on = not self.fps_on

    #---mouse handling---------------------------------------------------------
    def mouse_setup(self):
        self.set_mouse_visible(False)

    #---GL stuff---------------------------------------------------------------
    def gl_setup(self):
        # Set the window color; this will be transparent in saved images.
        glClearColor(*BGCOLOR)
        glEnable(GL_LINE_SMOOTH)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    def gl_clear(self):
        glClear(GL_COLOR_BUFFER_BIT)
        #glClear(GL_DEPTH_BUFFER_BIT)
        #glClear(GL_STENCIL_BUFFER_BIT)
        glLoadIdentity()


    def get_display_size(self):
        platform = pyglet.window.get_platform()
        display = platform.get_default_display()
        screen = display.get_default_screen()
        self.xmax = screen.width
        self.ymax = screen.height

    #---EXPORT ----------------------------------------------------------------

    def save_a_frame(self):
        file_num=str(self.frame_number).zfill(5)
        file_t=str(self.chrono)
        filename="frame-"+file_num+'-@ '+file_t+'sec.png'
        pyglet.image.get_buffer_manager().get_color_buffer().save(filename)
        print 'image file written:', filename

    def export_loop(self,dt):
        constant_interval=1.0/PicPS
        if self.chrono<END_TIME:
            # update at a constant dt, regardless of real time
            # so that even if refresh is slow no frame should be missing
            # self.frame_draw(PicPS)
            self.update(constant_interval)
            self.frame_draw(dt)
            self.frame_number+=1
            self.save_a_frame()
        else:
            exit()

    #---FULLSCREEN AND WINDOWED-PREVIEW MODE loop------------------------------
    def frame_draw(self,dt):
        """Clear the current OpenGL context, reset the model/view matrix and
        invoke the `draw()` methods of the zulus
        """
        if not self.paused:
            self.gl_clear()
            zulus.paint_all()
            if self.fps_on:
                self.fps_display.draw()

    def update(self,dt):
        if not self.paused:
            self.chrono+=dt
            self.ruleset.update(dt,self.chrono)
        else:
            pass

    #---run loop options-------------------------------------------------------

    def run(self):
        # schedule the pyglet draw loop at max framerate
        # and the update function at the same rate
        # (frame / time driven loop)

        if self.mode in ('FULL','PREVIEW'):
            clock.schedule_interval(self.frame_draw,1.0/FPS)
            clock.schedule_interval_soft(self.update, 1.0/FPS)
        elif self.mode == 'EXPORT': # export loop
            # try to run export method at final anim speed,
            clock.schedule_interval_soft(self.export_loop,1.0/PicPS)
            # anyway run preview at good rate
            clock.schedule_interval_soft(self.frame_draw, 1.0/FPS)

        pyglet.app.run()
Example #9
def cli():
    parser = argparse.ArgumentParser(
        description='Check HTTPS rules for validity')
    parser.add_argument(
        'checker_config', help='path to the checker config file')
    parser.add_argument('rule_files', nargs="*", default=[],
                        help="Specific XML rule files")
    parser.add_argument('--json_file', default=None,
                        help='write results in json file')
    args = parser.parse_args()

    config = SafeConfigParser()
    config.read(args.checker_config)

    logfile = config.get("log", "logfile")
    loglevel = convertLoglevel(config.get("log", "loglevel"))
    if logfile == "-":
        logging.basicConfig(stream=sys.stderr, level=loglevel,
                            format="%(levelname)s %(message)s")
    else:
        logging.basicConfig(filename=logfile, level=loglevel,
                            format="%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")

    autoDisable = False
    if config.has_option("rulesets", "auto_disable"):
        autoDisable = config.getboolean("rulesets", "auto_disable")
    # Test rules even if they have default_off=...
    includeDefaultOff = False
    if config.has_option("rulesets", "include_default_off"):
        includeDefaultOff = config.getboolean(
            "rulesets", "include_default_off")
    ruledir = config.get("rulesets", "rulesdir")
    checkCoverage = False
    if config.has_option("rulesets", "check_coverage"):
        checkCoverage = config.getboolean("rulesets", "check_coverage")
    checkTargetValidity = False
    if config.has_option("rulesets", "check_target_validity"):
        checkTargetValidity = config.getboolean(
            "rulesets", "check_target_validity")
    checkNonmatchGroups = False
    if config.has_option("rulesets", "check_nonmatch_groups"):
        checkNonmatchGroups = config.getboolean(
            "rulesets", "check_nonmatch_groups")
    checkTestFormatting = False
    if config.has_option("rulesets", "check_test_formatting"):
        checkTestFormatting = config.getboolean(
            "rulesets", "check_test_formatting")
    certdir = config.get("certificates", "basedir")
    if config.has_option("rulesets", "skiplist") and config.has_option("rulesets", "skipfield"):
        skiplist = config.get("rulesets", "skiplist")
        skipfield = config.get("rulesets", "skipfield")
        with open(skiplist) as f:
            f.readline()  # skip the header line of the skiplist CSV
            for line in f:
                splitLine = line.split(",")
                fileHash = splitLine[0]
                if splitLine[int(skipfield)] == "1":
                    skipdict[binascii.unhexlify(fileHash)] = 1

    threadCount = config.getint("http", "threads")
    httpEnabled = True
    if config.has_option("http", "enabled"):
        httpEnabled = config.getboolean("http", "enabled")

    metricName = config.get("thresholds", "metric")
    thresholdDistance = config.getfloat("thresholds", "max_distance")
    metricClass = getMetricClass(metricName)
    metric = metricClass()

    # Debugging options, graphviz dump
    dumpGraphvizTrie = False
    if config.has_option("debug", "dump_graphviz_trie"):
        dumpGraphvizTrie = config.getboolean("debug", "dump_graphviz_trie")
    if dumpGraphvizTrie:
        graphvizFile = config.get("debug", "graphviz_file")
        exitAfterDump = config.getboolean("debug", "exit_after_dump")

    if args.rule_files:
        xmlFnames = args.rule_files
    else:
        xmlFnames = glob.glob(os.path.join(ruledir, "*.xml"))
    trie = RuleTrie()

    rulesets = []
    coverageProblemsExist = False
    targetValidityProblemExist = False
    nonmatchGroupProblemsExist = False
    testFormattingProblemsExist = False
    for xmlFname in xmlFnames:
        logging.debug("Parsing {}".format(xmlFname))
        if skipFile(xmlFname):
            logging.debug(
                "Skipping rule file '{}', matches skiplist.".format(xmlFname))
            continue

        ruleset = Ruleset(etree.parse(open(xmlFname, "rb")).getroot(), xmlFname)
        if ruleset.defaultOff and not includeDefaultOff:
            logging.debug("Skipping rule '{}', reason: {}".format(
                          ruleset.name, ruleset.defaultOff))
            continue
        # Check whether ruleset coverage by tests was sufficient.
        if checkCoverage:
            logging.debug("Checking coverage for '{}'.".format(ruleset.name))
            problems = ruleset.getCoverageProblems()
            for problem in problems:
                coverageProblemsExist = True
                logging.error(problem)
        if checkTargetValidity:
            logging.debug("Checking target validity for '{}'.".format(ruleset.name))
            problems = ruleset.getTargetValidityProblems()
            for problem in problems:
                targetValidityProblemExist = True
                logging.error(problem)
        if checkNonmatchGroups:
            logging.debug("Checking non-match groups for '{}'.".format(ruleset.name))
            problems = ruleset.getNonmatchGroupProblems()
            for problem in problems:
                nonmatchGroupProblemsExist = True
                logging.error(problem)
        if checkTestFormatting:
            logging.debug("Checking test formatting for '{}'.".format(ruleset.name))
            problems = ruleset.getTestFormattingProblems()
            for problem in problems:
                testFormattingProblemsExist = True
                logging.error(problem)
        trie.addRuleset(ruleset)
        rulesets.append(ruleset)

    # Trie is built now, dump it if it's set in config
    if dumpGraphvizTrie:
        logging.debug("Dumping graphviz ruleset trie")
        graph = trie.generateGraphizGraph()
        if graphvizFile == "-":
            graph.dot()
        else:
            with open(graphvizFile, "w") as gvFd:
                graph.dot(gvFd)
        if exitAfterDump:
            sys.exit(0)
    fetchOptions = http_client.FetchOptions(config)
    fetchers = list()

    # Ensure "default" is in the platform dirs
    if not os.path.isdir(os.path.join(certdir, "default")):
        raise RuntimeError(
            "Platform 'default' is missing from certificate directories")

    platforms = http_client.CertificatePlatforms(
        os.path.join(certdir, "default"))
    fetchers.append(http_client.HTTPFetcher(
        "default", platforms, fetchOptions, trie))
    # fetches pages with unrewritten URLs
    fetcherPlain = http_client.HTTPFetcher("default", platforms, fetchOptions)

    urlList = []
    if config.has_option("http", "url_list"):
        with open(config.get("http", "url_list")) as urlFile:
            urlList = [line.rstrip() for line in urlFile.readlines()]

    if httpEnabled:
        taskQueue = queue.Queue(1000)
        resQueue = queue.Queue()
        startTime = time.time()
        testedUrlPairCount = 0
        config.getboolean("debug", "exit_after_dump")

        for i in range(threadCount):
            t = UrlComparisonThread(
                taskQueue, metric, thresholdDistance, autoDisable, resQueue)
            t.setDaemon(True)
            t.start()

        # set of main pages to test
        mainPages = set(urlList)
        # If list of URLs to test/scan was not defined, use the test URL extraction
        # methods built into the Ruleset implementation.
        if not urlList:
            for ruleset in rulesets:
                if ruleset.platform != "default" and os.path.isdir(os.path.join(certdir, ruleset.platform)):
                    theseFetchers = copy.deepcopy(fetchers)
                    platforms.addPlatform(ruleset.platform, os.path.join(certdir, ruleset.platform))
                    theseFetchers.append(http_client.HTTPFetcher(
                        ruleset.platform, platforms, fetchOptions, trie))
                else:
                    theseFetchers = fetchers
                testUrls = []
                for test in ruleset.tests:
                    if not ruleset.excludes(test.url):
                        testedUrlPairCount += 1
                        testUrls.append(test.url)
                    else:
                        # TODO: We should fetch the non-rewritten exclusion URLs to make
                        # sure they still exist.
                        logging.debug("Skipping excluded URL {}".format(test.url))
                task = ComparisonTask(testUrls, fetcherPlain, theseFetchers, ruleset)
                taskQueue.put(task)

        taskQueue.join()
        logging.info("Finished in {:.2f} seconds. Loaded rulesets: {}, URL pairs: {}.".format(
                     time.time() - startTime, len(xmlFnames), testedUrlPairCount))
        if args.json_file:
            json_output(resQueue, args.json_file, problems)
    if checkCoverage and coverageProblemsExist:
        return 1  # exit with error code
    if checkTargetValidity and targetValidityProblemExist:
        return 1  # exit with error code
    if checkNonmatchGroups and nonmatchGroupProblemsExist:
        return 1  # exit with error code
    if checkTestFormatting and testFormattingProblemsExist:
        return 1  # exit with error code
    return 0  # exit with success
Example #10
def cli():
    if len(sys.argv) < 2:
        print >> sys.stderr, "usage: check_rules.py checker.config"
        sys.exit(1)

    config = SafeConfigParser()
    config.read(sys.argv[1])

    logfile = config.get("log", "logfile")
    loglevel = convertLoglevel(config.get("log", "loglevel"))
    if logfile == "-":
        logging.basicConfig(
            stream=sys.stderr,
            level=loglevel,
            format=
            "%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")
    else:
        logging.basicConfig(
            filename=logfile,
            level=loglevel,
            format=
            "%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")

    ruledir = config.get("rulesets", "rulesdir")
    certdir = config.get("certificates", "basedir")

    threadCount = config.getint("http", "threads")

    #get all platform dirs, make sure "default" is among them
    certdirFiles = glob.glob(os.path.join(certdir, "*"))
    havePlatforms = set([
        os.path.basename(fname) for fname in certdirFiles
        if os.path.isdir(fname)
    ])
    logging.debug("Loaded certificate platforms: %s", ",".join(havePlatforms))
    if "default" not in havePlatforms:
        raise RuntimeError(
            "Platform 'default' is missing from certificate directories")

    metricName = config.get("thresholds", "metric")
    thresholdDistance = config.getfloat("thresholds", "max_distance")
    metricClass = getMetricClass(metricName)
    metric = metricClass()

    urlList = []
    if config.has_option("http", "url_list"):
        with file(config.get("http", "url_list")) as urlFile:
            urlList = [line.rstrip() for line in urlFile.readlines()]

    # Debugging options, graphviz dump
    dumpGraphvizTrie = False
    if config.has_option("debug", "dump_graphviz_trie"):
        dumpGraphvizTrie = config.getboolean("debug", "dump_graphviz_trie")
    if dumpGraphvizTrie:
        graphvizFile = config.get("debug", "graphviz_file")
        exitAfterDump = config.getboolean("debug", "exit_after_dump")

    xmlFnames = glob.glob(os.path.join(ruledir, "*.xml"))
    trie = RuleTrie()

    # set of main pages to test
    mainPages = set(urlList)

    for xmlFname in xmlFnames:
        ruleset = Ruleset(etree.parse(file(xmlFname)).getroot(), xmlFname)
        if ruleset.defaultOff:
            logging.debug("Skipping rule '%s', reason: %s", ruleset.name,
                          ruleset.defaultOff)
            continue
        #if list of URLs to test/scan was not defined, guess URLs from target elements
        if not urlList:
            for target in ruleset.uniqueTargetFQDNs():
                targetHTTPLangingPage = "http://%s/" % target
                if not ruleset.excludes(targetHTTPLangingPage):
                    mainPages.add(targetHTTPLangingPage)
                else:
                    logging.debug("Skipping landing page %s",
                                  targetHTTPLangingPage)
        trie.addRuleset(ruleset)

    # Trie is built now, dump it if it's set in config
    if dumpGraphvizTrie:
        logging.debug("Dumping graphviz ruleset trie")
        graph = trie.generateGraphizGraph()
        if graphvizFile == "-":
            graph.dot()
        else:
            with file(graphvizFile, "w") as gvFd:
                graph.dot(gvFd)
        if exitAfterDump:
            sys.exit(0)

    fetchOptions = http_client.FetchOptions(config)
    fetcherMap = dict()  #maps platform to fetcher

    platforms = http_client.CertificatePlatforms(
        os.path.join(certdir, "default"))
    for platform in havePlatforms:
        #adding "default" again won't break things
        platforms.addPlatform(platform, os.path.join(certdir, platform))
        fetcher = http_client.HTTPFetcher(platform, platforms, fetchOptions,
                                          trie)
        fetcherMap[platform] = fetcher

    #fetches pages with unrewritten URLs
    fetcherPlain = http_client.HTTPFetcher("default", platforms, fetchOptions)

    taskQueue = Queue.Queue(1000)
    startTime = time.time()
    testedUrlPairCount = 0

    for i in range(threadCount):
        t = UrlComparisonThread(taskQueue, metric, thresholdDistance)
        t.setDaemon(True)
        t.start()

    for plainUrl in mainPages:
        try:
            ruleFname = None
            ruleMatch = trie.transformUrl(plainUrl)
            transformedUrl = ruleMatch.url

            if plainUrl == transformedUrl:
                logging.info("Identical URL: %s", plainUrl)
                continue

            #URL was transformed, thus ruleset must exist that did it
            ruleFname = os.path.basename(ruleMatch.ruleset.filename)
            fetcher = fetcherMap.get(ruleMatch.ruleset.platform)
            if not fetcher:
                logging.warn(
                    "Unknown platform '%s', using 'default' instead. Rulefile: %s.",
                    ruleMatch.ruleset.platform, ruleFname)
                fetcher = fetcherMap["default"]

        except Exception:
            logging.exception(
                "Failed to transform plain URL %s. Rulefile: %s.", plainUrl,
                ruleFname)
            continue

        testedUrlPairCount += 1
        task = ComparisonTask(plainUrl, transformedUrl, fetcherPlain, fetcher,
                              ruleFname)
        taskQueue.put(task)

    taskQueue.join()
    logging.info(
        "Finished in %.2f seconds. Loaded rulesets: %d, URL pairs: %d.",
        time.time() - startTime, len(xmlFnames), testedUrlPairCount)
Example #11
def cli():
	if len(sys.argv) < 2:
		print >> sys.stderr, "usage: check_rules.py checker.config"
		sys.exit(1)
	
	config = SafeConfigParser()
	config.read(sys.argv[1])
	
	logfile = config.get("log", "logfile")
	loglevel = convertLoglevel(config.get("log", "loglevel"))
	if logfile == "-":
		logging.basicConfig(stream=sys.stderr, level=loglevel,
			format="%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")
	else:
		logging.basicConfig(filename=logfile, level=loglevel,
			format="%(asctime)s %(levelname)s %(message)s [%(pathname)s:%(lineno)d]")
		
	ruledir = config.get("rulesets", "rulesdir")
	certdir = config.get("certificates", "basedir")
	
	threadCount = config.getint("http", "threads")
	
	#get all platform dirs, make sure "default" is among them
	certdirFiles = glob.glob(os.path.join(certdir, "*"))
	havePlatforms = set([os.path.basename(fname) for fname in certdirFiles if os.path.isdir(fname)])
	logging.debug("Loaded certificate platforms: %s", ",".join(havePlatforms))
	if "default" not in havePlatforms:
		raise RuntimeError("Platform 'default' is missing from certificate directories")
	
	metricName = config.get("thresholds", "metric")
	thresholdDistance = config.getfloat("thresholds", "max_distance")
	metricClass = getMetricClass(metricName)
	metric = metricClass()
	
	urlList = []
	if config.has_option("http", "url_list"):
		with file(config.get("http", "url_list")) as urlFile:
			urlList = [line.rstrip() for line in urlFile.readlines()]
			
	# Debugging options, graphviz dump
	dumpGraphvizTrie = False
	if config.has_option("debug", "dump_graphviz_trie"):
		dumpGraphvizTrie = config.getboolean("debug", "dump_graphviz_trie")
	if dumpGraphvizTrie:
		graphvizFile = config.get("debug", "graphviz_file")
		exitAfterDump = config.getboolean("debug", "exit_after_dump")
	
	
	xmlFnames = glob.glob(os.path.join(ruledir, "*.xml"))
	trie = RuleTrie()
	
	# set of main pages to test
	mainPages = set(urlList)
	
	for xmlFname in xmlFnames:
		ruleset = Ruleset(etree.parse(file(xmlFname)).getroot(), xmlFname)
		if ruleset.defaultOff:
			logging.debug("Skipping rule '%s', reason: %s", ruleset.name, ruleset.defaultOff)
			continue
		#if list of URLs to test/scan was not defined, guess URLs from target elements
		if not urlList:
			for target in ruleset.uniqueTargetFQDNs():
				targetHTTPLangingPage = "http://%s/" % target
				if not ruleset.excludes(targetHTTPLangingPage):
					mainPages.add(targetHTTPLangingPage)
				else:
					logging.debug("Skipping landing page %s", targetHTTPLangingPage)
		trie.addRuleset(ruleset)
	
	# Trie is built now, dump it if it's set in config
	if dumpGraphvizTrie:
		logging.debug("Dumping graphviz ruleset trie")
		graph = trie.generateGraphizGraph()
		if graphvizFile == "-":
			graph.dot()
		else:
			with file(graphvizFile, "w") as gvFd:
				graph.dot(gvFd)
		if exitAfterDump:
			sys.exit(0)
	
	fetchOptions = http_client.FetchOptions(config)
	fetcherMap = dict() #maps platform to fetcher
	
	platforms = http_client.CertificatePlatforms(os.path.join(certdir, "default"))
	for platform in havePlatforms:
		#adding "default" again won't break things
		platforms.addPlatform(platform, os.path.join(certdir, platform))
		fetcher = http_client.HTTPFetcher(platform, platforms, fetchOptions, trie)
		fetcherMap[platform] = fetcher
	
	#fetches pages with unrewritten URLs
	fetcherPlain = http_client.HTTPFetcher("default", platforms, fetchOptions)
	
	taskQueue = Queue.Queue(1000)
	startTime = time.time()
	testedUrlPairCount = 0
	
	for i in range(threadCount):
		t = UrlComparisonThread(taskQueue, metric, thresholdDistance)
		t.setDaemon(True)
		t.start()
	
	for plainUrl in mainPages:
		try:
			ruleFname = None
			ruleMatch = trie.transformUrl(plainUrl)
			transformedUrl = ruleMatch.url
			
			if plainUrl == transformedUrl:
				logging.info("Identical URL: %s", plainUrl)
				continue
			
			#URL was transformed, thus ruleset must exist that did it
			ruleFname = os.path.basename(ruleMatch.ruleset.filename)
			fetcher = fetcherMap.get(ruleMatch.ruleset.platform)
			if not fetcher:
				logging.warn("Unknown platform '%s', using 'default' instead. Rulefile: %s.",
					ruleMatch.ruleset.platform, ruleFname)
				fetcher = fetcherMap["default"]
				
		except Exception:
			logging.exception("Failed to transform plain URL %s. Rulefile: %s.",
				plainUrl, ruleFname)
			continue
		
		testedUrlPairCount += 1
		task = ComparisonTask(plainUrl, transformedUrl, fetcherPlain, fetcher, ruleFname)
		taskQueue.put(task)
		
	taskQueue.join()
	logging.info("Finished in %.2f seconds. Loaded rulesets: %d, URL pairs: %d.",
		time.time() - startTime, len(xmlFnames), testedUrlPairCount)
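
The worker pattern used in the last two examples (daemon threads draining a bounded queue, with join() as the barrier) in isolation; a self-contained Python 2 sketch, since these examples use the Python 2 Queue module:

    import Queue     # 'queue' in Python 3
    import threading

    def worker(tasks):
        while True:
            task = tasks.get()
            try:
                task()             # stand-in for the real comparison work
            finally:
                tasks.task_done()  # always mark done so join() can return

    tasks = Queue.Queue(1000)      # bounded: producers block instead of ballooning memory
    for _ in range(4):
        t = threading.Thread(target=worker, args=(tasks,))
        t.setDaemon(True)          # daemons let the process exit while workers block on get()
        t.start()

    for i in range(10):
        tasks.put(lambda i=i: None)  # enqueue placeholder tasks
    tasks.join()                   # returns once every task_done() has been called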