def run(self):
    if not self.conf.force:
        self.error("Must force writes (--force).")
        clip.exit(err=True)

    rc = False
    content = None
    in_file = Path(self.kwargs.in_file)
    if not in_file.isfile():
        self.error("Template file does not exist.")
        clip.exit(err=True)

    self.info("Generate...")
    if self.kwargs.out_file == "=":
        # "=" means: render next to the template, dropping its last suffix.
        f = Path(self.kwargs.in_file)
        out_file = f.parent / f.namebase
        rc = self.process_one_file(in_file, out_file, self.make_file)
        content = out_file.bytes()
    elif self.kwargs.out_file:
        out_file = Path(self.kwargs.out_file)
        rc = self.process_one_file(in_file, out_file, self.make_file)
        content = out_file.bytes()
    else:
        # No output file given: render into a temporary file and
        # report its content instead.
        with temporary.temp_file() as fname:
            out_file = Path(fname)
            rc = self.process_one_file(in_file, out_file, self.make_file)
            content = out_file.bytes()

    if not self.kwargs.out_file:
        self.info("Produced file:")
        self.report(content)
    clip.exit(err=bool(rc))
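# A minimal stand-alone sketch of the "=" output convention handled above,
# assuming path.py semantics: namebase strips only the final suffix, so a
# template named "index.html.tmpl" renders next to itself as "index.html".
# The template path below is made up for illustration.
from path import Path

f = Path("site/index.html.tmpl")
out_file = f.parent / f.namebase
print(out_file)  # site/index.html on POSIX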
def test_compare_workspace_workflow(self):
    if self._is_base_test():
        return

    spec_source_path = Path(ftw.workspace.__file__).joinpath(
        '..', 'profiles', 'default', 'workflows',
        Path(self.workflow_path).basename(),
        self.specification_name).abspath()

    self.assertTrue(
        spec_source_path.isfile(),
        "The source file at '{}' no longer exists.".format(
            spec_source_path))

    class_file_path = sys.modules[self.__class__.__module__].__file__
    spec_target_path = Path(class_file_path).joinpath(
        '..', self.workflow_path, self.specification_name).abspath()

    self.maxDiff = None
    self.assertMultiLineEqual(
        spec_source_path.bytes(),
        spec_target_path.bytes() if spec_target_path.isfile() else '',
        """
        Your policy's specification.txt is no longer in sync with the
        specification in ftw.workspace.

        ftw.workspace provides a recommended workflow which should be
        used in your package. Perhaps this workflow has been updated
        and you did not update your policy with the new workflow.
        If this is the case, just run the following command to fix the
        test:

        cp {} {}

        Otherwise, if you want to provide your own workflow in your
        policy which differs from the recommended workflow in
        ftw.workspace, you have to adjust the failing test case. Just
        inherit from the `WorkflowTest` class directly and your
        workflow will be tested as usual:

        Rename: {}(CompareWorkspaceWorkflowTest)
        to: {}(WorkflowTest)
        """.format(
            spec_source_path, spec_target_path,
            self.__class__.__name__, self.__class__.__name__))
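# Hedged sketch of the lookup pattern used by the test above: resolve a
# file relative to a module's __file__ with path.py joinpath('..', ...),
# then abspath() to normalize the trailing "module.py/.." segment. The
# module and file names below are illustrative.
import sys

from path import Path

module_file = sys.modules[__name__].__file__
spec = Path(module_file).joinpath(
    '..', 'workflows', 'specification.txt').abspath()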
def render(dir, f):
    # povray has a strange file access policy;
    # it is better to generate under tmp.
    # CLI doc:
    # http://library.thinkquest.org/3285/language/cmdln.html
    templ = '#local pcb_rotate_%s = %s'
    pov = Path(f.replace('.brd', '.pov'))
    if pcb_rotate != (0, 0, 0):
        # Patch the rotation values into the generated .pov source.
        s = pov.bytes()
        s = s.replace(templ % ('x', 0), templ % ('x', pcb_rotate[0]))
        s = s.replace(templ % ('y', 0), templ % ('y', pcb_rotate[1]))
        s = s.replace(templ % ('z', 0), templ % ('z', pcb_rotate[2]))
        pov.write_bytes(s)

    fpng = Path(f.replace('.brd', '.png'))
    cmd = []
    cmd += ["povray"]
    cmd += ["-d"]                 # no display
    cmd += ["-a"]                 # anti-aliasing
    cmd += ['+W' + str(size[0])]  # width
    cmd += ['+H' + str(size[1])]  # height
    cmd += ['-o' + fpng]          # output file
    cmd += ['-L' + eagle3d]       # library path
    cmd += [pov]
    p = Proc(cmd).call()
    if not fpng.exists():
        raise EagleError('povray error, proc=%s' % p)
    fpng.copy(output)
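# Illustrative check of the in-place rotation patch above: the template
# line '#local pcb_rotate_x = 0' is swapped for the requested angle. The
# snippet below is a stand-alone toy, not actual Eagle3D data.
templ = '#local pcb_rotate_%s = %s'
s = '#local pcb_rotate_x = 0\n#local pcb_rotate_y = 0\n'
s = s.replace(templ % ('x', 0), templ % ('x', 90))
print(s)  # the x line now reads '#local pcb_rotate_x = 90'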
# session and the file describes the gap between the previously already
# existing tweets and the newer tweets we are currently importing.
# The file contains a JSON string with this info:
# - "previously_newest_tweet": the ID of the tweet which was the newest
#   before we started the current import session.
# - "session_oldest_tweet": the ID of the oldest tweet imported in this
#   session, which is normally the last imported tweet.
SESSION_STATE_FILE = Path(__file__).joinpath(
    '..', '..', 'session_state_' + BRAND + '.json').abspath()

if SESSION_STATE_FILE.exists():
    # There is already an active session; load the session state from the
    # file and continue where we stopped.
    SESSION_STATE = json.loads(SESSION_STATE_FILE.bytes())
else:
    # We are starting a new import session, so let's start by writing a
    # session state file with the currently newest tweet ID.
    document_ids = tuple(filter(lambda id_: not id_.startswith('_'),
                                database))
    SESSION_STATE = {
        'previously_newest_tweet': max(document_ids),
        'session_oldest_tweet': None,
    }
    SESSION_STATE_FILE.write_text(json.dumps(SESSION_STATE))

# The twitter client may stop iterating the tweets at some point.
# In order to automatically continue at the last position, we put the
# import in a "while"-loop which will be stopped when there are no new
# tweets to import.
while True:
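    # (Separate hedged sketch; the real loop body is truncated here.)
    # One way the session state could be advanced after each imported
    # tweet so an interrupted run resumes correctly; save_session_state
    # is a hypothetical helper, not part of the original script.
    def save_session_state(tweet_id):
        SESSION_STATE['session_oldest_tweet'] = tweet_id
        SESSION_STATE_FILE.write_text(json.dumps(SESSION_STATE))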
def with_cropped_image(self):
    image = Path(__file__).joinpath('..', 'assets', 'cropped.jpg').abspath()
    self.arguments['cropped_image'] = NamedBlobImage(
        data=image.bytes(), filename=u'cropped.gif')
    return self
def with_dummy_image(self):
    image = Path(__file__).joinpath('..', 'assets', 'fullhd.jpg').abspath()
    self.arguments['image'] = NamedBlobImage(
        data=image.bytes(), filename=u'test.gif')
    return self
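# Minimal stand-alone sketch of the pattern shared by the two builder
# methods above: read asset bytes via path.py and wrap them in a
# plone.namedfile NamedBlobImage. The asset path is illustrative.
from path import Path
from plone.namedfile.file import NamedBlobImage

asset = Path(__file__).joinpath('..', 'assets', 'fullhd.jpg').abspath()
blob = NamedBlobImage(data=asset.bytes(), filename=u'test.gif')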
def load_config_clicked(self):
    filename = QtWidgets.QFileDialog.getOpenFileName()[0]
    filepath = Path(filename)
    if filepath == "" or not filepath.exists():
        print("Invalid file given '%s'" % str(filepath))
        return

    raw = filepath.bytes()
    json_data = json.loads(raw)
    self.core.configuration.Clear()
    try:
        json2pb(self.core.configuration, json_data)
    except Exception:
        print("Invalid JSON data given")
        return

    # -- Clear everything
    self.inputs_table.clearContents()
    self.inputs_table.setRowCount(0)
    self.initial_state_list.clear()
    self.bp_list.clear()
    self.start_field.clear()
    self.stop_field.clear()
    self.policy_table.clearContents()
    self.policy_table.setRowCount(0)
    self.libcall_selector.clear()
    self.syscall_selector.clear()
    self.instruction_selector.clear()
    # ------------

    # -- Fill everything back
    if "start" in json_data:
        self.start_field.setText(hex(json_data["start"]))
    if "stop" in json_data:
        self.stop_field.setText(hex(json_data["stop"]))
    if "libcalls" in json_data:
        for lib in [x for x in json_data["libcalls"]
                    if x["name"] in self.libcalls]:
            self.libcall_selector.addItem(lib["name"])
    if "syscalls" in json_data:
        for sc in [x for x in json_data["syscalls"]
                   if x["name"] in self.syscalls]:
            self.syscall_selector.addItem(sc["name"])
    if "instrs" in json_data:
        for i in [x for x in json_data["instrs"]
                  if x["ident"].lower() in self.instructions]:
            self.instruction_selector.addItem(i["ident"].lower())
    if "policy" in json_data:
        for pol in json_data["policy"]:
            i = self.add_policy_item_action()
            split = pol.split(" => ")
            split2 = split[0].split("::")
            if len(split2) == 1:
                self.set_policy_item_values(
                    i, [split[0], "", "", "", split[1]])
            else:
                self.set_policy_item_values(i, split2 + [split[1]])
    if "inputs" in json_data:
        for input in json_data["inputs"]:
            print("Get in input !")
            i = self.add_input_action()
            wid = self.inputs_table.cellWidget(i, 0)
            wid.setCurrentIndex(wid.findText(input["typeid"]))
            self.inputs_table.item(i, 1).setText(hex(input["address"]))
            wid = self.inputs_table.cellWidget(i, 4)
            wid.setCurrentIndex(wid.findText(input["action"]))
            wid = self.inputs_table.cellWidget(i, 5)
            wid.setCurrentIndex(wid.findText(input["when"]))
            if input["typeid"] == "REG":
                self.inputs_table.item(i, 2).setText(input["reg"]["name"])
                reg = input["reg"]["value"]
                value = {"BIT8": reg["value_8"], "BIT16": reg["value_16"],
                         "BIT32": reg["value_32"], "BIT64": reg["value_64"],
                         "BIT80": reg["value_80"], "BIT128": reg["value_128"],
                         "BIT256": reg["value_256"]}[reg["typeid"]]
                if isinstance(value, int):
                    self.inputs_table.item(i, 3).setText(hex(value))
                else:
                    self.inputs_table.item(i, 3).setText(to_hex(value))
            else:
                self.inputs_table.item(i, 2).setText(hex(input["mem"]["addr"]))
                self.inputs_table.item(i, 3).setText(
                    to_hex(base64.b64decode(input["mem"]["value"])))
    if "breakpoints" in json_data:
        for bp in json_data["breakpoints"]:
            self.bp_list.addItem(hex(bp))
    if "initial_state" in json_data:
        for item in json_data["initial_state"]:
            self.initial_state_list.addItem(
                "%x -> %x: %s" % (item["addr"],
                                  item["addr"] + len(item["value"]),
                                  item["value"]))
    if "direction" in json_data:
        self.direction_selector.setCurrentIndex(
            self.direction_selector.findText(json_data["direction"].title()))
    if "callcvt" in json_data:
        self.callcvt_selector.setCurrentIndex(
            self.callcvt_selector.findText(json_data["callcvt"].lower()))
    if "ksteps" in json_data:
        self.k_spinbox.setValue(json_data["ksteps"])
    if "analysis_name" in json_data:
        self.analysis_name_selector.setCurrentIndex(
            self.analysis_name_selector.findText(json_data["analysis_name"]))
    if "solver" in json_data:
        index = self.solver_selector.findText(json_data["solver"])
        if index != -1:
            self.solver_selector.setCurrentIndex(index)
    if "incremental" in json_data:
        self.incremental_solving_checkbox.setChecked(json_data["incremental"])
    if "timeout" in json_data:
        self.timeout_spinbox.setValue(json_data["timeout"])
    if "optim_cstprop" in json_data:
        self.cstprop_checkbox.setChecked(json_data["optim_cstprop"])
    if "optim_rebase" in json_data:
        self.rebase_checkbox.setChecked(json_data["optim_rebase"])
    if "optim_row" in json_data:
        self.row_checkbox.setChecked(json_data["optim_row"])
    if "default_action" in json_data:
        self.default_action_selector.setCurrentIndex(
            self.default_action_selector.findText(json_data["default_action"]))
    if "verbosity" in json_data:
        self.verbosity_slider.setValue(json_data["verbosity"])
    if "additional_parameters" in json_data:
        analyse = self.parent.analysis_from_name(
            json_data["additional_parameters"]["typeid"])
        analyse.config_widget.set_fields(json_data["additional_parameters"])
    self.configuration_textarea.setText(raw)
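# Hedged illustration of the policy-string format parsed above: entries
# look like "<a>::<b>::... => <action>" or "<pred> => <action>"; the
# concrete entry below is made up.
pol = "libc::strcpy => SYMB"
split = pol.split(" => ")      # ["libc::strcpy", "SYMB"]
split2 = split[0].split("::")  # ["libc", "strcpy"]
row = (split2 + [split[1]] if len(split2) > 1
       else [split[0], "", "", "", split[1]])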
def docgen(all_stories, project_dir, story_dir, build_dir, temp_dir,
           check=False):
    """
    Generate markdown documentation.
    """
    docfolder = build_dir
    if check:
        # Regenerate the docs in a temp dir and compare them with the
        # committed docs instead of overwriting anything.
        if temp_dir.exists():
            temp_dir.rmtree(ignore_errors=True)
        temp_dir.mkdir()
        directory_template(
            all_stories, project_dir, story_dir, temp_dir, readme=False
        ).ensure_built()
        temp_dir.joinpath("changelog.md").write_text(changelog(project_dir))
        temp_dir.joinpath("fingerprint.txt").remove()
        print("Docs checked")

        assert len(list(pathquery(temp_dir))) == len(list(
            pathquery(docfolder))), "Real docs differ from generated docs"

        for temp_docfile in pathquery(temp_dir):
            if not temp_docfile.isdir():
                equivalent_realdocfile = Path(
                    temp_docfile.replace(temp_dir, docfolder))
                print("Checking {}".format(equivalent_realdocfile))
                textfile = mimetypes.guess_type(
                    temp_docfile)[0] is not None and mimetypes.guess_type(
                        temp_docfile)[0].startswith("text")
                if textfile:
                    error_message = (
                        "Generated file different from real,\n{}".format(
                            "".join(difflib.ndiff(
                                equivalent_realdocfile.text().splitlines(True),
                                temp_docfile.text().splitlines(True),
                            ))))
                    assert (equivalent_realdocfile.text() ==
                            temp_docfile.text()), error_message
                else:
                    assert (
                        equivalent_realdocfile.bytes() == temp_docfile.bytes()
                    ), ("Generated file different from real, "
                        "please rerun docgen or report bug.")
    else:
        if docfolder.exists():
            docfolder.rmtree(ignore_errors=True)
        docfolder.mkdir()
        directory_template(
            all_stories, project_dir, story_dir, docfolder, readme=False
        ).ensure_built()
        docfolder.joinpath("changelog.md").write_text(changelog(project_dir))
        docfolder.joinpath("fingerprint.txt").remove()
        print("Docs generated")
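# Possible invocation of docgen(), assuming path.py Path objects for the
# directory arguments (rmtree/joinpath/remove above are path.py methods);
# all_stories and the concrete paths are placeholders.
from path import Path

all_stories = []  # placeholder for the real story collection
docgen(all_stories, Path("."), Path("story"), Path("docs"),
       Path("gen/tmpdocs"), check=True)  # verify committed docs match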