def writeL2Report(self, writer):
    '''Write a 2-level hierarchic report from this dictionary to the given
    writer (or file).

    Renders a sortable three-column HTML table (value, count, percentage);
    a row whose level-2 breakdown has few enough distinct keys (per
    getMaxDetail()) also gets a detail tooltip.
    '''
    table = htmlutils.SortableTable()
    table.addClass("rpt")
    total = sum(self.values())
    table.setHeader(["Waarde", "Aantal", "Percentage"])
    for x in self.getSortedResult():
        row = htmlutils.TableRow()
        row.addClass("value-row")
        namecell = htmlutils.Cell()
        namecell.addClass("fieldname")
        namecell.content = tr.tr(x[0]).replace("'", "")
        valuecell = htmlutils.Cell()
        valuecell.content = "%s" % x[1]
        valuepctcell = htmlutils.Cell()
        # BUG FIX: a literal percent sign must be escaped as %% inside a
        # %-format string; the original "%d%" raised
        # "ValueError: incomplete format".  Also guard a zero total so an
        # all-zero counter cannot divide by zero.
        valuepctcell.content = "%d%%" % (100 * x[1] / total) if total else "0%"
        if (len(self.level2dict[x[0]].keys()) <= getMaxDetail()):
            row.tooltip = self.getReport(self.level2dict[x[0]])
            row.tooltiptitle = tr.tr(x[0]).replace("'", "")
        row.appendCells([namecell, valuecell, valuepctcell])
        table.addRow(row)
    table.renderTo(writer)
def reportUsage(self, totaldocs):
    '''Generate a row about this field in the fieldstats table containing the
    usage statistics of the field this row represents.

    The row carries quality CSS classes (q-complete / q-almost-complete /
    multi-value / invulboek) plus six cells: field name, % of documents
    using the field, absolute document count, average values per document,
    average field length, and the number of distinct values.
    Returns the populated htmlutils.TableRow.
    '''
    row = htmlutils.TableRow()
    row.addClass("value-row")
    # Quality coloring: >99% of documents filled in counts as "complete",
    # >80% as "almost complete".
    if (self.getNBDocuments() / totaldocs > 0.99):
        row.addClass("q-complete")
    elif (self.getNBDocuments() / totaldocs > 0.8):
        row.addClass("q-almost-complete")
    # Slightly above 1.0 to ignore float noise when flagging repeated fields.
    if (self.getAverageUsePerDocument() > 1.001):
        row.addClass("multi-value")
    if (self.fieldname in invulboek.allfields):
        row.addClass("invulboek")
    '''Determine whether a detailed popup should be shown for this row. Also, if no popup is shown, the checkbox for this row is disabled so that the detail table can also not be shown for printing.'''
    # NOTE(review): this passes the *builtin* `id` function as the argument —
    # looks unintended; verify against reportValueBreakdown's signature.
    values = self.reportValueBreakdown(id)
    if self.shouldReportDetail(values):
        # create the tooltip
        row.tooltip = values.getReport()
        row.tooltiptitle = tr.tr(self.fieldname)
    fieldnameCell = htmlutils.Cell()
    fieldnameCell.addClass("fieldname")
    fieldnameCell.content = tr.tr(self.fieldname)
    # Percentage of documents that use this field, rounded to a whole number.
    usedDocs = htmlutils.Cell("%.0f" % round(100.0 * self.getNBDocuments() / totaldocs))
    usedDocs.addClass("usedDocs")
    usedDocsAbs = htmlutils.Cell(str(self.getNBDocuments()))
    usedDocsAbs.addClass("usedDocsAbs")
    valuesperdoc = htmlutils.Cell("%.2f" % self.getAverageUsePerDocument())
    valuesperdoc.addClass("valuesperdoc")
    averagefieldlength = htmlutils.Cell("%.0f" % self.getAverageFieldLength())
    averagefieldlength.addClass("averagefieldlength")
    numberofuses = htmlutils.Cell(str(len(values)))
    numberofuses.addClass("number-of-use")
    row.appendCells([fieldnameCell, usedDocs, usedDocsAbs, valuesperdoc, averagefieldlength, numberofuses])
    return row
def get_system_specs():
    """Return a one-line human-readable summary of CPU, RAM and (if an
    NVIDIA GPU is running CarlaUE4) the GPU model.

    Uses py-cpuinfo and psutil for CPU/RAM; shells out to `nvidia-smi`
    (when present on PATH) to find the GPU used by the CarlaUE4 process.
    """
    str_system = ""
    cpu_info = cpuinfo.get_cpu_info()
    str_system += "CPU {} {}. ".format(cpu_info['brand'], cpu_info['family'])
    memory_info = psutil.virtual_memory()
    str_system += "{:03.2f} GB RAM memory. ".format(
        memory_info.total / (1024 * 1024 * 1024))
    nvidia_cmd = shutil.which("nvidia-smi")
    if nvidia_cmd:
        gpu_info = subprocess.check_output([nvidia_cmd])
        gpu_info_ext = subprocess.check_output([nvidia_cmd, '-L'])
        for line in gpu_info.decode('ascii').split("\n"):
            if "CarlaUE4" in line:
                # Squeeze repeated spaces so the GPU id is the second token.
                gpu_id = tr(' ', '', line, 's').split(" ")[1]
                for gpu_line in gpu_info_ext.decode('ascii').split("\n"):
                    parts = gpu_line.split(" ")
                    # BUG FIX: the trailing empty line produced by split("\n")
                    # used to raise IndexError on parts[1]; skip short lines.
                    if len(parts) < 2:
                        continue
                    gpu_line_id = parts[1].split(":")[0]
                    if gpu_line_id == gpu_id:
                        gpu_model = gpu_line.split(":")[1].split("(")[0]
                        str_system += "GPU {}".format(gpu_model)
                        break
    # NOTE: leftover debug print statements were removed.
    return str_system
def brainduck():  # Be "duck".
    """Interactive REPL for the Brainduck interpreter.

    Reads programs from stdin until Ctrl-C; interpreter errors are reported
    via a localized message instead of crashing the shell.
    """
    try:
        while True:
            # NOTE(review): `_brainff.f**k(...)` is kept verbatim — it parses
            # as `_brainff.f ** k(...)`; confirm against the _brainff module.
            _brainff.f**k(input("Brainduck >"))
    except KeyboardInterrupt:
        print("")
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so SystemExit and other
        # interpreter-level signals are no longer swallowed.
        print(tr("BRAIN_ERR", "Something wrong happened, so Brainduck was crashed."))
def fordFulkerson(G, s, t, aug=bfsAug):  # Max flow from s to t
    """Compute a maximum flow from s to t in graph G.

    Repeatedly asks `aug` for an augmenting path P with residual capacity c
    and pushes c units along it until no augmenting path remains.  Returns
    the flow as a defaultdict mapping (u, v) edges to flow values.

    NOTE(review): `tr(G)` is assumed to build the transpose of G (needed for
    backward/residual edges) — confirm against the helper's definition.
    """
    H, f = tr(G), defaultdict(int)        # Transpose and flow
    while True:                           # While we can improve things
        P, c = aug(G, H, s, t, f)         # Aug. path and capacity/slack
        if c == 0: return f               # No augm. path found? Done!
        u = t                             # Start augmentation
        while u != s:                     # Backtrack to s
            u, v = P[u], u                # Shift one step
            if v in G[u]: f[u, v] += c    # Forward edge? Add slack
            else: f[v, u] -= c            # Backward edge? Cancel slack
def writeCollectionThesaurusReport(self, writer, collection):
    '''Comparison between this thesaurus and the specified collection.

    Writes an HTML comparison report for the given museum collection: one
    counter-dict table per field in fields_to_check, where every field value
    is tallied under its thesaurus match status.
    '''
    writer.write("<h2>%s Thesaurus</h2>\n" % self.name)
    for fieldname in fields_to_check:
        # Tally each value of this field under its thesaurus status.
        counts = utils.CounterDict()
        for obj in collection.objects:
            for word in obj[fieldname]:
                counts.count(self.getStatusOfWord(word), word)
        heading = "<h3>%s Thesaurus overeenkomst: %s</h3>\n" % (
            self.name, tr.tr(fieldname))
        writer.write(heading)
        counts.writeL2Report(writer)
def writeCollectionThesauriReport(writer, collection):
    '''Comparison between all loaded reference thesauri and the specified
    collection.

    Generates an HTML report for each field in fields_to_check, tallying
    every field value of every object under its best thesaurus status.
    A counterDict-style table is created for each field.
    '''
    # BUG FIX: the original tested for an empty thesaurus list and then
    # executed `pass`, making the check dead code.  Bail out early instead:
    # with no loaded thesauri there is nothing to summarize.
    if (len(getThesauri()) == 0):
        return
    writer.write("<h2>Thesaurus samenvattingen</h2>\n")
    for f in fields_to_check:
        statusmap = utils.CounterDict()
        for obj in collection.objects:
            fieldvalue = obj[f]
            for value in fieldvalue:
                statusmap.count(getThesauriStatusOfWord(value), value)
        writer.write("<h3>Thesaurus samenvatting: %s</h3>\n" % (tr.tr(f)))
        statusmap.writeL2Report(writer)
def sysinfo():
    """Print OS, working directory, host name, user name, language and
    Python version, using localized templates from the tr catalog."""
    defaultset()  # reload settings.json so user name/lang are current
    # BUG FIX: corrected typos in the English fallback strings
    # ("Userame" -> "Username", "Pyhon" -> "Python"); translation keys
    # are unchanged, so existing catalogs still apply.
    print(str(
        tr('SYSINFO_OS', 'Your OS:\t {0}\n')
        + tr('SYSINFO_LOCATION', 'ASVirtualPC Location:\t {1}\n')
        + tr('SYSINFO_PCNAME', 'Computer Name:\t {2}\n\n')
        + tr('SYSINFO_USERNAME', 'Your Username:\t {3}\n')
        + tr('SYSINFO_LANG', 'Your Language:\t {4}\n\n')
        + tr('SYSINFO_PYTHONVER', 'Python Version:\t {5}')
    ).format(
        platform.system(),
        os.getcwd(),
        platform.node(),
        settings.get("user", {"name": ""}).get("name", ""),
        settings.get("user", {"lang": ""}).get("lang", ""),
        platform.python_version(),
    ))
def whatIsTheNumber():
    """Number-guessing game: the player guesses a random number in 1..1000.

    Ctrl-C reveals the answer and exits; invalid (non-numeric) input is
    re-prompted instead of silently ending the game.
    """
    try:
        random.seed()
        num = random.randint(1, 1000)
        print(tr("WITN_BEGIN", "What is the number?(from 1 to 1000)"))
        n = 0
        while True:
            n += 1
            b = "" if n == 1 else "s"  # pluralize "time(s)" in the prompt
            # BUG FIX: a non-numeric entry used to raise ValueError, fall
            # through to the bare outer `except` and silently end the game.
            try:
                answer = int(input(
                    tr("WITN_INPUT", "({a} time{s})Number >").format(a=n, s=b)))
            except ValueError:
                continue
            if answer == num:
                print(tr("WITN_CORRECT", "Correct!"))
                break
            elif answer > num:
                print(tr("WITN_BIG", "That's big."))
            else:
                print(tr("WITN_SMALL", "That's small."))
    except KeyboardInterrupt:
        print(tr("WITN_CLOSED", "The number is {a}. See you!").format(a=num))
    except Exception:
        # Narrowed from a bare `except:`; still best-effort by design.
        pass
def remove_useless_symbol(text):
    """Delete every character listed in USELESS_SYMBOL_CHARS from *text*.

    Uses tr's 'd' (delete) mode, so the empty second argument is ignored.
    """
    return tr.tr(USELESS_SYMBOL_CHARS, '', text, 'd')
def validate(context):
    """
    The flow of `build()` is reconstructed to test particular configuration
    and file settings against one another.

    Checks that DWIPositive/DWINegative inputs exist, carry
    PhaseEncodingDirection metadata, have opposite recognized directions,
    and that every extra DWI pair (2..10) has matching bvec/bval inputs and
    directions consistent with the primary pair.  Raises Exception on any
    violation; logs a warning when only one half of a pair is supplied.

    Future improvements may entail collecting this structure in a datatype
    in `build()` and iterating through that datatype in `validate()`.
    """
    inputs = context._invocation["inputs"]
    if ("DWIPositiveData" in inputs.keys()) and ("DWINegativeData" in inputs.keys()):
        info_pos = inputs["DWIPositiveData"]["object"]["info"]
        info_neg = inputs["DWINegativeData"]["object"]["info"]
        if ("PhaseEncodingDirection" in info_pos.keys()) and (
            "PhaseEncodingDirection" in info_neg.keys()
        ):
            pedir_pos = inputs["DWIPositiveData"]["object"]["info"][
                "PhaseEncodingDirection"
            ]
            pedir_neg = inputs["DWINegativeData"]["object"]["info"][
                "PhaseEncodingDirection"
            ]
            # Normalize BIDS voxel-axis codes (i/j/k) to spatial axes (x/y/z).
            pedir_pos = tr("ijk", "xyz", pedir_pos)
            pedir_neg = tr("ijk", "xyz", pedir_neg)
            if pedir_pos == pedir_neg:
                raise Exception(
                    "DWIPositive and DWINegative must have "
                    + "opposite phase-encoding directions."
                )
            elif not (
                ((pedir_pos, pedir_neg) == ("x-", "x"))
                or ((pedir_pos, pedir_neg) == ("x", "x-"))
                or ((pedir_pos, pedir_neg) == ("y-", "y"))
                or ((pedir_pos, pedir_neg) == ("y", "y-"))
            ):
                raise Exception(
                    "DWIPositive and DWINegative have unrecognized "
                    + "phase-encoding directions"
                )
        else:
            raise Exception(
                "DWIPositive or DWINegative input data is missing "
                + "PhaseEncodingDirection metadata!"
            )
    else:
        raise Exception("DWIPositive or DWINegative input data is missing!")
    # Loop through the individual Diffusion files.
    # i=1 is a special case: the inputs are named DWIPositiveData,
    # DWIPositiveData2, ...3, ... (no suffix for the first pair).
    for i in range(1, 11):
        if i == 1:
            j = ""
        else:
            j = i
        # We only validate a pair when both halves are present; a lone half
        # is warned about in the XOR branch below.
        if ("DWIPositiveData{}".format(j) in inputs.keys()) and (
            "DWINegativeData{}".format(j) in inputs.keys()
        ):
            # Grab the Phase Encoding
            info_pos = inputs["DWIPositiveData{}".format(j)]["object"]["info"]
            info_neg = inputs["DWINegativeData{}".format(j)]["object"]["info"]
            if ("PhaseEncodingDirection" in info_pos.keys()) and (
                "PhaseEncodingDirection" in info_neg.keys()
            ):
                PE_pos = tr("ijk", "xyz", info_pos["PhaseEncodingDirection"])
                PE_neg = tr("ijk", "xyz", info_neg["PhaseEncodingDirection"])
            else:
                # BUG FIX: the original passed logging-style lazy %-args to
                # Exception(), so the message was never formatted (and %i
                # would have failed for the "" suffix of pair 1).
                raise Exception(
                    "DWIPositiveData{} or DWINegativeData{} "
                    'is missing "PhaseEncodingDirection"!'.format(j, j)
                )
            # Each pair must come with its bvec/bval inputs.
            if "DWIPositiveBvec{}".format(j) not in inputs.keys():
                raise Exception(
                    "DWIPositiveBvec{} is missing! Please include".format(j)
                    + " as an input before proceeding."
                )
            if "DWINegativeBvec{}".format(j) not in inputs.keys():
                raise Exception(
                    "DWINegativeBvec{} is missing! Please include".format(j)
                    + " as an input before proceeding."
                )
            if "DWIPositiveBval{}".format(j) not in inputs.keys():
                raise Exception(
                    "DWIPositiveBval{} is missing! Please include".format(j)
                    + " as an input before proceeding."
                )
            if "DWINegativeBval{}".format(j) not in inputs.keys():
                raise Exception(
                    "DWINegativeBval{} is missing! Please include".format(j)
                    + " as an input before proceeding."
                )
            if PE_pos == PE_neg:
                # BUG FIX: same lazy-args-to-Exception defect as above.
                raise Exception(
                    "DWIPositiveData{} and DWINegativeData{} have "
                    "the same PhaseEncodingDirection ({})!".format(j, j, PE_pos)
                )
            elif not (
                ((pedir_pos, pedir_neg) == (PE_pos, PE_neg))
                or ((pedir_pos, pedir_neg) == (PE_neg, PE_pos))
            ):
                # BUG FIX: removed stray shell-style "$" left in "#${}".
                raise Exception(
                    "DWI input pair #{} phase-encoding directions ".format(j)
                    + "({},{}) do not match primary ".format(PE_pos, PE_neg)
                    + "pair ({},{}).\nExiting!".format(pedir_pos, pedir_neg)
                )
        # Warn of the Exclusive OR (XOR) case
        elif ("DWIPositiveData{}".format(j) in inputs.keys()) ^ (
            "DWINegativeData{}".format(j) in inputs.keys()
        ):
            # BUG FIX: %i cannot format the "" suffix of pair 1; %s can.
            log.warning(
                "Only one of DWIPositiveData%s or "
                "DWINegativeData%s "
                "was selected. Thus none of their related data is included "
                "in this analysis.",
                j,
                j,
            )
    if "DWIPositiveData" in inputs.keys():
        info = inputs["DWIPositiveData"]["object"]["info"]
        if "EffectiveEchoSpacing" not in info.keys():
            raise Exception(
                '"EffectiveEchoSpacing" is not found in DWIPositiveData. '
                + "This is required to continue! Exiting."
            )
def test_tr():
    """Every 'l' in the input is replaced by the string 'xxx'."""
    source = "chenminghellloworld"
    translated = tr.tr(source, 'l', 'xxx')
    assert translated == "chenminghexxxxxxxxxoworxxxd"
def test_ds():
    """'ds': delete set-1 characters, then squeeze repeats of set 2."""
    cases = [
        ("a", "0", "aa00", "0"),
        ("a-z", "0-9", "aa00", "0"),
    ]
    for frm, to, text, expected in cases:
        assert_equals(tr(frm, to, text, "ds"), expected)
def quit():
    """Print a localized farewell message and terminate the interpreter.

    NOTE: intentionally shadows the builtin quit(); the name is part of the
    command table's interface.
    """
    print(tr("SEEYOU","See you!"))
    sys.exit()
def test_delete():
    """'d' mode removes every set-1 character, including backslashes."""
    cases = [
        ("ab", "", "abc", "c"),
        ("\\", "", "\\a\\", "a"),
    ]
    for frm, to, text, expected in cases:
        assert_equals(tr(frm, to, text, "d"), expected)
def test_cd():
    """'cd' deletes the complement of set 1, keeping only set-1 characters."""
    cases = [
        ("ab", "", "abc", "ab"),
        ("ab", "", "abcabcabc", "ababab"),
    ]
    for frm, to, text, expected in cases:
        assert_equals(tr(frm, to, text, "cd"), expected)
def test_no_option():
    """Plain translation: literal sets, ranges, escaped '-', and padding."""
    cases = [
        ('ab', 'cd', 'ab', 'cd'),
        ('a-z', 'A-Z', 'ab', 'AB'),
        (r'a\-b', 'cde', 'a-b', 'cde'),
        ('1-9', 'A-I', '8429503671', 'HDBIE0CFGA'),
        ('1-9', 'ABC#', '8429503671', '##B##0C##A'),
    ]
    for frm, to, text, expected in cases:
        assert_equals(tr(frm, to, text), expected)
def test_cs():
    """'cs' squeezes runs of characters NOT in set 1 into one replacement."""
    cases = [
        ('a', '0', 'aa11', 'aa0'),
        ('a', '0', '11aa11', '0aa0'),
    ]
    for frm, to, text, expected in cases:
        assert_equals(tr(frm, to, text, 'cs'), expected)
def test_squeeze():
    """'s' collapses runs of set-1 characters to a single occurrence."""
    cases = [
        ('a', '', 'aabcaa', 'abca'),
        ('$', '', '$$$$a', '$a'),
    ]
    for frm, to, text, expected in cases:
        assert_equals(tr(frm, to, text, 's'), expected)
def test_complement():
    """'c' translates every character NOT in set 1."""
    result = tr('ab', r'\-', '123', 'c')
    assert_equals(result, '---')
def dice():
    """Roll a six-sided die and print the localized result."""
    random.seed()
    roll = random.randint(1, 6)
    print(str(roll) + tr("DICE", " came out."))
def test_delete():
    """'d' mode removes every set-1 character, including backslashes."""
    for frm, to, text, expected in (
        ('ab', '', 'abc', 'c'),
        ('\\', '', '\\a\\', 'a'),
    ):
        assert_equals(tr(frm, to, text, 'd'), expected)
from tr import begintr, tr cmd = "" def defaultset(): filename = "settings.json" with open(filename, "r") as fp: global settings settings = json.load(fp) try: defaultset() begintr() print(tr("ASVPC", "AS Virtual PC")) while True: ndo = datetime.datetime.now() now = ndo.strftime(settings["style"]["datetime"]) cmd = input( str(settings["style"]["prompt"]).format( nowtime=now, user=settings["user"]["name"], year=ndo.year, month=ndo.month, day=ndo.day, hour=ndo.hour, minute=ndo.minute, second=ndo.second)) docommand(cmd)
def test_cd():
    """'cd' deletes the complement of set 1, keeping only set-1 characters."""
    for frm, to, text, expected in (
        ('ab', '', 'abc', 'ab'),
        ('ab', '', 'abcabcabc', 'ababab'),
    ):
        assert_equals(tr(frm, to, text, 'cd'), expected)
output_stem = 'output/disc-h1' # Take the disc files and split at 1k boundaries # Save the xbg chunk which has the file list # Content starts at xce data_fh = open(disc_img, 'rb') # The files on the disk are listed in this chunk. # Make it more readable and write the list to screen for now and to file print("File list") data_fh.seek(file_list_location) file_list_raw = data_fh.read(chunk_size) ## Should refine this to extract the strings file_list_readable = tr('\xe5', ' ', file_list_raw.decode('latin-1')) #print(file_list_readable) fl_fh = open(output_stem + '-files', 'wb') fl_fh.write(str.encode(file_list_readable)) fl_fh.close # Move on to where the content starts data_fh.seek(contents_start) # Set up the loop count = 1 contig_fh = open(output_stem + '-' + str(count), 'wb') text_raw = data_fh.read(chunk_size) while (len(text_raw) == 1024): text_readable = tr('\x80-\xff', '\x00-\x7f', text_raw.decode('latin-1'))
def test_ds():
    """'ds': delete set-1 characters, then squeeze repeats of set 2."""
    for frm, to, text, expected in (
        ('a', '0', 'aa00', '0'),
        ('a-z', '0-9', 'aa00', '0'),
    ):
        assert_equals(tr(frm, to, text, 'ds'), expected)
strftime=( """ For a given date, return a date string in strftime(3) format """, lambda v, f='%F %T%z', tz='UTC': v.strftime(f), ), to_set=(""" Create a set from list """, lambda l: builtins.set(l)), to_date=( """ For a given string, try and parse the date """, # NOTE This isn't resilient against leapseconds # https://stackoverflow.com/questions/1697815/how-do-you-convert-a-time-struct-time-object-into-a-datetime-object#comment31967564_1697838 lambda v, f='%Y-%m-%d %H:%M:%S.%f': datetime(*(time.strptime(v, f)[:6]) )), to_url=(""" Take values from a dict passed in and return a string formatted in the form of a URL """, lambda v, f='https://{hostname}': f.format(**v)), tr=(""" Emulate tr(1) """, lambda s, x, y, m='': _tr.tr(x, y, s, m)), uniq=(""" Remove duplicates items from set keeping order """, lambda l: more_itertools.unique_everseen(l)), unshift=(""" Prepend items to a list and return the list """, lambda *n: prepend(*n)), values=(""" Return the values of a dict passed in """, lambda d: d.values()), wrap=(""" Wrap value with "parantheses" specified in format (default "()") """, lambda v, t='()': '{}{}{}'.format(t[0], v, t[1])), ) filters.update({ # Seems del is specially treated inside dict() 'del': (""" Remove item at index x from list """,
def test_complement():
    """'c' translates every character NOT in set 1."""
    translated = tr("ab", r"\-", "123", "c")
    assert_equals(translated, "---")
def test_squeeze():
    """'s' collapses runs of set-1 characters to a single occurrence."""
    for frm, to, text, expected in (
        ("a", "", "aabcaa", "abca"),
        ("$", "", "$$$$a", "$a"),
    ):
        assert_equals(tr(frm, to, text, "s"), expected)
def getReport(self, dict=None):
    '''Generate an HTML report table from this dictionary.

    :param dict: counter mapping to report; defaults to self.realdict.
        (NOTE: shadows the builtin `dict` — kept because the keyword name
        is part of the public interface.)
    :returns: HTML string with one row per value: count, percentage, value.
    '''
    html = ""
    html += '<table class="countertable" border="0">'
    html += "<thead><tr><th>Aantal</th><th>%</th><th>Waarde</th></tr></thead>\n<tbody>"
    if (dict is None):
        dict = self.realdict
    total = sum(dict.values())
    for x in self.getSortedResult(dict):
        # BUG FIX: a literal percent sign must be escaped as %% in a
        # %-format string; the original "<td>%d%</td>" raised
        # "ValueError: unsupported format character '<'".
        html += "<tr>\n<td>%s</td>\n<td>%d%%</td>\n<td>%s</td>\n</tr>" % (
            x[1], 100 * x[1] / total, tr.tr(x[0]).replace("'", ""))
    html += "</tbody></table>"
    return html
def test_cs():
    """'cs' squeezes runs of characters NOT in set 1 into one replacement."""
    for frm, to, text, expected in (
        ("a", "0", "aa11", "aa0"),
        ("a", "0", "11aa11", "0aa0"),
    ):
        assert_equals(tr(frm, to, text, "cs"), expected)
def build(context):
    """Assemble the parameter dict for the HCP Diffusion Preprocessing run.

    Reads DWI inputs/metadata from the gear context, derives the
    phase-encoding direction code (PEdir), builds '@'-joined pos/neg data
    lists with symlinked working copies under tmp_input, and stores the
    resulting OrderedDict in context.gear_dict["Diff-params"].
    Validation of these settings happens later in validate().
    """
    config = context.config
    inputs = context._invocation["inputs"]
    # Default Config Settings
    # DwellTime for DWI volumes
    EffectiveEchoSpacing = "NONE"
    # no gradient correction unless we are provided with a .grad file
    if "GradientCoeff" in inputs.keys():
        GradientDistortionCoeffs = create_sanitized_filepath(
            context.get_input_path("GradientCoeff")
        )
    else:
        GradientDistortionCoeffs = "NONE"
    # Set PEdir variable based on Phase-Encoding directions
    PEdir = ""
    pedir_pos = None
    pedir_neg = None
    if ("DWIPositiveData" in inputs.keys()) and ("DWINegativeData" in inputs.keys()):
        info_pos = inputs["DWIPositiveData"]["object"]["info"]
        info_neg = inputs["DWINegativeData"]["object"]["info"]
        if ("PhaseEncodingDirection" in info_pos.keys()) and (
            "PhaseEncodingDirection" in info_neg.keys()
        ):
            # Normalize BIDS voxel-axis codes (i/j/k) to spatial axes (x/y/z).
            pedir_pos = tr("ijk", "xyz", info_pos["PhaseEncodingDirection"])
            pedir_neg = tr("ijk", "xyz", info_neg["PhaseEncodingDirection"])
            # PEdir codes: 1 = left/right (x), 2 = anterior/posterior (y).
            if ((pedir_pos, pedir_neg) == ("x-", "x")) or (
                (pedir_pos, pedir_neg) == ("x", "x-")
            ):
                PEdir = 1
            elif ((pedir_pos, pedir_neg) == ("y-", "y")) or (
                (pedir_pos, pedir_neg) == ("y", "y-")
            ):
                PEdir = 2
    # Create the posData and negData lists
    # posData and negData are '@'-delimited lists of nifti files on the command
    # line. We will build them, validate them, and link
    posData = []
    negData = []
    # Even With the DWIPos/Neg data checking out, above,
    # I am going to loop through everything to be more compact
    # making a lists of the pos/neg data/bvec/bval to validate later
    test = {"data": {}, "PE": {}, "bvecs": {}, "bvals": {}}
    valid = {"data": {}, "PE": {}, "bvecs": {}, "bvals": {}}
    base_dir = op.join(context.work_dir, "tmp_input")
    # i=1 is a special case: the inputs are named DWIPositiveData,
    # DWIPositiveData2, ...3, ... (no suffix for the first pair).
    for i in range(1, 11):
        if i == 1:
            j = ""
        else:
            j = i
        # We only add to posData and negData if both are present
        # If only one is present, warn them in validate()
        if ("DWIPositiveData{}".format(j) in inputs.keys()) and (
            "DWINegativeData{}".format(j) in inputs.keys()
        ):
            # Save the filepaths for later:
            test["data"]["Pos"] = create_sanitized_filepath(
                context.get_input_path("DWIPositiveData{}".format(j))
            )
            test["data"]["Neg"] = create_sanitized_filepath(
                context.get_input_path("DWINegativeData{}".format(j))
            )
            # We know what we want the end result to be. We append to the list
            # and ensure that it is correct in validate()
            posData.append(op.join(base_dir, "Pos{}".format(i), "data.nii.gz"))
            negData.append(op.join(base_dir, "Neg{}".format(i), "data.nii.gz"))
            # Making the directories for these as we go
            os.makedirs(op.join(base_dir, "Pos{}".format(i)), exist_ok=True)
            os.makedirs(op.join(base_dir, "Neg{}".format(i)), exist_ok=True)
            # Grab the Phase Encoding
            info_pos = inputs["DWIPositiveData{}".format(j)]["object"]["info"]
            info_neg = inputs["DWINegativeData{}".format(j)]["object"]["info"]
            if ("PhaseEncodingDirection" in info_pos.keys()) and (
                "PhaseEncodingDirection" in info_neg.keys()
            ):
                test["PE"]["Pos"] = tr("ijk", "xyz", info_pos["PhaseEncodingDirection"])
                test["PE"]["Neg"] = tr("ijk", "xyz", info_neg["PhaseEncodingDirection"])
            else:
                test["PE"]["Pos"] = None
                test["PE"]["Neg"] = None
            # Grab each of the pos/neg bvec/bval files or make them None
            if "DWIPositiveBvec{}".format(j) in inputs.keys():
                test["bvecs"]["Pos"] = create_sanitized_filepath(
                    context.get_input_path("DWIPositiveBvec{}".format(j))
                )
            else:
                test["bvecs"]["Pos"] = None
            if "DWINegativeBvec{}".format(j) in inputs.keys():
                test["bvecs"]["Neg"] = create_sanitized_filepath(
                    context.get_input_path("DWINegativeBvec{}".format(j))
                )
            else:
                test["bvecs"]["Neg"] = None
            if "DWIPositiveBval{}".format(j) in inputs.keys():
                test["bvals"]["Pos"] = create_sanitized_filepath(
                    context.get_input_path("DWIPositiveBval{}".format(j))
                )
            else:
                test["bvals"]["Pos"] = None
            if "DWINegativeBval{}".format(j) in inputs.keys():
                test["bvals"]["Neg"] = create_sanitized_filepath(
                    context.get_input_path("DWINegativeBval{}".format(j))
                )
            else:
                test["bvals"]["Neg"] = None
            # Comparing Phase Encoding Direction of the first to the Phase
            # Encoding.
            # The redundancy (first cycle is the first one) helps reduce the
            # complexity of the code.
            if (pedir_pos, pedir_neg) == (test["PE"]["Pos"], test["PE"]["Neg"]):
                # making a lists of the pos/neg data/bvec/bval to validate
                for key in ["data", "PE", "bvecs", "bvals"]:
                    valid[key]["Pos"] = test[key]["Pos"]
                    valid[key]["Neg"] = test[key]["Neg"]
            # if the phases are reversed, flip the order of our data/vecs/vals
            elif (pedir_pos, pedir_neg) == (test["PE"]["Neg"], test["PE"]["Pos"]):
                # making a lists of the pos/neg data/bvec/bval to validate
                for key in ["data", "PE", "bvecs", "bvals"]:
                    valid[key]["Pos"] = test[key]["Neg"]
                    valid[key]["Neg"] = test[key]["Pos"]
            # If something is way different, fill them with 'None'
            else:
                for key in ["data", "PE", "bvecs", "bvals"]:
                    valid[key]["Pos"] = None
                    valid[key]["Neg"] = None
            # Link the validated files into the per-pair working directories.
            # NOTE(review): placed inside the both-present branch — `valid`
            # is only populated there; confirm against the original layout.
            for key in ["data", "bvecs", "bvals"]:
                if "data" == key:
                    ext = "nii.gz"
                else:
                    ext = key[:-1]
                for pol in ["Pos", "Neg"]:
                    make_sym_link(
                        valid[key][pol],
                        op.join(base_dir, "{}{}".format(pol, i), "data.{}".format(ext)),
                    )
    # Read necessary acquisition params from fMRI
    EffectiveEchoSpacing = ""
    if "DWIPositiveData" in inputs.keys():
        info = inputs["DWIPositiveData"]["object"]["info"]
        if "EffectiveEchoSpacing" in info.keys():
            # Seconds -> milliseconds, fixed 15-decimal string for the CLI.
            EffectiveEchoSpacing = format(info["EffectiveEchoSpacing"] * 1000, ".15f")
    # Some options that may become user-specified in the future, but use standard HCP
    # values for now. Cutoff for considering a volume "b0", generally b<10, but for 7T
    # data they are b<70
    b0maxbval = "100"
    # Specified value is passed as the CombineDataFlag value for the
    # eddy_postproc.sh script.
    CombineDataFlag = "1"
    # If JAC resampling has been used in eddy, this value
    # determines what to do with the output file.
    # 2 - include in the output all volumes uncombined (i.e.
    #     output file of eddy)
    # 1 - include in the output and combine only volumes
    #     where both LR/RL (or AP/PA) pairs have been
    #     acquired
    # 0 - As 1, but also include uncombined single volumes
    # Defaults to 1
    ExtraEddyArgs = " "
    # This may later become a configuration option...as GPUs are integrated
    # into the Flywheel architecture. A patch to the DiffPreprocPipeline.sh
    # is needed for this to function correctly. No_GPU = True
    No_GPU = True
    # NOTE(review): re-assignment of `config` is redundant (already set at
    # the top of the function) but kept for byte-identical behavior.
    config = context.config
    params = OrderedDict()
    params["path"] = context.work_dir
    params["subject"] = config["Subject"]
    params["dwiname"] = config["DWIName"]
    params["posData"] = "@".join(posData)
    params["negData"] = "@".join(negData)
    params["PEdir"] = PEdir
    params["echospacing"] = EffectiveEchoSpacing
    params["gdcoeffs"] = GradientDistortionCoeffs
    params["dof"] = config["AnatomyRegDOF"]
    params["b0maxbval"] = b0maxbval
    params["combine-data-flag"] = CombineDataFlag
    params["extra-eddy-arg"] = ExtraEddyArgs
    params["no-gpu"] = No_GPU
    params["printcom"] = " "
    context.gear_dict["Diff-params"] = params
def test_no_option():
    """Plain translation: literal sets, ranges, and an escaped '-'."""
    for frm, to, text, expected in (
        ("ab", "cd", "ab", "cd"),
        ("a-z", "A-Z", "ab", "AB"),
        (r"a\-b", "cde", "a-b", "cde"),
    ):
        assert_equals(tr(frm, to, text), expected)
def runcipher(dictionary, diclenght):
    """Print every candidate decipherment of the global `text`.

    Tries each of the `diclenght` shift keys produced by makekey() and
    prints the translation of `text` under that key.
    (Parameter name `diclenght` kept as-is: it is the public interface.)
    """
    for shift in range(diclenght):
        candidate_key = makekey(shift, diclenght, dictionary)
        print(tr(dictionary, candidate_key, text))