def order_windows_for_selection_macro(shortnames: List[str], ref: str) -> Tuple[str, str]:
    """Select windows in the order they will be combined.

    Args:
        shortnames: The list of shortnames for the aligned stacks.
        ref: The reference infocus image.

    Returns:
        Window order macro and the macro for concatenating stacks.
    """
    img_num = 1
    num_windows = len(shortnames)
    window_order, concat_list = '', ''
    for window in range(num_windows):
        # Build list for concatenation later
        img = f' image{img_num}={shortnames[window]}'
        if (('unflip' in ref and shortnames[window] != 'u_0') or
                ('flip' in ref and 'unflip' not in ref and shortnames[window] != 'f_0')):
            new_select = f'selectWindow("{shortnames[window]}");'
            concat_list = join([concat_list, img])
            window_order = join([window_order, new_select], '\n')
            img_num += 1
        elif '-under-' in shortnames[window] or '+over+' in shortnames[window]:
            new_select = f'selectWindow("{shortnames[window]}");'
            concat_list = join([concat_list, img])
            window_order = join([window_order, new_select], '\n')
            img_num += 1
    return window_order, concat_list
def apply_transform(flip: bool, orient_path: str, fnames: List[str],
                    transform_params: Tuple[float, float, float, bool]) -> Tuple[int, str]:
    """Create macro that applies transformations for pre-alignment.

    Args:
        flip (bool): Option for whether the files are flipped or not.
        orient_path (str): The orientation path (flip, unflip, tfs).
        fnames (List[str]): List of filenames.
        transform_params (Tuple[float, float, float, bool]): Tuple of
            transformation parameters.

    Returns:
        Tuple of the number of images and the shifted/rotated macro.
            - num_files: The number of image files being manipulated.
            - transformed_files_macro: The shifted/rotated macro.
    """
    open_files = ''
    num_files = len(fnames) + 1
    for file in fnames:
        file = join([orient_path, file], '/')
        trans = get_shift_rot_macro(transform_params)
        apply_transform = f'open("{file}");'
        if flip:
            apply_transform = f'''open("{file}");
            {trans}
            '''
        open_files = join([open_files, apply_transform], "\n")
    transformed_files_macro = open_files
    return num_files, transformed_files_macro
def write_single_ls_macro(ref: str, path: str, files: List[List[str]],
                          window: int, sift_params: Dict[str, Any]) -> Tuple[str, str]:
    """Create the linear stack alignment with SIFT (LS) macro for a specific
    orientation, sign, and focus.

    Similar to write_ls_macro but for a single through focal series (tfs).

    Args:
        ref: The reference infocus image filename.
        path: The orientation path (tfs).
        files: List of lists of image filenames.
        window: Placeholder for the name of the new stack being made, for
            bookkeeping when combining all stacks later.
        sift_params: Dictionary of SIFT parameter values.

    Returns:
        Macro for the specific orientation and focus, and its shortname.
    """
    # Grab filenames and create shortened name for window.
    if window == 1:
        shortname = "-under-"
    elif window == 2:
        shortname = "+over+"

    # Create LS macro
    sift = ls_macro(sift_params)

    # Trim excess images and reverse stack if necessary.
    delete, reverse = trim_and_reverse_single_stack(files, shortname)
    num_files = len(files) + 1
    open_files = ""
    for file in files:
        file = join([path, file], '/')
        apply_transform = f'open("{file}");'
        open_files = join([open_files, apply_transform], '\n')

    # Write macro
    ref = join([path, ref], '/')
    macro = f'''open("{ref}");
    {open_files}
    run("Images to Stack", "name={window} title=[] use");
    {sift}
    selectWindow("{window}");
    close();
    selectWindow("Aligned {num_files} of {num_files}");
    rename("{shortname}");
    {delete}
    {reverse}
    '''
    return macro, shortname
def save(self, fname, colormap={}):
    "Saves the graph as gml"
    write_gml(graph=self.graph, fname=fname, colormap=colormap)
    self.fp.write('*** node values ***\n')
    # writes the mapping
    first = self.store.values()[0]
    header = ['state'] + first.keys()
    self.fp.write(util.join(header))
    for fprint, state in sorted(self.store.items()):
        line = [fprint] + map(int, state.values())
        self.fp.write(util.join(line))
def save_states(self, fname):
    """
    Saves the states into a file
    """
    if self.states:
        fp = open(fname, 'wt')
        cols = ['STATE'] + self.first.keys()
        hdrs = util.join(cols)
        fp.write(hdrs)
        for state in self.states:
            cols = [state.fp()] + state.values()
            line = util.join(cols)
            fp.write(line)
        fp.close()
    else:
        util.error('no states have been created yet')
def info(ses, fn, t, prt, kind):
    class Opt:
        def __init__(self, t):
            self.after = t
            self.before = float('inf')
            self.overview = 'none'

    # xxx assumes this exists
    time_metric = util.join('serverStatus', 'localTime')

    # find and print the first sample after t (only)
    for metrics in read(ses, fn, Opt(t), progress=False):
        if kind == 'raw':
            for sample, sample_time in enumerate(metrics[time_metric]):
                sample_time = sample_time / 1000.0
                if sample_time >= t:
                    break
            prt('%s at t=%.3f (%s)' % (fn, t, util.f2s(t)))
            util.print_sample(metrics, sample, prt)
            break
        elif kind == 'metadata':
            prt('metadata at t=%.3f (%s)' % (t, util.f2s(t)))
            if metrics.metadata:
                util.print_bson_doc(metrics.metadata, prt, ' ')
            else:
                prt(' NOT AVAILABLE')
            break
def delete_excess_images_macro(files: List[List[str]], short_names: List[str], ref: str) -> str:
    """Create macro to trim excess images appearing after stack is aligned.

    Args:
        files: 2D list of all of the files.
        short_names: The list of shortnames for the aligned stacks.
        ref: The reference infocus image.

    Returns:
        The macro for trimming excess images from stacks.
    """
    # Initialize strings, set save title, and remove ref image if necessary.
    full_img, window_select, trimmed_macro = '', '', ''
    index = 0
    for i in range(len(files)):
        for j in range(len(files[i])):
            delete = ""
            # Delete excess reference images except from the overfocus
            # stack of the same orientation as the reference image.
            if (len(files[i][j]) == 1 and short_names[index] != 'u_+'
                    and 'unflip' in ref):
                delete = f'''selectWindow("{short_names[index]}");
                run("Delete Slice");
                '''
            elif (len(files[i][j]) == 1 and short_names[index] != 'f_+'
                    and 'flip' in ref and 'unflip' not in ref):
                delete = f'''selectWindow("{short_names[index]}");
                run("Delete Slice");
                '''
            trimmed_macro = join([trimmed_macro, delete], "\n")
            index += 1
    return trimmed_macro
def render3(template, env):
    out = template

    # conditional blocks
    cre = '(?sx) [#]start-(\w+) (.*?) [#]end-\\1'
    conditionals = re.findall(cre, out)

    def mapper(x):
        name = x.group(1)
        text = x.group(2)
        return text if flag(env.get(name, 'yes')) else ''

    # note: the (?sx) inline flags already apply; passing re.X as the
    # fourth positional argument of re.sub would set count, not flags
    out = re.sub(cre, mapper, out)

    # variables
    varnames = re.findall('[$]{?([\w|-]+)}?', template)  # TODO sort by desc len + unique
    #vre = join(varnames+[''],"|","[$]?[$][{{]{0}[|][^}}]+[}}]|[$]?[$][{{]?{0}[}}]?")  # +[''] for handling $$ without var name
    VRE = """
    [$]?[$] [{{]? {0}[|][\w|]+ [}}]?
    |
    [$]?[$] [{{]? {0} [}}]?
    """
    vre = '(?x)' + join(varnames + [''], "|", VRE)  # +[''] for handling $$ without var name

    def mapper(x):
        s = x.group()
        if s.startswith('$$'):
            return s[1:]  # reduce one dollar sign
        name = s[1:].replace('{', '').replace('}', '')  # TODO one replace operation
        tmp = name.split('|')
        name, pipes = tmp[0], tmp[1:]
        val = str(env.get(name, s))  # TODO handle pipes
        return val

    out = re.sub(vre, mapper, out)
    return out
def fijify_macro(unready_macro: str) -> str:
    """Converts a macro into FIJI format and returns it.

    The FIJI format uses ';' as line endings and sometimes contains
    additional whitespace. This function accounts for that, edits the macro
    string, and returns it in the correct format.

    Args:
        unready_macro: The un-formatted FIJI macro.

    Returns:
        fiji_ready_macro: The correctly formatted FIJI macro.
    """
    fiji_ready_macro = ''
    for line in unready_macro.splitlines():
        # Make strings without ';' appear on the same line.
        # Remove excess whitespace.
        line = line.strip()
        whitespace = ''
        if ';' in line:
            whitespace = '\n'
        elif line and not line.isspace():
            whitespace = ' '
        fiji_ready_macro = join([fiji_ready_macro, line, whitespace])
    return fiji_ready_macro
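# --- Usage sketch for fijify_macro (hypothetical, not from the source) ------
# Shows how statements ending in ';' each get their own line while wrapped
# continuation lines are folded back onto their statement. Assumption: the
# module-level join(parts, sep='') simply concatenates strings with an
# optional separator, as its call sites in this file suggest.
def _demo_fijify_macro():
    raw = '''
    open("img.tif");
    run("Images to Stack",
        "name=stack title=[] use");
    '''
    print(fijify_macro(raw))
    # open("img.tif");
    # run("Images to Stack", "name=stack title=[] use");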
def save_plot(plt, title, xlabel, ylabel, data, path):
    plt.plot(data[0], data[1])
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.savefig("%s.png" % join(path, title), dpi=300)
    plt.clf()
def write_gml(graph, fname, colormap={}):
    "Custom gml exporter"
    fp = open(fname, "wt")
    text = ["graph [", "directed 1"]
    nodepatt = 'node [ id %(node)s label "%(node)s" graphics [ fill "%(color)s" w 40 h 30 x %(x)s y %(y)s type "ellipse" ]]'
    rnd = random.randint
    for node in graph.nodes():
        x, y = rnd(50, 200), rnd(50, 200)
        color = colormap.get(node, "#CCCCFF")
        param = dict(node=node, x=x, y=y, color=color)
        text.append(nodepatt % param)
    edgepatt = 'edge [ source %(source)s target %(target)s graphics [ fill "%(color)s" targetArrow "delta" ]]'
    for source, target in graph.edges():
        pair = (source, target)
        color = colormap.get(pair, "#000000")
        param = dict(source=source, target=target, color=color)
        text.append(edgepatt % param)
    text.append("]")
    fp.write(util.join(text, sep="\n"))
    fp.close()
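# --- Usage sketch for write_gml (hypothetical, not from the source) ---------
# write_gml only needs .nodes() and .edges() from its graph argument (real
# callers pass a networkx-style graph) plus a util.join that behaves like
# sep.join; the stub below stands in for such a graph.
class _StubGraph:
    def nodes(self):
        return ['a', 'b']

    def edges(self):
        return [('a', 'b')]

def _demo_write_gml():
    write_gml(_StubGraph(), 'demo.gml', colormap={'a': '#FF0000'})
    # demo.gml now holds a directed GML graph: two ellipse nodes at random
    # positions, node 'a' filled red, and one delta-arrow edge a -> b.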
def FindResFile(fullName):
    (files, posList) = FindFile(fullName, "res")
    path = os.path.dirname(fullName)
    if files == []:
        report.Error("No expected result file found for " + util.join(posList, ", "))
        return None
    elif len(files) > 1:
        report.Error("Several possible expected result files found: " + util.join(files, ", "))
        return None
    else:
        report.Progress(4, "Expected result file is: " + path + "/" + files[0])
        return path + "/" + files[0]
def run_single_ls_align(datafolder: str, reference: str = '',
                        sift_params: Optional[Dict[str, Any]] = None,
                        stack_name: str = 'test_ls_align.tif',
                        fls_files: Optional[List[str]] = None) -> str:
    """Align all 'dm3' files in 'datafolder' and save an aligned tiff stack
    in the datafolder for a single tfs.

    Args:
        datafolder: The datafolder that contains the paths to the dm3 files.
        reference: The reference tag that will be used for aligning.
        sift_params: Dictionary of SIFT params.
        stack_name: The filename for saving the aligned stack.
        fls_files: The list of fls files.

    Returns:
        The full run LS align macro for a single tfs.
    """
    # Grab image data
    path1 = join([datafolder, 'unflip'], '/')
    path2 = join([datafolder, 'flip'], '/')
    unflip_files = pull_image_files(fls_files[0])
    flip_files = pull_image_files(fls_files[1])

    # Generate the FIJI macro for each alignment procedure.
    if reference == 'unflip':
        ref = unflip_files[1]
        files = unflip_files
        path = path1
    elif reference == 'flip':
        ref = flip_files[1]
        files = flip_files
        path = path2

    # Single alignment
    all_macros, shortnames = single_ls_alignment(sift_params, files, path)

    # Post-alignment processing for saving the stack
    all_macros = post_single_ls_alignment(all_macros, shortnames, files,
                                          stack_name, ref)

    # Format macro to run in FIJI
    full_ls_macro = format_macro(all_macros)
    return full_ls_macro
def ValidateResult(name, expResFile, result, stdout, stderr):
    expResult = util.ReadListFromFile(expResFile)
    if expResult == None:
        return false
    expResult.sort()
    result.sort()
    if expResult == result:
        return true

    map = {}
    for key in result:
        if key in map:
            map[key] = map[key] + 1
        else:
            map[key] = 1
    for key in expResult:
        if key in map:
            map[key] = map[key] - 1
        else:
            map[key] = -1

    tomuch, tolittle = [], []
    for key in map.keys():
        if map[key] > 0:
            tomuch.append(str(map[key]) + " * " + str(key))
        elif map[key] < 0:
            tolittle.append(str(abs(map[key])) + " * " + str(key))

    if tomuch != [] or tolittle != []:
        res = ""
        if tomuch != []:
            res = res + "Output not expected: " + util.join(tomuch, ", ") + "\n"
        if tolittle != []:
            res = res + "Output not present: " + util.join(tolittle, ", ") + "\n"
        report.Error("Actual result differs from expected result for " + name,
                     res, stdout, stderr)
        return false
    return true
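# --- Standalone sketch of the comparison in ValidateResult (hypothetical) ---
# The hand-rolled `map` dict above computes a signed multiset difference
# between actual and expected output; collections.Counter expresses the same
# idea more directly.
from collections import Counter

def _diff_results(expected, actual):
    surplus = Counter(actual) - Counter(expected)   # produced but not expected
    missing = Counter(expected) - Counter(actual)   # expected but not produced
    return surplus, missing

# _diff_results(['ok', 'ok', 'fail'], ['ok', 'fail', 'fail'])
# -> (Counter({'fail': 1}), Counter({'ok': 1}))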
def _synchronize(self, timeseries_sequence):
    """Synchronize a sequence of timeseries."""
    # join sequences on date
    for items in util.join(timeseries_sequence, key=lambda x: x[0]):
        # construct a row with one date and default vectors as needed
        date = next(filter(None, items))[0]
        values = list(itertools.chain.from_iterable(
            [i[1:] if i else [self.default_features] for i in items]))
        yield [date] + values
def mbuild_compute_path(hname, search_path):
    """Return the full path of the header hname, if found, and None
    otherwise. Search the path in order and see if we find the file."""
    for p in search_path:
        tname = util.join(p, hname)
        tname = os.path.realpath(tname)
        #mbuild_base.msgb("TESTING", tname)
        if os.path.exists(tname):
            return tname
    return None
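# --- Usage sketch for mbuild_compute_path (hypothetical, not from source) ---
# Assumption: util.join here behaves like os.path.join on (directory, file).
def _demo_compute_path():
    search_path = ['/usr/include', '/usr/local/include']
    hit = mbuild_compute_path('stdio.h', search_path)
    print(hit)  # e.g. '/usr/include/stdio.h' on a typical Linux box, else None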
def pre_ls_alignment(reference: str, check_sift: bool, path1: str, path2: str,
                     fls_files: List[str], tfs_value: str,
                     fls_value: str) -> Pre_LS_Align:
    """Pre-alignment file manipulations for linear stack alignment with SIFT.

    Check setup, get the reference image to align to, and get all image
    filenames.

    Args:
        reference: The type of reference to dictate alignment.
            Options: 'tfs', 'unflip', 'flip'
        check_sift (bool): Option for checking SIFT alignment.
        path1: The first unflip/flip/single path/directory.
        path2: The second unflip/flip/single path/directory.
        fls_files: A list of the FLS filenames.
        tfs_value: The through-focal series option.
            Options: Unflip/Flip, Single
        fls_value: The FLS option.
            Options: One, Two

    Returns:
        vals: A tuple of the reference file as well as all other filenames.
            - vals[0]: The reference filename.
            - vals[1][0]: 2D list of all files.
            - vals[1][1]: 2D list of ordered image files for path1.
            - vals[1][2]: 2D list of ordered image files for path2.
    """
    # Check setup of datafolder
    files1, files2 = read_fls(path1, path2, fls_files, tfs_value, fls_value,
                              check_sift)

    # Get reference image to align to.
    ref = ''
    if reference == 'tfs' or reference == 'unflip':
        ref_list = files1[1].pop()
        ref = join([path1, ref_list], '/')
    elif reference == 'flip':
        ref_list = files2[1].pop()
        ref = join([path2, ref_list], '/')

    all_files = [files1, files2]
    vals = ref, (all_files, files1, files2)
    return vals
def format_macro(all_macros: List[str]) -> str:
    """Format the full macro so it will be ready for use in FIJI.

    Args:
        all_macros: All formatted FIJI macros for alignment.

    Returns:
        full_macro: The finalized FIJI macro.
    """
    joined_macro = join(all_macros, '\n')
    full_macro = fijify_macro(joined_macro)
    return full_macro
def pre_bUnwarp_align(unflip_ref: str, flip_ref: str,
                      mask_files: List[Optional[str]], reference: str,
                      transformation: Tuple[float, float, float, bool]
                      ) -> Tuple[str, str, str, List[Optional[str]]]:
    """Precursor to bUnwarpJ alignment to collect files, references, and masks.

    Determine the source and target images for bUnwarpJ. Apply any
    pre-transformations necessary to help with bUnwarpJ alignment. Open and
    return the masks for source and target.

    Args:
        unflip_ref: The unflip infocus image path.
        flip_ref: The flip infocus image path.
        mask_files: The filenames for the mask files, can be [None, None].
        reference: The reference value, 'unflip' or 'flip'.
        transformation: The pre-shift/rotation to align the infocus images.

    Returns:
        The bUnwarpJ macro for carrying out the bUnwarpJ alignment.
        Additionally returns the source and target path names, as well as
        the associated masks for each image.
    """
    # Grab reference images
    if reference == 'unflip':
        target_path, src_path = unflip_ref, flip_ref
        target_img = 'target_unflip_image'
        src_img = 'source_flip_image'
        src_mask, target_mask = mask_files[1], mask_files[0]
    elif reference == 'flip':
        target_path, src_path = flip_ref, unflip_ref
        target_img = 'target_flip_image'
        src_img = 'source_unflip_image'
        src_mask, target_mask = mask_files[0], mask_files[1]

    # Open files
    macro = f'''
    open("{target_path}");
    rename("{target_img}");
    open("{src_path}");
    rename("{src_img}");
    '''

    # Apply transformations to source image
    apply_transform = get_shift_rot_macro(transformation)
    macro = join([macro, apply_transform], '\n')

    # Open masks
    masks = [src_mask, target_mask]
    return macro, src_img, target_img, masks
def send_py_code_to_max(code):
    """
    Sends a command to 3ds Max to run.

    This function is strongly inspired by the contents of
    https://github.com/cb109/sublime3dsmax/blob/master/sublime3dsmax.py
    """
    try:
        # Make temporary file and set its contents to the code snippet
        filepath = join(gettempdir(), 'temp.py')
        with open(filepath, "w") as f:
            f.write(code)

        # The command to run a python file within Max
        cmd = f'python.ExecuteFile @"{filepath}";'
        log("Sending " + cmd + " to 3ds Max")

        minimacrorecorder = window.find_child(text=None, cls="MXS_Scintilla")
        # If the mini macrorecorder was not found, there is still a chance
        # we are targeting an ancient Max version (e.g. 9) where the
        # listener was not Scintilla based, but instead a rich edit box.
        if minimacrorecorder is None:
            statuspanel = window.find_child(text=None, cls="StatusPanel")
            if statuspanel is None:
                raise Exception(RECORDER_NOT_FOUND)
            minimacrorecorder = statuspanel.find_child(text=None, cls="RICHEDIT")
            # Verbatim strings (the @ sign) are not supported in older Max versions.
            cmd = cmd.replace("@", "")
            cmd = cmd.replace("\\", "\\\\")

        if minimacrorecorder is None:
            raise Exception(RECORDER_NOT_FOUND)

        # Encode the command to bytes, send it to the mmr, then send
        # the return key to simulate enter being pressed.
        cmd = cmd.encode("utf-8")  # Needed for ST3!
        minimacrorecorder.send(winapi.WM_SETTEXT, 0, cmd)
        minimacrorecorder.send(winapi.WM_CHAR, winapi.VK_RETURN, 0)
        minimacrorecorder = None
    except Exception as e:
        # Raise an error to terminate the adapter
        raise Exception("Could not send code to Max due to error:\n\n" + str(e))
def CompileJavaFiles(fullName, lang, type, modules):
    baseName = util.ExtractName(fullName)

    # Find the compiler to use
    compiler = os.path.expandvars(
        cmdline.LookUpWildCard('java', lang, type, 'compiler'))

    # Figure out the names of all the Java files
    javaFiles = "TMAIN.java"
    for mod in modules:
        if os.path.exists(mod + ".java"):
            javaFiles = javaFiles + " " + mod + ".java"
        else:
            package = convert.GetModCls()
            packageStrings = package.split('.')
            packageDir = util.join(packageStrings, '/')
            if os.path.exists(packageDir + ".java") and packageDir not in modules:
                javaFiles = javaFiles + " " + packageDir + ".java"
        if os.path.exists("external_" + mod + ".java"):
            javaFiles = javaFiles + " external_" + mod + ".java"

    # Find the flags for the compiler
    flags = os.path.expandvars(
        os.path.expanduser(cmdline.LookUpWildCard('java', lang, type, 'cflags')))

    # First delete the binary.
    util.DeleteFiles(["TMAIN.class"])

    # Build the command and execute it.
    cmd = compiler + " -d . " + flags + " " + javaFiles
    (exitCode, dummy1, dummy2) = util.RunCommand(cmd, 0, "Problem when compiling generated code")
    ok = (exitCode == 0)
    if ok:
        if not os.path.exists("TMAIN.class"):
            report.Error("TMAIN.class was not created as a result of compiling the generated Java files")
            return false
    return ok
def VerifyPresenceOfGeneratedFiles(fullName, modules):
    ok = true
    package = convert.GetModCls()
    packageStrings = package.split('.')
    packageDir = util.join(packageStrings, '/')
    for mod in modules:
        file1 = mod + ".java"
        file2 = "vdm_" + mod + ".java"
        file3 = packageDir + ".java"
        files = file1 + " or " + file2 + " or " + file3
        if not os.path.exists(file1) and not os.path.exists(file2) \
                and not os.path.exists(file3):
            report.Error("file " + files + " was not generated for testcase '" + fullName + "'")
            ok = false
    return ok
def write_score(self):
    # The looped section programmatically builds a series of scales.
    # Note that the bass clef is just a transposition of the treble clef.
    looped = {"treble": []}
    for start in self.scale('c`', 'c``'):
        looped["treble"] += self.scale(start, 8, 8)
    looped["bass"] = self.transpose(looped["treble"], -1, 'octave')

    # The smart section programmatically builds a series of scales in
    # different keys. Note how we use a list comprehension to avoid a for
    # loop, and use step=2 to play every other note in the treble clef.
    start_notes = self.scale('c```', 'c``')
    smart = {
        "treble": [scale(start, -8, key=letter(start) + " major", dur=8, step=2)
                   for start in start_notes],
        "bass": [scale(self.transpose(start, -1, 'octave'), -8,
                       key=letter(start) + " major", dur=8)
                 for start in start_notes]
    }

    self.score = join(looped, smart)
def write_score(self):
    # The basic section manually builds a scale note by note
    basic = {
        "treble": [note("c`", 8), note("e`", 8), note("g`", 8), note("c``", 8)],
        "bass": [note("c", 8), note("e", 8), note("g", 8), note("c`", 8)]
    }

    # The notes section uses the notes function to build a list of notes
    # from a single string
    intermediate = {
        "treble": notes('d` fs` a` d``', 8),
        "bass": notes('d fs a d`', 8)
    }

    # The arpeggio section uses the arpeggio function to build a scale from
    # one note to the next
    arpeggios = {
        "treble": arpeggio('e`', 'e``', 'E Major', 8),
        "bass": arpeggio('e', 4, 'E major', 8)
    }

    # The length section uses the arpeggio function to build an arpeggio,
    # but specifies a length rather than a stop note
    length = {
        "treble": arpeggio('f`', 4, 'F Major', 8),
        "bass": arpeggio('f', 4, 'F Major', 8)
    }

    starts = self.arpeggio('c`', 'c``')
    stepped = {
        'treble': [[self.arpeggio(start, self.transpose(start, 7), 16, step=step)
                    for step in [3, 3, 1]] for start in starts],
        'bass': [[self.arpeggio(self.transpose(start, -7), start, 16, step=step)
                  for step in [1, 3, 3]] for start in starts]
    }

    self.score = join(basic, intermediate, arpeggios, length, stepped)
def crop_video(person_id, video_id, video_path, args):
    utterance = video_path.split('#')[1]
    bbox_path = os.path.join(args.bbox_folder, os.path.basename(video_path)[:-4] + '.txt')
    reader = imageio.get_reader(video_path)
    chunk_start = float(video_path.split('#')[2].split('-')[0])
    d = pd.read_csv(bbox_path)
    video_count = 0
    initial_bbox = None
    start = 0
    tube_bbox = None
    frame_list = []
    chunks_data = []
    try:
        for i, frame in enumerate(reader):
            bbox = np.array(d.iloc[i])
            if initial_bbox is None:
                initial_bbox = bbox
                start = i
                tube_bbox = bbox
            if bb_intersection_over_union(initial_bbox, bbox) < args.iou_with_initial \
                    or len(frame_list) >= args.max_frames:
                chunks_data += store(frame_list, tube_bbox, video_id, utterance,
                                     person_id, start, i, video_count,
                                     chunk_start, args)
                video_count += 1
                initial_bbox = bbox
                start = i
                tube_bbox = bbox
                frame_list = []
            tube_bbox = join(tube_bbox, bbox)
            frame_list.append(frame)
    except IndexError:
        pass
    chunks_data += store(frame_list, tube_bbox, video_id, utterance, person_id,
                         start, i + 1, video_count, chunk_start, args)
    return chunks_data
def _find_rec_for_missing_file(self, fn, assumed_directory):
    vmsgb(20, "LOOKING FOR MISSING FILE", "%s assuming %s" % (fn, assumed_directory))
    if fn in self.recs:
        vmsgb(20, "FOUND DEP REC FOR MISSING FILE", fn)
        return self.recs[fn]
    if assumed_directory:
        nfn = util.join(assumed_directory, fn)
        if nfn in self.recs:
            vmsgb(20, "FOUND DEP REC FOR MISSING FILE(2)", nfn)
            return self.recs[nfn]
        nfn = os.path.realpath(nfn)
        if nfn in self.recs:
            vmsgb(20, "FOUND DEP REC FOR MISSING FILE(3)", nfn)
            return self.recs[nfn]
    vmsgb(20, "NO DEP REC FOR MISSING FILE", fn)
    return None
def crop_video_neighbor(person_id, video_id, video_path, args):
    bbox_path = os.path.join(args.bbox_folder, os.path.basename(video_path)[:-4] + '.txt')
    reader = imageio.get_reader(video_path)
    d = pd.read_csv(bbox_path)
    video_count = 0
    prev_bbox = None
    start = 0
    tube_bbox = None
    frame_list = []
    chunks_data = []
    try:
        for i, frame in enumerate(reader):
            bbox = np.array(d.iloc[i])
            if prev_bbox is None:
                prev_bbox = bbox
                start = i
                tube_bbox = bbox
            if bb_intersection_over_union(prev_bbox, bbox) < args.iou_with_initial \
                    or len(frame_list) >= args.max_frames:
                chunks_data += store(frame_list, tube_bbox, video_id, person_id,
                                     start, i, video_count, args)
                video_count += 1
                start = i
                tube_bbox = bbox
                frame_list = []
            prev_bbox = bbox
            tube_bbox = join(tube_bbox, bbox)
            frame_list.append(frame)
    except IndexError:
        pass
    chunks_data += store(frame_list, tube_bbox, video_id, person_id, start,
                         i + 1, video_count, args)
    return chunks_data
def render2(template, env):
    # variables
    varnames = re.findall('[$]{?(\w+)}?', template)  # TODO sort by desc len + unique
    vre = join(varnames + [''], "|", "[$]?[$]{{?{0}}}?")  # +[''] for handling $$ without var name

    def mapper(x):
        s = x.group()
        if s.startswith('$$'):
            return s[1:]  # reduce one dollar sign
        name = s[1:].replace('{', '').replace('}', '')  # TODO one replace operation
        return str(env.get(name, s))

    out = re.sub(vre, mapper, template)

    # conditional blocks
    cre = '(?sx) [#]start-(\w+) (.*?) [#]end-\\1'
    conditionals = re.findall(cre, out)

    def mapper(x):
        name = x.group(1)
        text = x.group(2)
        return text if flag(env.get(name, 'yes')) else ''

    # note: the (?sx) inline flags already apply; passing re.X as the
    # fourth positional argument of re.sub would set count, not flags
    out = re.sub(cre, mapper, out)
    return out
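# --- Self-contained sketch of render2 in action (hypothetical helpers) ------
# The module's join/flag helpers are not shown above, so stand-ins are
# assumed here: join(items, sep, patt) formats each item into patt and joins
# the results on sep; flag() interprets yes/no-style strings.
import re

def join(items, sep, patt):
    return sep.join(patt.format(i) for i in items)

def flag(value):
    return str(value).lower() not in ('no', 'off', 'false', '0', '')

def _demo_render2():
    out = render2('Hello ${name}, cost: $$5. #start-hide secret #end-hide',
                  {'name': 'Ada', 'hide': 'no'})
    print(out)  # Hello Ada, cost: $5.   ($$ collapses; the hidden block is dropped)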
def attach_to_maya(contents):
    """
    Defines commands to send to Maya, and sends the attach code to it.
    """
    global attach_code, Maya_path
    config = contents['arguments']

    # Format the simulated attach response to send it back to the debugger
    # while we set up debugpy in the background
    attach_code = ATTACH_TEMPLATE.format(
        debugpy_path=debugpy_path,
        hostname=config['debugpy']['host'],
        port=int(config['debugpy']['port']),
        interpreter=config['interpreter'],
    )

    # Copy code to temporary file and start a Maya console with it
    try:
        send_code_to_maya(attach_code)
    except Exception as e:
        # Raising exceptions shows the text in the Debugger's output.
        # Raise an error to show a potential solution to this problem.
        log("Exception occurred: \n\n" + str(e))
        import platform
        module_path = join(dirname(__file__), 'resources', 'module')
        separator = ';' if platform.system() == 'Windows' else ':'
        raise Exception(
            """
            Could not connect to Maya. Please ensure Maya is running.

            If this is your first time using the debug adapter, ensure the
            MAYA_MODULE_PATH environment variable is set correctly
            (ie contains {0}), then restart Maya and try again.
            """.format(module_path + separator)
        )

    # Then start the Maya debugging threads
    run(start_debugging, ((config['debugpy']['host'], int(config['debugpy']['port'])),))
def post(self, party_id, user_id):
    p_id = int(party_id)
    u_id = int(user_id)
    self.write(util.join(p_id, u_id))
def transform(mat, *t):
    tex = (1,) + t
    vector = numpy.array([tex]).T
    return tuple(util.join((mat * vector).tolist()))
def ParseCommand(self, cmd, name, line):
    ################# General
    m = re.compile('^_print\((?P<str>.*)\)$').match(cmd)
    if m != None:
        return m.group('str') + "\n"

    m = re.compile('^_exists file\((?P<filename>.*)\)').match(cmd)
    if m != None:
        if os.path.exists(m.group('filename')):
            return "true\n"
        return "false\n"

    m = re.compile('^_delete file\((?P<filename>.*)\)').match(cmd)
    if m != None:
        try:
            os.unlink(m.group('filename'))
        except:
            pass
        return ""

    ################# VDMProject
    # VDMProject.New () = new project
    if re.compile('^new project$').match(cmd) != None:
        self.vdmProject.New()
        return ""

    # VDMProject.Open (filename) = open project (filename)
    m = re.compile('^open project\((?P<filename>.*)\)$').match(cmd)
    if m != None:
        self.vdmProject.Open(m.group('filename'))
        return ""

    # VDMProject.Save () = save
    m = re.compile('^save$').match(cmd)
    if m != None:
        self.vdmProject.Save()
        return ""

    # VDMProject.SaveAs (filename) = save as (filename)
    m = re.compile('^save as\((?P<filename>.*)\)$').match(cmd)
    if m != None:
        self.vdmProject.SaveAs(m.group('filename'))
        return ""

    # VDMProject.GetModules () = get modules
    if re.compile('^get modules$').match(cmd) != None:
        (res, modules) = self.vdmProject.GetModules()
        return util.join(modules, "\n") + "\n"

    # VDMProject.GetFiles () = get files
    if re.compile('^get files$').match(cmd) != None:
        (res, files) = self.vdmProject.GetFiles()
        return util.join(files, "\n") + "\n"

    # VDMProject.AddFile = add file (filename)
    m = re.compile('^add file\((?P<filename>.*)\)$').match(cmd)
    if m != None:
        self.vdmProject.AddFile(m.group('filename'))
        return ""

    # VDMProject.RemoveFile = remove file (filename)
    m = re.compile('^remove file\((?P<filename>.*)\)$').match(cmd)
    if m != None:
        self.vdmProject.RemoveFile(m.group('filename'))
        return ""

    ############## VDMModuleRepos
    # VDMModuleRepos.FilesOfModule = files of module (modulename)
    m = re.compile('^files of module\((?P<modulename>.*)\)$').match(cmd)
    if m != None:
        (res, flist) = self.vdmModuleRepos.FilesOfModule(m.group('modulename'))
        return util.join(flist, "\n") + "\n"

    # VDMModuleRepos.GetCurrentModule = current module
    if re.compile('^current module$').match(cmd) != None:
        res = self.vdmModuleRepos.GetCurrentModule() + "\n"
        return res

    # VDMModuleRepos.PopModule = pop module
    if re.compile('^pop module$').match(cmd) != None:
        self.vdmModuleRepos.PopModule()
        return ""

    # VDMModuleRepos.PushModule = push module (modulename)
    m = re.compile('^push module\((?P<modulename>.*)\)$').match(cmd)
    if m != None:
        self.vdmModuleRepos.PushModule(m.group('modulename'))
        return ""

    # VDMModuleRepos.Status = status (modulename)
    m = re.compile('^status\((?P<modulename>.*)\)$').match(cmd)
    if m != None:
        status = self.vdmModuleRepos.Status(m.group('modulename'))
        resStr = ""
        if status.SyntaxChecked:
            resStr = resStr + "S"
        else:
            resStr = resStr + "-"
        if status.TypeChecked:
            resStr = resStr + "T"
        else:
            resStr = resStr + "-"
        if status.CodeGenerated:
            resStr = resStr + "C"
        else:
            resStr = resStr + "-"
        if status.PrettyPrinted:
            resStr = resStr + "P"
        else:
            resStr = resStr + "-"
        return resStr + "\n"

    # VDMModuleRepos.SuperClasses = superclasses (classname)
    m = re.compile('^superclasses\((?P<classname>.*)\)$').match(cmd)
    if m != None:
        (res, clist) = self.vdmModuleRepos.SuperClasses(m.group('classname'))
        return util.join(clist, "\n") + "\n"

    # VDMModuleRepos.SubClasses = subclasses (classname)
    m = re.compile('^subclasses\((?P<classname>.*)\)$').match(cmd)
    if m != None:
        (res, clist) = self.vdmModuleRepos.SubClasses(m.group('classname'))
        return util.join(clist, "\n") + "\n"

    # VDMModuleRepos.UsesOf = usesof (classname)
    m = re.compile('^usesof\((?P<classname>.*)\)$').match(cmd)
    if m != None:
        (res, clist) = self.vdmModuleRepos.UsesOf(m.group('classname'))
        return util.join(clist, "\n") + "\n"

    # VDMModuleRepos.UsedBy = usedby (classname)
    m = re.compile('^usedby\((?P<classname>.*)\)$').match(cmd)
    if m != None:
        (res, clist) = self.vdmModuleRepos.UsedBy(m.group('classname'))
        return util.join(clist, "\n") + "\n"

    ########### VDMInterpreter
    # VDMInterpreter.DynTypeCheck = set dtc [on|off]
    m = re.compile('^set dtc ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmInterpreter._set_DynTypeCheck(true)
        else:
            self.vdmInterpreter._set_DynTypeCheck(false)
        return ""

    # VDMInterpreter.DynInvCheck = set inv [on|off]
    m = re.compile('^set inv ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmInterpreter._set_DynInvCheck(true)
        else:
            self.vdmInterpreter._set_DynInvCheck(false)
        return ""

    # VDMInterpreter.DynPreCheck = set pre [on|off]
    m = re.compile('^set pre ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmInterpreter._set_DynPreCheck(true)
        else:
            self.vdmInterpreter._set_DynPreCheck(false)
        return ""

    # VDMInterpreter.DynPostCheck = set post [on|off]
    m = re.compile('^set post ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmInterpreter._set_DynPostCheck(true)
        else:
            self.vdmInterpreter._set_DynPostCheck(false)
        return ""

    # VDMInterpreter.PPOfValues = set ppr [on|off]
    m = re.compile('^set ppr ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmInterpreter._set_PPOfValues(true)
        else:
            self.vdmInterpreter._set_PPOfValues(false)
        return ""

    # VDMInterpreter.Verbose = set verb [on|off]
    m = re.compile('^set verb ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmInterpreter._set_Verbose(true)
        else:
            self.vdmInterpreter._set_Verbose(false)
        return ""

    # VDMInterpreter.Debug = set dbg [on|off]
    m = re.compile('^set dbg ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmInterpreter._set_Debug(true)
        else:
            self.vdmInterpreter._set_Debug(false)
        return ""

    # VDMInterpreter.Initialize = init
    if re.compile('^init$').match(cmd) != None:
        self.vdmInterpreter.Initialize()
        return ""

    # VDMInterpreter.EvalExpression = eval (expression)
    m = re.compile('^eval\((?P<expression>.*)\)$').match(cmd)
    if m != None:
        res = self.vdmInterpreter.EvalExpression(self.clientID, m.group('expression'))
        return res.ToAscii() + "\n"

    # VDMInterpreter.Apply = apply (expression) to [args]
    m = re.compile('^apply\((?P<expression>.*)\)to\[(?P<arg>.*)\]$').match(cmd)
    if m != None:
        arg_str = "[" + m.group('arg') + "]"
        arg_eval = self.vdmInterpreter.EvalExpression(self.clientID, arg_str)
        expr_str = m.group('expression')
        res = self.vdmInterpreter.Apply(self.clientID, expr_str, arg_eval)
        return res.ToAscii() + "\n"

    # VDMInterpreter.EvalCmd = evalcmd (command)
    m = re.compile('^evalcmd\((?P<command>.*)\)$').match(cmd)
    if m != None:
        self.vdmInterpreter.EvalCmd(m.group('command'))
        return ""

    # VDMInterpreter.SetBreakPointByPos = break (filename, line, col)
    m = re.compile('^break\((?P<filename>.*?),(?P<line>\d+),(?P<col>\d+)\)$').match(cmd)
    if m != None:
        num = self.vdmInterpreter.SetBreakPointByPos(m.group('filename'),
                                                     locale.atoi(m.group('line')),
                                                     locale.atoi(m.group('col')))
        return str(num) + "\n"

    # VDMInterpreter.SetBreakPointByName = break (modulename, func)
    m = re.compile('^break\((?P<modulename>.*?),(?P<func>[a-zA-Z][\w`]*)\)$').match(cmd)
    if m != None:
        num = self.vdmInterpreter.SetBreakPointByName(m.group('modulename'),
                                                      m.group('func'))
        return str(num) + "\n"

    # VDMInterpreter.DeleteBreakPoint = delete (number)
    m = re.compile('^delete\((?P<number>\d+)\)$').match(cmd)
    if m != None:
        self.vdmInterpreter.DeleteBreakPoint(locale.atoi(m.group('number')))
        return ""

    # VDMInterpreter.StartDebugging = debug (expression)
    m = re.compile('^debug\((?P<expression>.*)\)$').match(cmd)
    if m != None:
        res = self.vdmInterpreter.StartDebugging(self.clientID, m.group('expression'))
        return res.ToAscii() + "\n"

    # VDMInterpreter.DebugStep = step
    m = re.compile('^step$').match(cmd)
    if m != None:
        res = self.vdmInterpreter.DebugStep(self.clientID)
        return res.ToAscii() + "\n"

    # VDMInterpreter.DebugStepIn = stepin
    m = re.compile('^stepin$').match(cmd)
    if m != None:
        res = self.vdmInterpreter.DebugStepIn(self.clientID)
        return res.ToAscii() + "\n"

    # VDMInterpreter.DebugSingleStep = singlestep
    m = re.compile('^singlestep$').match(cmd)
    if m != None:
        res = self.vdmInterpreter.DebugSingleStep(self.clientID)
        return res.ToAscii() + "\n"

    # VDMInterpreter.DebugContinue = cont
    m = re.compile('^cont$').match(cmd)
    if m != None:
        res = self.vdmInterpreter.DebugContinue(self.clientID)
        return res.ToAscii() + "\n"

    ########## VDMCodeGenerator
    # VDMCodeGenerator.GeneratePosInfo = set genposinfo [on|off]
    m = re.compile('^set genposinfo ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmCodeGenerator._set_GeneratePosInfo(true)
        else:
            self.vdmCodeGenerator._set_GeneratePosInfo(false)
        return ""

    # VDMCodeGenerator.GenerateCodeList = codegen [java|cpp] ([filename, filename, ...])
    m = re.compile('^codegen (?P<lang>(java)|(cpp))\(\[(?P<modulenames>.*)\]\)$').match(cmd)
    if m != None:
        mlist = m.group('modulenames').split(",")
        if m.group('lang') == "java":
            res = self.vdmCodeGenerator.GenerateCodeList(mlist, ToolboxAPI.VDMCodeGenerator.JAVA)
        else:
            res = self.vdmCodeGenerator.GenerateCodeList(mlist, ToolboxAPI.VDMCodeGenerator.CPP)
        if res:
            return "true\n"
        return "false\n"

    # VDMCodeGenerator.GenerateCode = codegen [java|cpp] (filename)
    m = re.compile('^codegen (?P<lang>(java)|(cpp))\((?P<modulename>.*)\)$').match(cmd)
    if m != None:
        if m.group('lang') == "java":
            res = self.vdmCodeGenerator.GenerateCode(m.group('modulename'),
                                                     ToolboxAPI.VDMCodeGenerator.JAVA)
        else:
            res = self.vdmCodeGenerator.GenerateCode(m.group('modulename'),
                                                     ToolboxAPI.VDMCodeGenerator.CPP)
        if res:
            return "true\n"
        return "false\n"

    ########## VDMParser
    # VDMParser.ParseList = parse ([filename, filename, ...])
    m = re.compile('^parse\(\[(?P<filenames>.*)\]\)$').match(cmd)
    if m != None:
        flist = m.group('filenames').split(",")
        res = self.vdmParser.ParseList(flist)
        if res:
            return "true\n"
        return "false\n"

    # VDMParser.Parse = parse (filename)
    m = re.compile('^parse\((?P<filename>.*)\)$').match(cmd)
    if m != None:
        parse_arg = m.group('filename')
        res = self.vdmParser.Parse(parse_arg)
        if res:
            return "true\n"
        else:
            return "false\n"

    ########## VDMTypeChecker
    # VDMTypeChecker.TypeCheckList = typecheck [pos|def] ([modulename, modulename, ...])
    m = re.compile('^typecheck (?P<posdef>(pos)|(def))\(\[(?P<modulenames>.*)\]\)$').match(cmd)
    if m != None:
        mlist = m.group('modulenames').split(",")
        if m.group('posdef') == "pos":
            self.vdmTypeChecker._set_DefTypeCheck(false)
        else:
            self.vdmTypeChecker._set_DefTypeCheck(true)
        res = self.vdmTypeChecker.TypeCheckList(mlist)
        if res:
            return "true\n"
        return "false\n"

    # VDMTypeChecker.TypeCheck = typecheck [pos|def] (filename)
    m = re.compile('^typecheck (?P<posdef>(pos)|(def))\((?P<modulename>.*)\)$').match(cmd)
    if m != None:
        if m.group('posdef') == "pos":
            self.vdmTypeChecker._set_DefTypeCheck(false)
        else:
            self.vdmTypeChecker._set_DefTypeCheck(true)
        res = self.vdmTypeChecker.TypeCheck(m.group('modulename'))
        if res:
            return "true\n"
        return "false\n"

    # VDMTypeChecker.ExtendedTypeCheck = set full [on|off]
    m = re.compile('^set full ((on)|(off))$').match(cmd)
    if m != None:
        if m.group(1) == 'on':
            self.vdmTypeChecker._set_ExtendedTypeCheck(true)
        else:
            self.vdmTypeChecker._set_ExtendedTypeCheck(false)
        return ""

    ############# VDMPrettyPrinter
    # VDMPrettyPrinter.PrettyPrintList = prettyprint ([filename, filename, ...])
    m = re.compile('^prettyprint\(\[(?P<filenames>.*)\]\)$').match(cmd)
    if m != None:
        flist = m.group('filenames').split(",")
        res = self.vdmPrettyPrinter.PrettyPrintList(flist)
        if res:
            return "true\n"
        return "false\n"

    # VDMPrettyPrinter.PrettyPrint = prettyprint (filename)
    m = re.compile('^prettyprint\((?P<filename>.*)\)$').match(cmd)
    if m != None:
        res = self.vdmPrettyPrinter.PrettyPrint(m.group('filename'))
        if res:
            return "true\n"
        else:
            return "false\n"

    ############ VDMErrors
    # VDMErrors.GetErrors = get errors
    if re.compile('^get errors$').match(cmd) != None:
        (n, errList) = self.vdmErrors.GetErrors()
        res = ""
        for err in errList:
            res = res + err.fname + " [" + str(err.line) + "," + str(err.col) + "] " + err.msg + "\n"
        return res + "Errors: " + str(n) + "\n"

    # VDMErrors.GetWarnings = get warnings
    if re.compile('^get warnings$').match(cmd) != None:
        (n, errList) = self.vdmErrors.GetWarnings()
        res = ""
        for err in errList:
            res = res + err.fname + " [" + str(err.line) + "," + str(err.col) + "] " + err.msg + "\n"
        return res + "Warnings: " + str(n) + "\n"

    raise Syntax("Unknown command")
def write_score(self):
    self.create_chords()
    self.score["treble"] = []
    self.score["bass"] = []
    sections = {}

    def triplet_bar(note_pair, bars=1):
        return triplets(rep(notes(note_pair, 8), int(6 * bars)))

    def doublet_bar(note_pair, bars=1):
        return rep(notes(note_pair, 8), int(4 * bars))

    def A_motif(chord, bars, *tweaks):
        motif = {}
        if 'no treble' in tweaks:
            motif['treble'] = rep(rest(1), bars)
        elif chord == self.aI:
            motif['treble'] = triplet_bar(pattern(chord, [6, 5]), bars=bars)
        elif chord == self.aii and 'low triplets' in tweaks:
            motif['treble'] = triplet_bar(pattern(chord, [5, 4]), bars=bars)
        else:
            motif['treble'] = triplet_bar(pattern(chord, [6, 4]), bars=bars)
        motif['bass1'] = doublet_bar(pattern(chord, 2, 3), bars=bars)
        if 'crotchet bass' in tweaks:
            motif['bass2'] = rep(note(select(chord, 1), 4), int(bars * 4))
        else:
            motif['bass2'] = rep(note(select(chord, 1), 1, phrasing="~"), bars)
        if 'extend tie' not in tweaks:
            motif['bass2'][-1].phrasing = ""
        if 'low first' in tweaks:
            motif['bass1'][0] = chord[0]
            motif['bass2'] = self.transpose(motif['bass2'], -9, "scale")
        return motif

    sections['A0'] = join(A_motif(self.aI, 2, 'no treble'),
                          A_motif(self.aiii, 2, 'no treble', 'low first'))
    sections['A0']['treble'] = time_signature('4/4', sections['A0']['treble'])
    sections['A1'] = join(A_motif(self.aI, 2), A_motif(self.aiii, 2))
    sections['A2'] = join(A_motif(self.aI, 1),
                          A_motif(self.aiii7, 0.5, 'crotchet bass'),
                          A_motif(self.aI, 0.5, 'crotchet bass'),
                          A_motif(self.aiii, 2))
    sections['A3'] = join(A_motif(self.aii, 1, 'low triplets', 'extend tie'),
                          A_motif(self.aii, 1),
                          A_motif(self.aI, 2))
    sections['A4'] = join(A_motif(self.aii, 1, 'low triplets'),
                          A_motif(self.aii7, 0.5, 'crotchet bass'),
                          A_motif(self.aii, 0.5, 'crotchet bass'),
                          A_motif(self.aI, 2))
    A = ['A1', 'A1', 'A2', 'A2', 'A2', 'A3', 'A3', 'A4']

    def arpeggio_bar(arp, bars):
        return rep(notes(pattern(arp, 1, 2, 3, 4, 3, 2), 16), int(4 * bars))

    def altpeggio_bar(arp, bars):
        return time_signature("14/8", rep(
            notes(pattern(arp, 1, 2, 3, 4, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2), 16), 2))

    def B_motif(chord, bars, *tweaks):
        motif = {}
        if 'alt' not in tweaks:
            motif['treble'] = arpeggio_bar(subset(chord, 5, 8), bars=bars)
            motif['bass'] = arpeggio_bar(subset(chord, 4, 1), bars=bars)
        else:
            motif['treble'] = altpeggio_bar(subset(chord, 5, 8), bars=bars)
            motif['bass'] = altpeggio_bar(subset(chord, 4, 1), bars=bars)
        return motif

    sections['B1'] = join(B_motif(self.bI7, 2), B_motif(self.biii, 1),
                          B_motif(self.biii, 1, 'alt'))
    sections['B2'] = join(B_motif(self.bI7, 1), B_motif(self.biii7, 0.5),
                          B_motif(self.bI7, 0.5), B_motif(self.biii, 1),
                          B_motif(self.biii, 1, 'alt'))
    sections['B3'] = join(B_motif(self.bii7, 2), B_motif(self.bI7, 1),
                          B_motif(self.bI7, 1, 'alt'))
    sections['B4'] = join(B_motif(self.bii7, 1), B_motif(self.bii7d5, 1),
                          B_motif(self.bI7, 1), B_motif(self.bI7, 1, 'alt'))
    sections['B1']['treble'][0] = time_signature('12/8', sections['B1']['treble'][0])[0]
    sections['B2']['treble'][0] = time_signature('12/8', sections['B2']['treble'][0])[0]
    sections['B3']['treble'][0] = time_signature('12/8', sections['B3']['treble'][0])[0]
    sections['B4']['treble'][0] = time_signature('12/8', sections['B4']['treble'][0])[0]
    B = ['B1', 'B1', 'B2', 'B2', 'B2', 'B3', 'B3', 'B4']

    def combine(lh, rh):
        return {
            'treble': rep(triplets(subset(sections[rh]['treble'], 7, 12)), 8) +
                      rep(triplets(subset(sections[rh]['treble'], 55, 60)), 8),
            'bass1': sections[lh]['bass1'],
            'bass2': sections[lh]['bass2'],
        }

    sections['C1'] = combine('A1', 'B1')
    sections['C1']['treble'][0].prefix = '\\time 4/4 ' + sections['C1']['treble'][0].prefix
    sections['C2'] = combine('A2', 'B2')
    sections['C3'] = combine('A3', 'B3')
    sections['C4'] = combine('A4', 'B4')
    C = ['C1', 'C1', 'C2', 'C2', 'C2', 'C3', 'C3', 'C4']

    def D_motif(chord, section):
        motif = {}
        motif['bass1'] = sections[section]['bass1']
        motif['bass2'] = sections[section]['bass2']
        if len(chord) == 3:
            motif['treble'] = self.harmonize(
                tied_note(chord[0], [1, 2]) + note(chord[1], 2) +
                tied_note(chord[2], [1, 1]), 1, 'octave')
        else:
            motif['treble'] = self.harmonize(
                tied_note(chord[0], [1, 1]) + tied_note(chord[1], [1, 1]),
                1, 'octave')
        return motif

    sections['D1'] = D_motif(self.diii, 'A1')
    sections['D2'] = D_motif(self.diii, 'A2')
    sections['D3'] = D_motif(self.dI * 2, 'A3')
    sections['D4'] = D_motif(self.dii, 'A4')
    D = ['D1', 'D1', 'D2', 'D2', 'D2', 'D3', 'D3', 'D4']

    for section in sections:
        sections[section]['treble'][0].markup = section

    order = [A, A, B, C, C, B, A, A, B, D, D]
    structure = ['A0']
    for item in order:
        structure += [item, 'A0']

    if self.summary:
        sections_to_print = sections
    else:
        sections_to_print = flatten(structure)

    for section in sections_to_print:
        self.score["treble"] += sections[section]['treble'] + ["\\break\n"]
        if 'B' not in section:
            self.score['bass'] += voices(sections[section]['bass1'],
                                         sections[section]['bass2'])
        else:
            self.score['bass'] += sections[section]['bass']
def run(archivo_entrada, tiempo, directorio_salida, variables, show, export,
        tiempoTotal, epochs=1, estabaEntrenando=True):
    start_time = datetime.datetime.now()
    print("The process has started: {}".format(getTime(start_time)))
    for epoch in range(1, epochs + 1):
        print("------------------------------------------------------Epoch {}".format(epoch))

        # Number of time steps to run
        nt = tiempo
        ct = (np.arange(1, nt))
        C = _read_config_file(archivo_entrada)

        # Spatial discretization
        dx = C['wd'][1, 0] - C['wd'][0, 0]
        # Water velocity at each monitoring point
        va = C['wv'][:, 1]
        # Diffusion coefficient at each monitoring point
        cd = np.zeros(len(va)) + variables['Diff']
        v = np.zeros(len(va)) + np.mean(C['wv'])
        D = np.mean(C['wd'])

        # Boundary conditions
        bc = dict()
        for k, col in config.BC_COLUMNS.items():
            bc[k] = C['bc'][:, col]
        kcondt = 1.92
        bc['T'] = bc['T'] + 273.15
        bc['pH'] = 10**(-1 * bc['pH'])

        # Initial conditions
        Ci = dict()
        for k, col in config.BC_COLUMNS.items():
            Ci['ci_{}'.format(k)] = C['ic'][:, col]
        Ci['ci_T'] = Ci['ci_T'] + 273.15
        Ci['ci_pH'] = 10**(-1 * Ci['ci_pH'])

        mcon = dict()
        for var in config.VARIABLES:
            mcon[var] = np.empty((nt, np.size(Ci['ci_{}'.format(var)], axis=0)))
            mcon[var][0, :] = Ci['ci_{}'.format(var)]
        for var in config.VARIABLES:
            C['S{}'.format(var)] = C['S{}'.format(var)][:, 1:]

        Cout = dict()
        #print("Total Iterations = {}".format(nt))
        for i in range(1, nt):
            sys.stdout.write("\r\tDoing iteration {}, {:.2f}% Completed ".format(i, i / nt * 100))
            muestra = int(i / 3600)
            for k in config.BC_COLUMNS.keys():
                Ci['ci_{}'.format(k)][0] = bc[k][muestra]
            S = dict()
            for var in config.VARIABLES:
                S['S_{}'.format(var)] = C['S{}'.format(var)][:, muestra]

            # Evolve the concentration to t + dt
            Cout, paso_t = calidad_explicito(D, dx, Ci, S, v, cd, C['Caudales'], variables)

            # Store the concentrations at time t + dt
            for var in config.VARIABLES:
                mcon[var][i, :] = Cout['c_{}'.format(var)]

            # Update the initial condition
            for var in config.VARIABLES:
                Ci['ci_{}'.format(var)] = Cout['c_{}'.format(var)]

        mconConduct = kcondt * mcon['TDS']
        mcon['T'] = mcon['T'] - 273.15
        mcon['pH'] = (np.log10(mcon['pH'])) * (-1)
        Cout['c_pH'] = (np.log10(Cout['c_pH'])) * (-1)

        #print("Saving output data...")
        #print("Shape OD array = {}".format(str(mcon['pH'].shape)))
        #print("Shape OD array = {}".format(str(mcon['pH'][0::3600, :].shape)))
        book = xlwt.Workbook()
        for var in config.VARIABLES:
            # take data every 3600 samples: mcon[0::3600, :]
            data = mcon[var][0::3600, :]
            data = np.vstack([data, data.mean(axis=0)])
            save_sheet(book, var, data)
        save_sheet(book, 'Conduct', mconConduct[0::3600, :])
        used_vars(book, variables)
        name = join(directorio_salida, "Resultados.xlsx")
        book.save(name)

        # Compute error
        errores, types, sign_dependency, to_optimize = computeError(name)

        # Update the values according to the error (gradient descent)
        learning_rate = 1
        for var in to_optimize:
            variables[var] += (types[var] * errores[var] * variables[var] *
                               learning_rate * sign_dependency[var] * -1)
        print_state_variables(variables, to_optimize)

    end_time = datetime.datetime.now()
    print("\nThe process has finished at: {}".format(getTime(end_time)))
    elapsed_time = end_time - start_time
    elapsed_time = str(datetime.timedelta(seconds=elapsed_time.total_seconds()))
    print("Total Time: {}".format(elapsed_time))

    if estabaEntrenando:
        print("Final Run: ")
        run(archivo_entrada, tiempoTotal, directorio_salida, variables, show,
            export, tiempoTotal, epochs=3, estabaEntrenando=False)
    else:
        book = xlwt.Workbook()
        used_vars(book, variables)
        name = "FOptimize/Variables.xlsx"
        book.save(name)
        print("The process has finished.")
def tag_error(tagList):
    # Translation of the Indonesian message: "Eww, big sibling! You can't
    # post pictures of <tags> here, you know!"
    return """
Iiih, kakak! Ga boleh ngepost gambar """ + util.join(tagList, ", ", " sama ") + """ di sini, tau!
"""
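# --- Sketch of the join helper this call site implies (assumption only) -----
# util.join(tagList, ", ", " sama ") reads like a list join with a distinct
# final separator ("sama" is Indonesian for "and"); a possible implementation,
# not necessarily the library's actual one.
def _join_with_final_sep(items, sep, last_sep):
    if len(items) < 2:
        return sep.join(items)
    return sep.join(items[:-1]) + last_sep + items[-1]

# _join_with_final_sep(['kucing', 'anjing', 'burung'], ', ', ' sama ')
# -> 'kucing, anjing sama burung'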