def ensure_explicit_namespace(fpath, namespace, varname_list):
    import re
    import utool as ut

    text = ut.read_from(fpath)
    orig_text = text
    new_text = text

    for varname in varname_list:
        regex = ''.join((
            ut.named_field('prefix', '[^.]'),
            ut.named_field('var', ut.whole_word(varname)),
        ))
        repl = ''.join((
            ut.bref_field('prefix'), namespace, '.', ut.bref_field('var')
        ))
        new_text = re.sub(regex, repl, new_text)

    textdiff = ut.get_textdiff(orig_text, new_text)
    print(textdiff)
    if ut.user_cmdline_prompt('Does the text look good?'):
        # if diff looks good write
        ut.write_to(fpath, new_text)
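
# A hedged usage sketch, not part of the original source: exercises
# ensure_explicit_namespace on a throwaway file. The file body and the
# `mod`/`helper` names are invented for illustration. The '[^.]' prefix
# field is what leaves the already-qualified `mod.helper(2)` untouched.
def _demo_ensure_explicit_namespace():
    import tempfile
    import utool as ut
    fpath = tempfile.mktemp(suffix='.py')  # throwaway demo file
    ut.write_to(fpath, 'x = helper(1)\ny = mod.helper(2)\n')
    # prints a diff rewriting the bare `helper` to `mod.helper`,
    # then prompts before writing the change back
    ensure_explicit_namespace(fpath, 'mod', ['helper'])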
def fix_section_common_errors(tex_fpath, dryrun=True):
    # Read in text and ensure ascii format
    text = ut.read_from(tex_fpath)
    new_text = text
    # Fix all capitals
    search_repl_list = constants_tex_fixes.CAPITAL_LIST
    for repl in search_repl_list:
        pattern = ut.regex_word(re.escape(repl))
        new_text = re.sub(pattern, repl, new_text, flags=re.IGNORECASE)
        #new_text = re.sub(pattern, fix_capitalization, text, flags=re.MULTILINE)
    if not dryrun:
        ut.write_to(tex_fpath, new_text)
    else:
        ut.print_difftext(ut.get_textdiff(text, new_text, 0))
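
# A hedged mini-example, not from the original source: the same kind of
# whole-word, case-insensitive substitution that fix_section_common_errors
# applies. The r'\bmarkov\b' pattern stands in for
# ut.regex_word(re.escape('Markov')) and the sample text is invented.
def _demo_fix_capitals():
    import re
    fixed = re.sub(r'\bmarkov\b', 'Markov', 'hidden markov model',
                   flags=re.IGNORECASE)
    assert fixed == 'hidden Markov model'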
def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None):
    """
    Helper that writes a file if -w is given on the command line, otherwise
    it just prints it out. It has the option of comparing a diff to the file.
    """
    import utool as ut
    dowrite = ut.get_argflag(('-w', '--write'))
    show_diff = ut.get_argflag('--diff')
    num_context_lines = ut.get_argval('--diff', type_=int, default=None)
    show_diff = show_diff or num_context_lines is not None

    if fullprint is None:
        fullprint = True
    if fullprint is False:
        fullprint = ut.get_argflag('--print')

    print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
    if not dowrite:
        if fullprint:
            ut.print_code(autogen_text, lexer_name=codetype)
            print('\nL___')
        else:
            print('specify --print to write to stdout')
        print('specify -w to write, or --diff to compare')
        print('...would write to: %s' % fpath)
    if show_diff:
        if ut.checkpath(fpath, verbose=True):
            prev_text = ut.read_from(fpath)
            textdiff = ut.get_textdiff(prev_text, autogen_text,
                                       num_context_lines=num_context_lines)
            try:
                ut.print_difftext(textdiff)
            except UnicodeDecodeError:
                import unicodedata
                textdiff = unicodedata.normalize('NFKD', textdiff).encode(
                    'ascii', 'ignore')
                ut.print_difftext(textdiff)
        if dowrite:
            print('WARNING: Not writing. Remove --diff from command line')
    elif dowrite:
        ut.write_to(fpath, autogen_text)
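
# A hedged usage sketch, not from the original source: the generated text
# and target path are invented. Behavior is driven by the command line of
# the enclosing script: plain runs print the code, `--diff` (optionally
# `--diff N` for N context lines) compares against the existing file, and
# `-w`/`--write` writes it out.
def _demo_dump_autogen_code():
    autogen_text = '# -*- coding: utf-8 -*-\nprint("hello")\n'
    dump_autogen_code('autogen_demo.py', autogen_text, codetype='python')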
def test_example():
    """
    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.experimental.bytecode_optimizations import *
        >>> from utool.experimental import bytecode_optimizations as bo
        >>> dis1 = bo.get_disassembly_string(bo.sample)
        >>> dis2 = bo.get_disassembly_string(bo.sample_normal)
        >>> diff = ut.get_textdiff(dis1, dis2)
        >>> print(diff)
        >>> if len(diff) == 0:
        ...     print('no difference')
    """
    import utool as ut
    sample_optimized = make_constants(verbose=True,
                                      stoplist=['random'])(sample_normal)
    opt_dis = get_disassembly_string(sample_optimized)
    norm_dis = get_disassembly_string(sample_normal)
    #norm2_dis = get_disassembly_string(sample)  # hmm no difference
    print(ut.get_textdiff(opt_dis, norm_dis))
def make_core_layers(n_classes, freeze_before=0, finetune_decay=1):
    layers = []
    # ==
    # conv1
    layers += conv_bn_relu(bot='data', suffix='1_1', nfilt=64)
    layers += conv_bn_relu(bot='conv1_1', suffix='1_2', nfilt=64)
    layers += [pool('conv1_2', 'pool1')]
    # ====
    # conv2
    layers += conv_bn_relu(bot='pool1', suffix='2_1', nfilt=128)
    layers += conv_bn_relu(bot='conv2_1', suffix='2_2', nfilt=128)
    layers += [pool('conv2_2', 'pool2')]
    # ======
    # conv3
    layers += conv_bn_relu(bot='pool2', suffix='3_1', nfilt=256)
    layers += conv_bn_relu(bot='conv3_1', suffix='3_2', nfilt=256)
    layers += conv_bn_relu(bot='conv3_2', suffix='3_3', nfilt=256)
    layers += [pool('conv3_3', 'pool3')]
    # ========
    # conv4
    layers += conv_bn_relu(bot='pool3', suffix='4_1', nfilt=512)
    layers += conv_bn_relu(bot='conv4_1', suffix='4_2', nfilt=512)
    layers += conv_bn_relu(bot='conv4_2', suffix='4_3', nfilt=512)
    layers += [pool('conv4_3', 'pool4')]
    # ==========
    # conv5
    layers += conv_bn_relu(bot='pool4', suffix='5_1', nfilt=512)
    layers += conv_bn_relu(bot='conv5_1', suffix='5_2', nfilt=512)
    layers += conv_bn_relu(bot='conv5_2', suffix='5_3', nfilt=512)
    layers += [pool('conv5_3', 'pool5')]
    # ==========
    # up+conv5
    layers += [upsample('pool5', '5', 2, width=30, height=23)]
    layers += conv_bn_relu(bot='pool5_D', suffix='5_3_D', nfilt=512)
    layers += conv_bn_relu(bot='conv5_3_D', suffix='5_2_D', nfilt=512)
    layers += conv_bn_relu(bot='conv5_2_D', suffix='5_1_D', nfilt=512)
    # ========
    # up+conv4
    layers += [upsample('conv5_1_D', '4', scale=2, width=60, height=45)]
    layers += conv_bn_relu(bot='pool4_D', suffix='4_3_D', nfilt=512)
    layers += conv_bn_relu(bot='conv4_3_D', suffix='4_2_D', nfilt=512)
    layers += conv_bn_relu(bot='conv4_2_D', suffix='4_1_D', nfilt=256)
    # ======
    # up+conv3
    layers += [upsample('conv4_1_D', '3', scale=2)]
    layers += conv_bn_relu(bot='pool3_D', suffix='3_3_D', nfilt=256)
    layers += conv_bn_relu(bot='conv3_3_D', suffix='3_2_D', nfilt=256)
    layers += conv_bn_relu(bot='conv3_2_D', suffix='3_1_D', nfilt=128)
    # ====
    # up+conv2
    layers += [upsample('conv3_1_D', '2', scale=2)]
    layers += conv_bn_relu(bot='pool2_D', suffix='2_2_D', nfilt=128)
    layers += conv_bn_relu(bot='conv2_2_D', suffix='2_1_D', nfilt=64)
    # ==
    # up+conv1
    layers += [upsample('conv2_1_D', '1', scale=2)]
    layers += conv_bn_relu(bot='pool1_D', suffix='1_2_D', nfilt=64)
    # output pixel labels
    layers += [
        convolution('conv1_2_D',
                    'conv1_1_D_output' + str(n_classes),
                    'conv1_1_D_output' + str(n_classes),
                    nfilt=n_classes)
    ]
    # 26 total convolutional layers
    # total_conv_layers = sum([layer.params['type'] == 'Convolution'
    #                          for layer in layers])
    learnable_layers = [layer for layer in layers
                        if 'lr_mult' in layer.params]
    total_lr_layers = len(learnable_layers)

    import numpy as np
    freeze_before_ = (
        np.clip(freeze_before, -total_lr_layers, total_lr_layers) %
        total_lr_layers)

    # Freeze learning in all layers before `freeze_before_`
    for count, layer in enumerate(learnable_layers, start=1):
        if count < freeze_before_:
            # Force layers before this point to have a learning rate of 0
            layer.params['lr_mult'] = 0
        if finetune_decay != 1:
            # Decay so earlier layers receive exponentially less weight
            layer.params['lr_mult'] *= (finetune_decay **
                                        (total_lr_layers - count))

    core_layer_auto = '\n'.join([p.format() for p in layers])
    if False:
        import utool as ut
        from pysseg.models import segnet_proper_orig
        core_layer_old = segnet_proper_orig.CORE_LAYERS.format(
            n_classes=n_classes)
        print(ut.color_diff_text(ut.get_textdiff(
            core_layer_old, core_layer_auto, num_context_lines=10)))
    return core_layer_auto
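
# A hedged illustration, not from the original source, of the freeze /
# finetune-decay arithmetic above for the positive freeze_before case:
# with 26 learnable layers, freeze_before=10 zeroes layers 1-9, and
# finetune_decay=0.9 scales layer `count` by 0.9 ** (26 - count), so the
# later (decoder-side) layers keep more of their learning rate.
def _demo_finetune_decay(total_lr_layers=26, freeze_before=10,
                         finetune_decay=0.9):
    lr_mults = []
    for count in range(1, total_lr_layers + 1):
        lr_mult = 1.0
        if count < freeze_before:
            lr_mult = 0.0  # frozen, exactly as in the loop above
        if finetune_decay != 1:
            lr_mult *= finetune_decay ** (total_lr_layers - count)
        lr_mults.append(lr_mult)
    return lr_mults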
def fix_section_title_capitalization(tex_fpath, dryrun=True):
    # Read in text and ensure ascii format
    text = ut.read_from(tex_fpath)
    section_type_list = [
        'chapter',
        'section',
        'subsection',
        'subsubsection',
        'paragraph',
    ]
    re_section_type = ut.named_field('section_type',
                                     ut.regex_or(section_type_list))
    re_section_title = ut.named_field('section_title', '[^}]*')
    re_spaces = ut.named_field('spaces', '^ *')
    pattern = (re_spaces + re.escape('\\') + re_section_type +
               '{' + re_section_title + '}')

    def fix_capitalization(match):
        dict_ = match.groupdict()
        section_title = dict_['section_title']
        #if section_title == 'The Great Zebra Count':
        #    return match.string[slice(*match.span())]
        #    #return 'The Great Zebra Count'
        # general logic: lowercase every token except the first
        # (''.join relies on re.split keeping the separators, which happens
        # when the split pattern is a capturing group)
        tokens = re.split(ut.regex_or([' ', '/']), section_title)
        tokens = [t if count == 0 else t.lower()
                  for count, t in enumerate(tokens)]
        new_section_title = ''.join(tokens)
        # hacks for caps of expanded titles
        search_repl_list = constants_tex_fixes.CAPITAL_TITLE_LIST
        for repl in search_repl_list:
            new_section_title = re.sub(re.escape(repl), repl,
                                       new_section_title,
                                       flags=re.IGNORECASE)
        # hacks for acronyms
        for full, acro in constants_tex_fixes.ACRONYMN_LIST:
            new_section_title = re.sub(r'\b' + re.escape(acro) + r'\b',
                                       acro, new_section_title,
                                       flags=re.IGNORECASE)
        new_text = (dict_['spaces'] + '\\' + dict_['section_type'] +
                    '{' + new_section_title + '}')
        VERBOSE = 0
        if VERBOSE:
            old_text = match.string[slice(*match.span())]
            if new_text != old_text:
                print(ut.dict_str(dict_))
                print('--- REPL ---')
                print(old_text)
                print(new_text)
        return new_text

    #for match in re.finditer(pattern, text, flags=re.MULTILINE):
    #    fix_capitalization(match)
    new_text = re.sub(pattern, fix_capitalization, text, flags=re.MULTILINE)
    if not dryrun:
        ut.write_to(tex_fpath, new_text)
    else:
        ut.print_difftext(ut.get_textdiff(text, new_text, 0))
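
# A hedged mini-example, not from the original source, of the token
# lowercasing step inside fix_capitalization (without the
# constants_tex_fixes hacks). The explicit '( |/)' capturing group plays
# the role of ut.regex_or([' ', '/']) in the original: re.split keeps the
# separators, so ''.join can reassemble the title, and only the first
# token keeps its case.
def _demo_title_case(section_title='Related Work On Graph Matching'):
    import re
    tokens = re.split('( |/)', section_title)
    return ''.join(t if i == 0 else t.lower()
                   for i, t in enumerate(tokens))
    # -> 'Related work on graph matching'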
def fix_chktex():
    """
    ./texfix.py --fixcite --fix-chktex
    """
    import parse
    fpaths = testdata_fpaths()
    print('Running chktex')
    output_list = [ut.cmd('chktex', fpath, verbose=False)[0]
                   for fpath in fpaths]

    fixcite = ut.get_argflag('--fixcite')
    fixlbl = ut.get_argflag('--fixlbl')
    fixcmdterm = ut.get_argflag('--fixcmdterm')

    for fpath, output in zip(fpaths, output_list):
        text = ut.readfrom(fpath)
        buffer = text.split('\n')
        pat = '\n' + ut.positive_lookahead('Warning')
        warn_list = list(filter(lambda x: x.startswith('Warning'),
                                re.split(pat, output)))
        delete_linenos = []

        if not (fixcmdterm or fixlbl or fixcite):
            print(' CHOOSE A FIX ')

        modified_lines = []
        for warn in warn_list:
            warnlines = warn.split('\n')
            pres = parse.parse(
                'Warning {num} in {fpath} line {lineno}: {warnmsg}',
                warnlines[0])
            if pres is not None:
                fpath_ = pres['fpath']
                lineno = int(pres['lineno']) - 1
                warnmsg = pres['warnmsg']
                try:
                    assert fpath == fpath_, ('%r != %r' % (fpath, fpath_))
                except AssertionError:
                    continue
                if 'No errors printed' in warn:
                    #print('Cannot fix')
                    continue
                if lineno in modified_lines:
                    print('Skipping modified line')
                    continue
                if fixcmdterm and warnmsg == 'Command terminated with space.':
                    print('Fix command termination')
                    errorline = warnlines[1]  # NOQA
                    carrotline = warnlines[2]
                    pos = carrotline.find('^')
                    if 0:
                        print('pos = %r' % (pos,))
                        print('lineno = %r' % (lineno,))
                        print('errorline = %r' % (errorline,))
                    modified_lines.append(lineno)
                    line = buffer[lineno]
                    pre_, post_ = line[:pos], line[pos + 1:]
                    newline = (pre_ + '{} ' + post_).rstrip(' ')
                    #print('newline = %r' % (newline,))
                    buffer[lineno] = newline
                elif fixlbl and warnmsg == (
                        'Delete this space to maintain correct '
                        'pagereferences.'):
                    print('Fix label newline')
                    fpath_ = pres['fpath']
                    errorline = warnlines[1]  # NOQA
                    new_prevline = (buffer[lineno - 1].rstrip() +
                                    errorline.lstrip(' '))
                    buffer[lineno - 1] = new_prevline
                    modified_lines.append(lineno)
                    delete_linenos.append(lineno)
                elif fixcite and re.match(
                        'Non-breaking space \\(.~.\\) should have been used',
                        warnmsg):
                    #print(warnmsg)
                    #print('\n'.join(warnlines))
                    print('Fix citation space')
                    carrotline = warnlines[2]
                    pos = carrotline.find('^')
                    modified_lines.append(lineno)
                    line = buffer[lineno]
                    if line[pos] == ' ':
                        pre_, post_ = line[:pos], line[pos + 1:]
                        newline = (pre_ + '~' + post_).rstrip(' ')
                    else:
                        pre_, post_ = line[:pos + 1], line[pos + 1:]
                        newline = (pre_ + '~' + post_).rstrip(' ')
                        print(warn)
                        print(line[pos])
                        assert False
                        #assert line[pos] == ' ', '%r' % line[pos]
                        break
                    if len(pre_.strip()) == 0:
                        new_prevline = (buffer[lineno - 1].rstrip() +
                                        newline.lstrip(' '))
                        buffer[lineno - 1] = new_prevline
                        delete_linenos.append(lineno)
                    else:
                        #print('newline = %r' % (newline,))
                        buffer[lineno] = newline
            #print(warn)

        if len(delete_linenos) > 0:
            mask = ut.index_to_boolmask(delete_linenos, len(buffer))
            buffer = ut.compress(buffer, ut.not_list(mask))
        newtext = '\n'.join(buffer)

        #ut.dump_autogen_code(fpath, newtext, 'tex', fullprint=False)
        ut.print_difftext(ut.get_textdiff(text, newtext, num_context_lines=4))
        if ut.get_argflag('-w'):
            ut.writeto(fpath, newtext)
        else:
            print('Specify -w to finalize the change')
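
# A hedged mini-example, not from the original source: the chktex warning
# header shape that the parse.parse template in fix_chktex expects. The
# header text here is invented.
def _demo_parse_chktex_warning():
    import parse
    header = 'Warning 1 in main.tex line 42: Command terminated with space.'
    pres = parse.parse('Warning {num} in {fpath} line {lineno}: {warnmsg}',
                       header)
    assert pres['fpath'] == 'main.tex'
    assert int(pres['lineno']) == 42
    assert pres['warnmsg'] == 'Command terminated with space.'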
need_encoding_fpaths = []
for pat in pattern_items:
    print('Checking for pattern: %r' % (pat,))
    for fpath in fpath_list:
        pattern = re.escape(pat)
        found_lines, found_lxs = ut.grepfile(fpath, pattern)
        # DID NOT FIND ENCODING LINE
        if len(found_lines) == 0:
            need_encoding_fpaths.append(fpath)

print('The following fpaths need encoding lines: ' +
      ut.list_str(need_encoding_fpaths, strvals=True))

if do_write or show_diff:
    for fpath in need_encoding_fpaths:
        print('\n-----------------\n'
              'Found file without encoding line: ' + fpath)
        line_list = ut.read_lines_from(fpath)
        linenum = find_encoding_insert_position(line_list)
        if linenum is not None:
            #print(' * linenum = %r' % (linenum,))
            new_lines = (line_list[:linenum] + [encoding_line + '\n'] +
                         line_list[linenum:])
            new_text = ''.join(new_lines)
            if show_diff:
                old_text = ''.join(line_list)
                textdiff = ut.get_textdiff(old_text, new_text,
                                           num_context_lines=1)
                print('Diff:')
                ut.print_difftext(textdiff)
            if do_write:
                ut.writeto(fpath, new_text)
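
# find_encoding_insert_position is called above but not defined in this
# snippet; this is a minimal sketch of one plausible implementation (an
# assumption, not the original): per PEP 263 the coding line must be on
# line 1 or 2, i.e. directly after a shebang if there is one. The original
# may also return None for files that should be skipped.
def find_encoding_insert_position(line_list):
    if len(line_list) > 0 and line_list[0].startswith('#!'):
        return 1  # insert just after the shebang
    return 0      # otherwise the encoding line goes first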
def update_bindings():
    r"""
    Returns:
        dict: matchtups

    CommandLine:
        python ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings
        utprof.py ~/local/build_scripts/flannscripts/autogen_bindings.py --exec-update_bindings

    Example:
        >>> # DISABLE_DOCTEST
        >>> from autogen_bindings import *  # NOQA
        >>> import sys
        >>> import utool as ut
        >>> sys.path.append(ut.truepath('~/local/build_scripts/flannscripts'))
        >>> matchtups = update_bindings()
        >>> result = ('matchtups = %s' % (ut.repr2(matchtups),))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool as pt
        >>> ut.show_if_requested()
    """
    from os.path import basename
    import difflib
    import numpy as np
    import re

    binding_names = [
        'build_index',
        'used_memory',
        'add_points',
        'remove_point',
        'compute_cluster_centers',
        'load_index',
        'save_index',
        'find_nearest_neighbors',
        'radius_search',
        'remove_points',
        'free_index',
        'find_nearest_neighbors_index',
        # 'size',
        # 'veclen',
        # 'get_point',
        # 'flann_get_distance_order',
        # 'flann_get_distance_type',
        # 'flann_log_verbosity',
        # 'clean_removed_points',
    ]

    _places = [
        '~/code/flann/src/cpp/flann/flann.cpp',
        '~/code/flann/src/cpp/flann/flann.h',
        '~/code/flann/src/python/pyflann/flann_ctypes.py',
        '~/code/flann/src/python/pyflann/index.py',
    ]

    eof_sentinals = {
        # 'flann_ctypes.py': '# END DEFINE BINDINGS',
        'flann_ctypes.py': 'def ensure_2d_array(arr',
        # 'flann.h': '// END DEFINE BINDINGS',
        'flann.h': '#ifdef __cplusplus',
        'flann.cpp': None,
        'index.py': None,
    }
    block_sentinals = {
        'flann.h': re.escape('/**'),
        'flann.cpp': 'template *<typename Distance>',
        # 'flann_ctypes.py': '\n',
        'flann_ctypes.py': r'flann\.[a-z_.]* =',
        # 'index.py': '    def .*',
        'index.py': '    [^ ].*',
    }
    places = {basename(fpath): fpath
              for fpath in ut.lmap(ut.truepath, _places)}
    text_dict = ut.map_dict_vals(ut.readfrom, places)
    lines_dict = {key: val.split('\n') for key, val in text_dict.items()}
    orig_texts = text_dict.copy()  # NOQA
    binding_defs = {}
    named_blocks = {}

    print('binding_names = %r' % (binding_names,))
    for binding_name in binding_names:
        blocks, defs = autogen_parts(binding_name)
        binding_defs[binding_name] = defs
        named_blocks[binding_name] = blocks

    for binding_name in ut.ProgIter(binding_names):
        ut.colorprint('+--- GENERATE BINDING %s -----' % (binding_name,),
                      'yellow')
        blocks_dict = named_blocks[binding_name]
        for key in places.keys():
            ut.colorprint('---- generating %s for %s -----' %
                          (binding_name, key,), 'yellow')
            # key = 'flann_ctypes.py'
            # print(text_dict[key])
            old_text = text_dict[key]
            line_list = lines_dict[key]
            #text = old_text
            block = blocks_dict[key]

            debug = ut.get_argflag('--debug')
            # debug = True
            # if debug:
            #     print(ut.highlight_code(block, splitext(key)[1]))

            # Find a place in the code that already exists
            searchblock = block
            if key.endswith('.cpp') or key.endswith('.h'):
                searchblock = re.sub(ut.REGEX_C_COMMENT, '', searchblock,
                                     flags=re.MULTILINE | re.DOTALL)
            searchblock = '\n'.join(searchblock.splitlines()[0:3])

            # @ut.cached_func(verbose=False)
            def cached_match(old_text, searchblock):
                def isjunk(x):
                    # lenient heuristic: nothing is junk
                    return False

                def isjunk2(x):
                    return x in ' \t,*()'
                # Not sure why the first one just doesn't find it
                # isjunk = None
                sm = difflib.SequenceMatcher(isjunk, old_text, searchblock,
                                             autojunk=False)
                sm0 = difflib.SequenceMatcher(isjunk, old_text, searchblock,
                                              autojunk=True)
                sm1 = difflib.SequenceMatcher(isjunk2, old_text, searchblock,
                                              autojunk=False)
                sm2 = difflib.SequenceMatcher(isjunk2, old_text, searchblock,
                                              autojunk=True)
                matchtups = (sm.get_matching_blocks() +
                             sm0.get_matching_blocks() +
                             sm1.get_matching_blocks() +
                             sm2.get_matching_blocks())
                return matchtups
            matchtups = cached_match(old_text, searchblock)

            # Find a reasonable match in matchtups
            found = False
            if debug:
                # print('searchblock =\n%s' % (searchblock,))
                print('searchblock = %r' % (searchblock,))
            for (a, b, size) in matchtups:
                matchtext = old_text[a: a + size]
                pybind = binding_defs[binding_name]['py_binding_name']
                if (re.search(binding_name + '\\b', matchtext) or
                        re.search(pybind + '\\b', matchtext)):
                    found = True
                    pos = a + size
                    if debug:
                        print('MATCHING TEXT')
                        print(matchtext)
                    break
                else:
                    if debug and 0:
                        print('Not matching')
                        print('matchtext = %r' % (matchtext,))
                        matchtext2 = old_text[a - 10: a + size + 20]
                        print('matchtext2 = %r' % (matchtext2,))

            if found:
                linelens = np.array(ut.lmap(len, line_list)) + 1
                sumlen = np.cumsum(linelens)
                row = np.where(sumlen < pos)[0][-1] + 1
                #print(line_list[row])
                # Search for extents of the block to overwrite
                block_sentinal = block_sentinals[key]
                row1 = ut.find_block_end(row, line_list, block_sentinal, -1) - 1
                row2 = ut.find_block_end(row + 1, line_list, block_sentinal, +1)
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is not None:
                    print('eof_sentinal = %r' % (eof_sentinal,))
                    row2 = min([count for count, line in enumerate(line_list)
                                if line.startswith(eof_sentinal)][-1], row2)
                nr = len((block + '\n\n').splitlines())
                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                rtext1 = '\n'.join(line_list[row1:row2])
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])
                if debug:
                    print('-----')
                    ut.colorprint('FOUND AND REPLACING %s' % (binding_name,),
                                  'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint('FOUND AND REPLACED WITH %s' %
                                  (binding_name,), 'yellow')
                    print(ut.highlight_code(rtext2))
                if not ut.get_argflag('--diff') and not debug:
                    print(ut.color_diff_text(ut.difftext(
                        rtext1, rtext2, num_context_lines=7,
                        ignore_whitespace=True)))
            else:
                # Append to end of the file
                eof_sentinal = eof_sentinals[key]
                if eof_sentinal is None:
                    row2 = len(line_list) - 1
                else:
                    row2_choice = [count for count, line
                                   in enumerate(line_list)
                                   if line.startswith(eof_sentinal)]
                    if len(row2_choice) == 0:
                        row2 = len(line_list) - 1
                        assert False
                    else:
                        row2 = row2_choice[-1] - 1
                # row1 = row2 - 1
                # row2 = row2 - 1
                row1 = row2
                new_line_list = ut.insert_block_between_lines(
                    block + '\n', row1, row2, line_list)
                #   block + '\n\n\n', row1, row2, line_list)
                rtext1 = '\n'.join(line_list[row1:row2])
                nr = len((block + '\n\n').splitlines())
                rtext2 = '\n'.join(new_line_list[row1:row1 + nr])
                if debug:
                    print('-----')
                    ut.colorprint('NOT FOUND AND REPLACING %s' %
                                  (binding_name,), 'yellow')
                    print(ut.highlight_code(rtext1))
                if debug:
                    print('-----')
                    ut.colorprint('NOT FOUND AND REPLACED WITH %s' %
                                  (binding_name,), 'yellow')
                    print(ut.highlight_code(rtext2))
                if not ut.get_argflag('--diff') and not debug:
                    print(ut.color_diff_text(ut.difftext(
                        rtext1, rtext2, num_context_lines=7,
                        ignore_whitespace=True)))
            text_dict[key] = '\n'.join(new_line_list)
            lines_dict[key] = new_line_list
        ut.colorprint('L___ GENERATED BINDING %s ___' % (binding_name,),
                      'yellow')

    for key in places:
        new_text = '\n'.join(lines_dict[key])
        #ut.writeto(ut.augpath(places[key], '.new'), new_text)
        ut.writeto(ut.augpath(places[key]), new_text)

    if ut.get_argflag('--diff'):
        for key in places:
            # diff each file against its own original text
            new_text = '\n'.join(lines_dict[key])
            difftext = ut.get_textdiff(orig_texts[key], new_text,
                                       num_context_lines=7,
                                       ignore_whitespace=True)
            difftext = ut.color_diff_text(difftext)
            print(difftext)
def define_model():
    n_classes = 6
    layers = []
    # ==
    # conv1
    layers += conv_bn_relu(bot='data', suffix='1_1', nfilt=64)
    layers += conv_bn_relu(bot='conv1_1', suffix='1_2', nfilt=64)
    layers += [pool('conv1_2', 'pool1')]
    # ====
    # conv2
    layers += conv_bn_relu(bot='pool1', suffix='2_1', nfilt=128)
    layers += conv_bn_relu(bot='conv2_1', suffix='2_2', nfilt=128)
    layers += [pool('conv2_2', 'pool2')]
    # ======
    # conv3
    layers += conv_bn_relu(bot='pool2', suffix='3_1', nfilt=256)
    layers += conv_bn_relu(bot='conv3_1', suffix='3_2', nfilt=256)
    layers += conv_bn_relu(bot='conv3_2', suffix='3_3', nfilt=256)
    layers += [pool('conv3_3', 'pool3')]
    # ========
    # conv4
    layers += conv_bn_relu(bot='pool3', suffix='4_1', nfilt=512)
    layers += conv_bn_relu(bot='conv4_1', suffix='4_2', nfilt=512)
    layers += conv_bn_relu(bot='conv4_2', suffix='4_3', nfilt=512)
    layers += [pool('conv4_3', 'pool4')]
    # ==========
    # conv5
    layers += conv_bn_relu(bot='pool4', suffix='5_1', nfilt=512)
    layers += conv_bn_relu(bot='conv5_1', suffix='5_2', nfilt=512)
    layers += conv_bn_relu(bot='conv5_2', suffix='5_3', nfilt=512)
    layers += [pool('conv5_3', 'pool5')]
    # ==========
    # up+conv5
    layers += [upsample('pool5', '5', 2, width=30, height=23)]
    layers += conv_bn_relu(bot='pool5_D', suffix='5_3_D', nfilt=512)
    layers += conv_bn_relu(bot='conv5_3_D', suffix='5_2_D', nfilt=512)
    layers += conv_bn_relu(bot='conv5_2_D', suffix='5_1_D', nfilt=512)
    # ========
    # up+conv4
    layers += [upsample('conv5_1_D', '4', 2, width=60, height=45)]
    layers += conv_bn_relu(bot='pool4_D', suffix='4_3_D', nfilt=512)
    layers += conv_bn_relu(bot='conv4_3_D', suffix='4_2_D', nfilt=512)
    layers += conv_bn_relu(bot='conv4_2_D', suffix='4_1_D', nfilt=256)
    # ======
    # up+conv3
    layers += [upsample('conv4_1_D', '3', 2)]
    layers += conv_bn_relu(bot='pool3_D', suffix='3_3_D', nfilt=256)
    layers += conv_bn_relu(bot='conv3_3_D', suffix='3_2_D', nfilt=256)
    layers += conv_bn_relu(bot='conv3_2_D', suffix='3_1_D', nfilt=128)
    # ====
    # up+conv2
    layers += [upsample('conv3_1_D', '2', 2)]
    layers += conv_bn_relu(bot='pool2_D', suffix='2_2_D', nfilt=128)
    layers += conv_bn_relu(bot='conv2_2_D', suffix='2_1_D', nfilt=64)
    # ==
    # up+conv1
    layers += [upsample('conv2_1_D', '1', 2)]
    layers += conv_bn_relu(bot='pool1_D', suffix='1_2_D', nfilt=64)
    # output mask
    layers += [convolution('conv1_2_D',
                           'conv1_1_D_output' + str(n_classes),
                           'conv1_1_D_output6', nfilt=n_classes)]

    core_layer_auto = '\n'.join(layers)
    core_layer_old = CORE_LAYERS.format(n_classes=n_classes)
    import utool as ut
    print(ut.color_diff_text(ut.get_textdiff(core_layer_old, core_layer_auto,
                                             num_context_lines=10)))