def set_params_dog(self):
    u.printf("Loading params for DoG processing...")
    self.params.min_sigma = 20
    self.params.max_sigma = 50
    self.params.threshold = 2.0
    self.params.overlap = 0.5
    self.params.sigma_ratio = 1.6
def main():
    # load existing all.db
    mw.progress.start(label='Loading existing all.db', immediate=True)
    t_0 = time.time()
    cur = util.allDb() if cfg1('loadAllDb') else None
    printf('Loaded all.db in %f sec' % (time.time() - t_0))
    mw.progress.finish()
    # update all.db
    allDb = mkAllDb(cur)
    # merge in external.db
    mw.progress.start(label='Merging ext.db', immediate=True)
    ext = MorphDb(cfg1('path_ext'), ignoreErrors=True)
    allDb.merge(ext)
    mw.progress.finish()
    # update notes
    knownDb = updateNotes(allDb)
    # update stats and refresh display
    stats.updateStats(knownDb)
    mw.toolbar.draw()
    # set global allDb
    util._allDb = allDb
def setGameBit(self, bit, val):
    entry, addr = self._calcGameBitAddr(bit)
    if addr is None:
        printf("Can't set bit 0x%X, no save game loaded\n", bit)
        return
    nBits = entry['nBits']
    firstBit = entry['offset']
    lastBit = firstBit + nBits
    idx = 0
    maxVal = (1 << nBits) - 1
    if val < 0:
        val += maxVal
    if val < 0 or val > maxVal:
        printf("Value out of range (0 to %d, got %d)\n", maxVal, val)
        return
    for i in range(firstBit, lastBit):
        byte = self.client.read(addr + (i >> 3), 'B')
        oldB = byte
        mask = 1 << (i & 7)
        b = val & (1 << idx)
        if b:
            byte |= mask
        else:
            byte &= ~mask
        #printf("write 0x%02X -> 0x%02X at 0x%08X, idx %d val %d\n",
        #    oldB, byte, addr + (i >> 3), idx, 1 if b else 0)
        self.client.write(addr + (i >> 3), bytes([byte]))
        idx += 1
        if idx == 8:
            idx = 0
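# A minimal usage sketch: `bit` indexes the game's bit table, and `val` is
# packed into that entry's nBits-wide field. The bit ID and value here are
# hypothetical:
#
#   self.setGameBit(0x0123, 5)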
def sleep(secs):
    printf("Sleeping ... ")
    sys.stdout.flush()
    for i in xrange(0, secs + 1):
        printf("%i ", i)
        time.sleep(1.0)
        sys.stdout.flush()
def build_lbs(reqs, url, delay):
    i = 0
    for lb in reqs:
        headers = load_headers()
        printf("\n\nurl=%s\ndata=%s\n", url, lb)
        request = urllib2.Request(url, lb, headers)
        i += 1
        printf("Building %i\n", i)
        try:
            start = time.time()
            startTime = time.strftime("%a %m/%d/%y %H:%M:%S", time.localtime())
            write_log("Start: sending request %s" % startTime, "", "", "")
            resp = urllib2.urlopen(request)
            end = time.time()
            endTime = time.strftime("%a %m/%d/%y %H:%M:%S", time.localtime())
            reqTime = end - start
            printf("Took %.2g seconds to return a response\n", reqTime)
            body = resp.read()  # read once; the response body cannot be re-read
            write_log("End: response received %s \n" % endTime,
                      "Took %g to return successful response: " % reqTime,
                      body, resp.code)
            countdown(delay)
            printf("Response code: %s\n %s\n", resp.code, body)
        except urllib2.HTTPError, e:
            msg = e.read()
            code = e.code
            end = time.time()
            endTime = time.strftime("%a %m/%d/%y %H:%M:%S", time.localtime())
            printf("Error code=%s\nbody=%s\n", code, msg)
            write_log("End: response received %s\n" % endTime, msg, lb, code)
            countdown(delay)
def __init__(self, root, test_info):
    self.path = test_info.path(root)
    data = {}
    result_path = self.path / 'result.json'
    if result_path.exists():
        try:
            with result_path.open() as fp:
                data = json.load(fp)
        except Exception as ex:
            data = {'exception': str(ex)}
    self.exception = data.get('exception')
    self.compile_duration = data.get('compile_duration')
    self.execution_duration = data.get('duration_per_example',
                                       data.get('execution_duration'))
    self.np_data = None
    np_result_path = self.path / 'result.npy'
    if np_result_path.exists():
        try:
            self.np_data = np.load(np_result_path)
        except Exception as ex:
            util.printf('  Exception:', ex)
            if self.exception is None:
                self.exception = str(ex)
def listLoadedObjects(self):
    # read ptr to object list
    pPlayer = self.client.read(0x803428f8, '>I')
    cnt, ptr = self.client.read(0x803dcb84, ">II")
    if ptr == 0:
        print("objPtr is NULL")
        return
    ptrs = self.client.read(ptr, ">%dI" % cnt)
    printf("objPtr = %08X nObjs = %d\n", ptr, cnt)
    printf(
        "\x1B[1mObj│Address │Name │ID │Def │ModelPtr│XPos │YPos │ZPos │Ch│Seq\x1B[0m\n"
    )
    for i in range(cnt):
        pObj = ptrs[i]  # already fetched by the bulk read above
        if pObj == 0:
            printf("NULL at object %d\n", i)
            break
        obj = GameObject(self.client, pObj)
        if pObj == pPlayer:  # compare pointer to pointer, not object to pointer
            printf("\x1B[1m")  # bold
        printf(
            "\x1B[48;5;%dm%3d│%08X│%-11s│%04X│%04X│%08X│%+9.2f│%+9.2f│%+9.2f│%2d│%08X %d\x1B[0m\n",
            ROW_COLOR[i & 1], i, pObj, obj.name, obj.objId, obj.defNo,
            obj.models, obj.pos[0], obj.pos[1], obj.pos[2], obj.nChildren,
            obj.seq, obj.curSeq)
def set_params_doh(self):
    u.printf("Loading params for DoH processing...")
    self.params.min_sigma = 15
    self.params.max_sigma = 60
    self.params.num_sigma = 15
    self.params.threshold = 0.01
    self.params.overlap = 0.5
    self.params.log_Scale = True
def listObjModels(self, addr):
    obj = self.readObject(addr)
    pModels = self.client.read(obj.models, ">%dI" % obj.file.nModels)
    for i in range(obj.file.nModels):
        model = Model(self.client, pModels[i])
        printf("\nModel %d @ 0x%08X:\n", i, pModels[i])
        model.printSelf()
        self.listModelDlists(pModels[i])
def set_params_log(self):
    u.printf("Loading params for LoG processing...")
    self.params.min_sigma = 20
    self.params.max_sigma = 40
    self.params.num_sigma = 5
    self.params.threshold = 0.2
    self.params.overlap = 0.7
    self.params.log_Scale = False
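# Note: these parameter names mirror skimage.feature's blob detectors
# (blob_log / blob_dog / blob_doh). A minimal sketch of how run_log()
# presumably consumes them (the exact call site is an assumption):
#
#   from skimage.feature import blob_log
#   blobs = blob_log(self.image,
#                    min_sigma=self.params.min_sigma,
#                    max_sigma=self.params.max_sigma,
#                    num_sigma=self.params.num_sigma,
#                    threshold=self.params.threshold,
#                    overlap=self.params.overlap,
#                    log_scale=self.params.log_Scale)
#   # each row of `blobs` is (y, x, sigma), matching export_data's unpacking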
def cmd_build(args, remainder):
    with open('ci/plan.yml') as file_:
        plan = yaml.safe_load(file_)

    env = os.environ.copy()
    variant = plan['VARIANTS'][args.variant]
    for key, value in variant.get('env', {}).items():
        env[key] = str(value)

    build_root = variant.get('build_root', 'build-x86_64')
    build_type = variant.get('build_type', 'Release')
    check = variant.get('check', 'smoke')
    system = variant.get('system', 'Linux')
    temp_dir = Path('/tmp') / os.getenv('BUILDKITE_AGENT_NAME')
    build_dir = Path(build_root) / build_type
    logs_dir = Path('logs').resolve()
    logs_dir.mkdir(parents=True, exist_ok=True)

    util.printf('--- :building_construction: configure')
    configure_log = logs_dir / 'configure.log'
    with configure_log.open('wb') as fp:
        util.check_call([
            'python', 'configure', '--ci', f'--temp={temp_dir}',
            f'--type={build_type}'
        ], env=env, stdout=fp, stderr=subprocess.STDOUT)

    util.printf('--- :hammer_and_wrench: ninja')
    util.check_call(['ninja', '-C', build_dir], env=env)

    util.printf('--- :hammer_and_wrench: ninja package')
    util.check_call(['ninja', '-C', build_dir, 'package'], env=env)

    util.printf(f'--- :hammer_and_wrench: ninja check-{check}')
    check_log = logs_dir / f'check-{check}.log'
    with check_log.open('wb') as fp:
        util.check_call(['ninja', '-C', build_dir, f'check-{check}'],
                        env=env, stdout=fp, stderr=subprocess.STDOUT)

    util.printf('--- Test devkit')
    devkit_dir = build_dir / '_CPack_Packages' / system / 'TGZ' / f'PlaidML-1.0.0-{system}' / 'devkit'
    devkit_build_dir = devkit_dir / 'build'
    cmd = ['cmake']
    cmd += ['-S', devkit_dir]
    cmd += ['-B', devkit_build_dir]
    cmd += ['-G', 'Ninja']
    util.check_call(cmd, env=env)
    util.check_call(['ninja', '-C', devkit_build_dir], env=env)
    util.check_call([devkit_build_dir / 'edsl_test'], env=env)

    if 'dbg' not in args.variant:
        util.buildkite_upload(build_dir / '*.whl')
        util.buildkite_upload(build_dir / '*.tar.gz')
def export_data(self, circles, output_file):
    try:
        with open(output_file, 'w') as csv_file:
            writer = csv.writer(csv_file, delimiter=",")
            for circle in circles:
                y, x, r = circle
                writer.writerow([x, y, r])
    except Exception:  # a bare 'except:' would also swallow KeyboardInterrupt
        u.printf("Unable to write data to file: {}".format(output_file))
def check_batch_size(self, data_set_size, batch_size):
    u.printf("Validating batch size of {}...".format(batch_size))
    original_batch_size = batch_size
    if batch_size > data_set_size:
        while batch_size > data_set_size:
            batch_size //= 2  # integer halving; '/' would produce a float
        u.printf(
            "Batch size {} is greater than data set size {}\nBatch size has been reduced to {}"
            .format(original_batch_size, data_set_size, batch_size))
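# Worked example: with data_set_size=100 and batch_size=256, the loop halves
# 256 -> 128 -> 64 before reporting that the batch size was reduced to 64.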
def cmd_build(args, remainder):
    import yaml
    with open('ci/plan.yml') as file_:
        plan = yaml.safe_load(file_)

    env = os.environ.copy()
    variant = plan['VARIANTS'][args.variant]
    for key, value in variant['env'].items():
        env[key] = str(value)

    explain_log = 'explain.log'
    profile_json = 'profile.json.gz'
    bazel_config = variant.get('bazel_config', args.variant)

    common_args = []
    common_args += ['--config={}'.format(bazel_config)]
    common_args += ['--define=version={}'.format(args.version)]
    common_args += ['--experimental_generate_json_trace_profile']
    common_args += ['--experimental_json_trace_compression']
    common_args += ['--experimental_profile_cpu_usage']
    common_args += ['--explain={}'.format(explain_log)]
    common_args += ['--profile={}'.format(profile_json)]
    common_args += ['--verbose_failures']
    common_args += ['--verbose_explanations']

    util.printf('--- :bazel: Running Build...')
    if platform.system() == 'Windows':
        util.check_call(['git', 'config', 'core.symlinks', 'true'])
        cenv = util.CondaEnv(pathlib.Path('.cenv'))
        cenv.create('environment-windows.yml')
        env.update(cenv.env())
    util.check_call(['bazelisk', 'test', '...'] + common_args, env=env)

    util.printf('--- :buildkite: Uploading artifacts...')
    buildkite_upload(explain_log)
    buildkite_upload(profile_json)

    shutil.rmtree('tmp', ignore_errors=True)
    tarball = os.path.join('bazel-bin', 'pkg.tar.gz')
    with tarfile.open(tarball, "r") as tar:
        wheels = []
        for item in tar.getmembers():
            if item.name.endswith('.whl'):
                wheels.append(item)
        tar.extractall('tmp', members=wheels)
    buildkite_upload('*.whl', cwd='tmp')

    archive_dir = os.path.join(
        args.root,
        args.pipeline,
        args.build_id,
        'build',
        args.variant,
    )
    os.makedirs(archive_dir, exist_ok=True)
    shutil.copy(tarball, archive_dir)
def wheel_clean(dirs):
    for wheel_dir in dirs:
        for f in wheel_dir.glob('*.whl'):
            if f.is_file():
                util.printf('deleting: ' + str(f.resolve()))
                try:
                    os.remove(f)
                except PermissionError:
                    import stat
                    os.chmod(f, stat.S_IWRITE)
                    os.remove(f)
def load_headers():
    with open("auth_headers.db", "rb") as fp:  # pickle data should be read in binary mode
        h = pickle.load(fp)
    intended_keys = ["x-auth-token"]
    headers = {}
    headers["content-type"] = "application/xml"
    headers["accept"] = "application/xml"
    if os.path.isfile("extra_headers.json"):
        extra_headers = util.load_json("extra_headers.json")
        headers.update(extra_headers)
    for auth_key in intended_keys:
        headers[auth_key] = h[auth_key]
    printf("\n\nheaders=%s\n", headers)
    return headers
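# A minimal sketch of how auth_headers.db might be produced by the getauth
# script mentioned in usage() below (the key set and token source are
# assumptions):
#
#   import pickle
#   auth = {"x-auth-token": "<token from the auth endpoint>"}
#   with open("auth_headers.db", "wb") as fp:
#       pickle.dump(auth, fp)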
def check_convolutional_layers(self):
    u.printf("Validating convolutional layers...")
    previous_size = self.image_size
    for i in range(self.convolutional_layer_count):
        current_size = previous_size // 2  # each layer halves the image; keep this integral
        if current_size != 1:
            previous_size = current_size
            continue
        u.printf(
            "Maximum Convolutional Layer value reached\nSetting Convolutional Layer Count value to maximum: {}"
            .format(i))
        self.convolutional_layer_count = i
        break
def help(self):
    """Display help text."""
    methods = [
        (func, getattr(self, func)) for func in dir(self)
        if callable(getattr(self, func)) and not func.startswith('_')
    ]
    for name, method in sorted(methods, key=lambda it: it[0]):
        if method.__doc__ is None:
            printf("%s: (no documentation)\n", name)
        else:
            doc = method.__doc__.strip()
            doc = re.sub(r'\n\n+\s*', '\n ', doc)
            printf("%s: %s\n", name, doc)
def reorder(self):
    util.printf('Opening {}...'.format(self.infile))
    util.printf('Saving output file as {}...'.format(self.outfile))
    out_header = []
    reordered_header = []
    out_dir = os.path.join(self.ROOT_DIR, self.OUTPUT_DIR)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    try:
        with open(self.targetfile, 'r') as ft:
            target_csv = csv.reader(ft)
            out_header = next(target_csv)[1:]
            reordered_header = [
                r for r in target_csv
                if util.path_leaf(self.infile_name) in r
            ]
            if len(reordered_header) > 1:
                self.output(
                    'ERROR: multiple csv files with same name ({})'.format(
                        self.infile_name))
                sys.exit(1)
            if len(reordered_header) == 0:
                self.output('ERROR: no csv files with name {}'.format(
                    self.infile_name))
                sys.exit(1)
            reordered_header = reordered_header[0][1:]
    except Exception:
        # note: a bare 'except:' here would also swallow the SystemExit
        # raised by sys.exit(1) above and report the wrong error
        self.output('ERROR: Could not open {}'.format(self.targetfile))
        sys.exit(1)
    try:
        with open(self.infile, 'r') as fi, open(self.outfile, 'w') as fo:
            writer = csv.DictWriter(fo, fieldnames=reordered_header,
                                    extrasaction='ignore')
            header = csv.DictWriter(fo, fieldnames=out_header,
                                    extrasaction='ignore')
            header.writeheader()
            for r in csv.DictReader(fi):
                writer.writerow(r)
    except Exception:
        self.output('ERROR: Could not open {} or create {}'.format(
            self.infile, self.outfile))
        sys.exit(1)
def watchGame(self):
    """Display general game state info."""
    self._checkConnected()
    try:
        while True:
            #time.sleep(1.0 / 60.0)
            time.sleep(0.1)
            # do all reads at once
            #texts = self.client.read(0x803a3800, '>5b')
            nObj, pObj = self.client.read(0x803dcb84, '>II')
            curMap = self.game.getCurMap()
            pPlayer = self.client.read(0x803428f8, '>I')
            x, y, z = self.client.read(pPlayer + 0x0C, '>3f')
            # XXX what are these?
            mapX, mapZ, cellX, cellZ = \
                self.client.read(0x803dcdc8, '>4i')
            #cellX, cellZ = math.ceil(x/640), math.ceil(z/640)
            animId = self.client.read(pPlayer + 0xA0, '>H')
            playTime = self.client.read(0x803a32a8 + 0x560, '>f')  # frames
            playerState = self.client.read(0x803A32A8, 24)
            printf(
                "\x1B[H" +  # cursor to 1,1
                "Map: %04X \x1B[48;5;19m%-28s\x1B[0m Type %02X unk %02X %04X Objs: %4d @%08X \r\n",
                curMap['id'], curMap['name'], curMap['type'],
                curMap['field_1d'], curMap['field_1e'], nObj, pObj)
            printf(
                "Coords: %+8.2f %+8.2f %+8.2f Cell %4d %4d @ %4d %4d \r\n",
                x, y, z, cellX, cellZ, mapX, mapZ)
            # playTime is in frames at 60fps: 216000 frames/hour, 3600/minute
            printf("Anim %04X PlayTime %02d:%02d:%02d\n", animId,
                   playTime // 216000, (playTime // 3600) % 60,
                   (playTime // 60) % 60)
            playerNames = ('K', 'F')
            for i in range(2):
                curHP, maxHP, unk2, unk3, curMP, maxMP, money, \
                    curLives, maxLives, unkB = struct.unpack_from(
                        '>bbbbhhbbbb', playerState, i * 12)
                printf(
                    "%s HP:\x1B[48;5;19m%5.2f/%5.2f\x1B[0m " +
                    "MP:\x1B[48;5;19m%3d/%3d\x1B[0m " +
                    "$\x1B[48;5;19m%3d\x1B[0m " +
                    "B:\x1B[48;5;19m%3d/%3d\x1B[0m " +
                    "unk=%d, %d, %d \r\n",
                    playerNames[i], curHP / 4, maxHP / 4, curMP, maxMP,
                    money, curLives, maxLives, unk2, unk3, unkB)
            self.game.heapStats()
            self.client.conn.send(b"\xAA")
    except KeyboardInterrupt:
        self.client.conn.send(b"\xAA")
def listModelDlists(self, addr):
    model = Model(self.client, addr)
    printf(
        "\x1B[1m# │ListAddr│Size│Bbox 1 │Bbox 2 │12│Sd│ 14│ 16│ 18\x1B[0m\n"
    )
    for i in range(model.header.nDlists):
        dlist = DisplayListPtr(self.client,
                               model.header.pDlists + (i * 0x1C))
        printf(
            "\x1B[48;5;%dm%3d│%08X│%04X│%+6d %+6d %+6d│%+6d %+6d %+6d│%02X│%02X│%04X│%04X│%08X\x1B[0m\n",
            ROW_COLOR[i & 1], i, dlist.offset, dlist.size, dlist.bbox[0],
            dlist.bbox[1], dlist.bbox[2], dlist.bbox[3], dlist.bbox[4],
            dlist.bbox[5], dlist.unk12, dlist.shaderId, dlist.unk14,
            dlist.unk16, dlist.unk18)
def __init__(self, model_directory, input_image, output_directory):
    u.printf("Creating model parameters")
    self.train_params = hp.ModelParameters()
    self.exec_params = hp.ExecutionParameters()

    u.printf("Initializing Neural Network parameters...")
    self.x = tf.placeholder('float32', [
        None,
        (self.train_params.image_size * self.train_params.image_size)
    ])
    self.keep_prob = tf.placeholder(tf.float32)

    # Assign passed-in variables if they exist
    self.exec_params.model_directory = model_directory
    self.exec_params.input_image = input_image
    self.exec_params.output_directory = output_directory
def mkAllDb( allDb=None ):
    t_0, db, TAG = time.time(), mw.col.db, mw.col.tags
    N_notes = db.scalar( 'select count() from notes' )
    mw.progress.start( label='Prep work for all.db creation', max=N_notes, immediate=True )

    if not allDb: allDb = MorphDb()
    fidDb = allDb.fidDb()
    locDb = allDb.locDb( recalc=False ) # fidDb() already forces locDb recalc

    mw.progress.update( label='Generating all.db data' )
    for i,( nid, mid, flds, guid, tags ) in enumerate( db.execute( 'select id, mid, flds, guid, tags from notes' ) ):
        if i % 500 == 0:    mw.progress.update( value=i )
        C = partial( cfg, mid, None )
        if not C('enabled'): continue
        mats = [ ( 0.5 if ivl == 0 and ctype == 1 else ivl ) for ivl, ctype in db.execute( 'select ivl, type from cards where nid = :nid', nid=nid ) ]
        ts, alreadyKnownTag = TAG.split( tags ), C('tag_alreadyKnown')
        if alreadyKnownTag in ts:
            mats += [ C('threshold_mature')+1 ]

        for fieldName in C('morph_fields'):
            try: # if doesn't have field, continue
                #fieldValue = normalizeFieldValue( getField( fieldName, flds, mid ) )
                fieldValue = getMecabField( fieldName, flds, mid )
            except KeyError: continue
            except TypeError:
                mname = mw.col.models.get( mid )[ 'name' ]
                errorMsg( u'Failed to get field "{field}" from a note of model "{model}". Please fix your config.py file to match your collection appropriately and ignore the following error.'.format( model=mname, field=fieldName ) )
                raise

            loc = fidDb.get( ( nid, guid, fieldName ), None )
            if not loc:
                loc = AnkiDeck( nid, fieldName, fieldValue, guid, mats )
                ms = getMorphemes( fieldValue )
                if ms: #TODO: this needed? should we change below too then?
                    #printf( ' .loc for %d[%s]' % ( nid, fieldName ) )
                    locDb[ loc ] = ms
            else:
                # mats changed -> new loc (new mats), move morphs
                if loc.fieldValue == fieldValue and loc.maturities != mats:
                    printf( ' .mats for %d[%s]' % ( nid, fieldName ) )
                    newLoc = AnkiDeck( nid, fieldName, fieldValue, guid, mats )
                    locDb[ newLoc ] = locDb.pop( loc )
                # field changed -> new loc, new morphs
                elif loc.fieldValue != fieldValue:
                    printf( ' .morphs for %d[%s]' % ( nid, fieldName ) )
                    newLoc = AnkiDeck( nid, fieldName, fieldValue, guid, mats )
                    ms = getMorphemes( fieldValue )
                    locDb.pop( loc )
                    locDb[ newLoc ] = ms

    printf( 'Processed all %d notes in %f sec' % ( N_notes, time.time() - t_0 ) )
    mw.progress.update( value=i, label='Creating all.db object' )
    allDb.clear()
    allDb.addFromLocDb( locDb )
    if cfg1('saveDbs'):
        mw.progress.update( value=i, label='Saving all.db to disk' )
        allDb.save( cfg1('path_all') )
        printf( 'Processed all %d notes + saved all.db in %f sec' % ( N_notes, time.time() - t_0 ) )
    mw.progress.finish()
    return allDb
def heapStats(self):
    printf(
        "\x1B[1m#│Data │Size │End │Used │Free │Slots │UsedSlot│FreeSlot│FreeB│FreeS\x1B[0m\n"
    )
    heaps = []
    for i in range(5):
        hs = HeapStruct(self.client, 0x803406A0 + (i * HeapStruct.SIZE))
        heaps.append(hs)
    for i, hs in enumerate(heaps):
        printf(
            "\x1B[48;5;%dm%d│%08X│%08X│%08X│%08X│%08X│%08X│%08X│%08X│%4d%%│%4d%%\x1B[0m\n",
            ROW_COLOR[i & 1], i, hs.data, hs.size, hs.data + hs.size,
            hs.usedSize, hs.size - hs.usedSize, hs.avail, hs.used,
            hs.avail - hs.used,
            (1 - (hs.usedSize / hs.size)) * 100 if hs.size != 0 else 0,
            (1 - (hs.used / hs.avail)) * 100 if hs.avail != 0 else 0)
def cmd_build(args, remainder):
    import yaml
    with open('ci/plan.yml') as file_:
        plan = yaml.safe_load(file_)

    env = os.environ.copy()
    variant = plan['VARIANTS'][args.variant]
    for key, value in variant['env'].items():
        env[key] = str(value)

    util.printf('--- :snake: pre-build steps... ')
    util.printf('delete any old whl files...')
    wheel_dirs = [
        wheel_path('plaidml').resolve(),
        wheel_path('plaidml/keras').resolve(),
        wheel_path('plaidbench').resolve(),
    ]
    wheel_clean(wheel_dirs)

    explain_log = 'explain.log'
    profile_json = 'profile.json.gz'
    bazel_config = variant.get('bazel_config', args.variant)

    common_args = []
    common_args += ['--config={}'.format(bazel_config)]
    common_args += ['--define=version={}'.format(args.version)]
    common_args += ['--experimental_generate_json_trace_profile']
    common_args += ['--experimental_json_trace_compression']
    common_args += ['--experimental_profile_cpu_usage']
    common_args += ['--explain={}'.format(explain_log)]
    common_args += ['--profile={}'.format(profile_json)]
    common_args += ['--verbose_failures']
    common_args += ['--verbose_explanations']

    util.printf('--- :bazel: Running Build...')
    if platform.system() == 'Windows':
        util.check_call(['git', 'config', 'core.symlinks', 'true'])
        cenv = util.CondaEnv(pathlib.Path('.cenv'))
        cenv.create('environment-windows.yml')
        env.update(cenv.env())
    util.check_call(['bazelisk', 'test', '...'] + common_args, env=env)

    util.printf('--- :buildkite: Uploading artifacts...')
    buildkite_upload(explain_log)
    buildkite_upload(profile_json)
    for wheel_dir in wheel_dirs:
        buildkite_upload('*.whl', cwd=wheel_dir)

    archive_dir = os.path.join(
        args.root,
        args.pipeline,
        args.build_id,
        'build',
        args.variant,
    )
    os.makedirs(archive_dir, exist_ok=True)
    shutil.copy(os.path.join('bazel-bin', 'pkg.tar.gz'), archive_dir)
def make_all_wheels(workdir):
    util.printf('clearing workdir: {}'.format(workdir))
    shutil.rmtree(workdir, ignore_errors=True)
    workdir.mkdir(parents=True, exist_ok=True)

    util.printf('downloading wheels...')
    util.buildkite_download('*.whl', str(workdir), cwd=workdir)

    tarball = 'all_wheels.tar.gz'
    util.printf('creating {}'.format(tarball))
    with tarfile.open(tarball, "w:gz") as tar:
        for whl in workdir.glob('*.whl'):
            util.printf('adding {}'.format(whl))
            tar.add(whl, arcname=whl.name)

    util.printf('uploading {}'.format(tarball))
    util.buildkite_upload(tarball)
def load_datasets(self):
    u.printf("Loading file names...")
    dataset_images, dataset_labels = self.read_image_label_names()
    total_data = len(dataset_labels)
    test_end_index = int(total_data * self.dp.test_size)
    validate_end_index = int(test_end_index + (total_data * self.dp.val_size))
    train_end_index = total_data

    u.printf("Randomizing loaded data...")
    dataset_labels, dataset_images = shuffle(dataset_labels, dataset_images)

    return dataset_labels[0:test_end_index], dataset_images[0:test_end_index], \
        dataset_labels[test_end_index:validate_end_index], dataset_images[test_end_index:validate_end_index], \
        dataset_labels[validate_end_index:train_end_index], dataset_images[validate_end_index:train_end_index]
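# A minimal usage sketch: the six return values are (labels, images) pairs
# for the test, validation, and train splits, in that order:
#
#   test_y, test_x, val_y, val_x, train_y, train_x = self.load_datasets()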
def dumpGameBits(self):
    self._readBitTable()
    pSaveGame = self.client.read(0x803dcae0, '>I')
    tables = (
        0x803a4198,  # temp bits
        pSaveGame + 0x564,
        pSaveGame + 0x24,
        pSaveGame + 0x5D8,
    )
    sizes = (0x80, 0x74, 0x144, 0xAC)
    tableData = []
    for i, addr in enumerate(tables):
        if addr >= 0x80000000:
            tableData.append(self.client.read(addr, sizes[i]))
        else:
            tableData.append(b'\0' * sizes[i])
    printf(
        "\x1B[1mBit#│T│Unk │MaxValue │Value │MaxHex │ValHex\x1B[0m\n"
    )
    for iBit in range(0xF58):
        entry = self._bitTable[iBit]
        tblIdx = entry['tblIdx']
        tData = tableData[tblIdx]
        nBits = entry['nBits']
        firstBit = entry['offset']
        lastBit = firstBit + nBits
        maxVal = (1 << nBits) - 1
        val = 0
        idx = 0
        for i in range(firstBit, lastBit):
            n = i >> 3
            b = tData[n] if n < len(tData) else 0
            if b & (1 << (i & 7)):
                val |= 1 << idx
            idx += 1
        printf(
            "\x1B[48;5;%dm%04X│%d│%02X %d│%10d│%-10d│%08X|%08X\x1B[0m\n",
            ROW_COLOR[iBit & 1], iBit, tblIdx, entry['unk03'],
            entry['unk13'], maxVal, val, maxVal, val)
def execute(self):
    self.load_data()
    execution_type = ""
    if self.params.mode == 1:
        if len(sys.argv) <= 1:
            self.set_params_log()
        self.statistics()
        data = self.run_log()
        execution_type = "log"
    elif self.params.mode == 2:
        if len(sys.argv) <= 1:
            self.set_params_dog()
        self.statistics()
        data = self.run_dog()
        execution_type = "dog"
    elif self.params.mode == 3:
        if len(sys.argv) <= 1:
            self.set_params_doh()
        self.statistics()
        data = self.run_doh()
        execution_type = "doh"

    u.printf("Generating blob image")
    # assumes load_data() stored the source image on self.image; the original
    # referenced a bare `image` name that was never defined in this scope
    self.display_data(self.image, data,
                      ("{}/{}_{}_{}.pdf".format(self.params.output_dir, execution_type,
                                                self.params.image_prefix, self.params.image_postfix)))
    if self.params.export_overlay:
        u.printf("Creating overlay image")
        overlay_image = misc.imread(self.params.overlay_file)
        self.display_data(overlay_image, data,
                          ("{}/{}_{}_overlay_{}.pdf".format(self.params.output_dir, execution_type,
                                                            self.params.image_prefix, self.params.image_postfix)))
    if self.params.export_data:
        u.printf("Exporting CSV data")
        self.export_data(data, ("{}/{}_{}_data_{}.csv".format(self.params.output_dir, execution_type,
                                                              self.params.image_prefix, self.params.image_postfix)))
def replaceEntities(rooms, replaced, replacement):
    numEnts = 0
    numRooms = 0

    def checkEq(ent, b):
        return (
            ent.Type == b[0]
            and (b[1] < 0 or ent.Variant == b[1])
            and (b[2] < 0 or ent.Subtype == b[2])
        )

    def fixEnt(ent, b):
        ent.Type = b[0]
        if b[1] >= 0:
            ent.Variant = b[1]
        if b[2] >= 0:
            ent.Subtype = b[2]

    for currRoom in rooms:
        n = 0
        for stack, x, y in currRoom.spawns():
            for ent in stack:
                if checkEq(ent, replaced):
                    fixEnt(ent, replacement)
                    n += 1
        if n > 0:
            numRooms += 1
            numEnts += n

    printf(
        f"{replaced} -> {replacement}: "
        + (
            f"Replaced {numEnts} entities in {numRooms} rooms"
            if numEnts > 0
            else "No entities to replace!"
        )
    )
    return numEnts
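# A minimal usage sketch: `replaced` and `replacement` are (Type, Variant,
# Subtype) triples, where a negative Variant/Subtype acts as a wildcard.
# The IDs below are hypothetical:
#
#   replaceEntities(rooms, replaced=(51, -1, -1), replacement=(271, 0, 0))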
def _check_result(self):
    util.printf(self.test_info, self.cur.compile_duration,
                self.ref.execution_duration, self.cur.execution_duration,
                self.ratio, self.efficiency)
    skip = self.test_info.workload.get('skip', False)
    expected = self.test_info.workload.get('expected')
    precision = self.test_info.workload.get('precision')
    perf_threshold = self.test_info.workload.get('perf_threshold',
                                                 DEFAULT_RATIO_THRESHOLD)
    correct = self.test_info.workload.get('correct', True)
    popt = util.PlanOption(self.test_info.suite, self.test_info.workload,
                           self.test_info.platform)
    compare = popt.get('compare', True)
    if not self.cur.exists():
        util.printf(' missing cur')
    if not compare and not self.ref.exists():
        util.printf(' missing ref')
    test_result = TestResult(skip, compare)
    try:
        if self.cur.exception:
            first_line = self.cur.exception.split('\n')[0]
            if expected:
                if expected not in self.cur.exception:
                    test_result.add_failure('Expected: %r' % expected)
                else:
                    test_result.set_expected(first_line)
            else:
                test_result.add_failure(first_line)
        elif compare:
            if not self.ref.execution_duration:
                test_result.add_error('Missing reference duration')
            elif not self.cur.execution_duration:
                test_result.add_error('Missing result duration')
            else:
                if self.ratio < perf_threshold:
                    test_result.add_failure('Performance regression')
            base_output = self.golden.np_data
            if precision != 'untested':
                # If base_output is None and precision == 'untested' then
                # this is interpreted to mean no correctness test is desired;
                # so no error that it's missing in result.
                if base_output is None:
                    test_result.add_error('Golden correctness data not found')
                else:
                    if self.cur.np_data is None:
                        test_result.add_error('Missing correctness test output')
                    else:
                        self._check_correctness(base_output, self.cur.np_data,
                                                test_result, precision, correct)
    except Exception as ex:
        import traceback
        traceback.print_exc()
        test_result.add_error(str(ex))
    return test_result
def cmd_pipeline(args, remainder):
    import pystache
    import yaml
    with open('ci/plan.yml') as file_:
        plan = yaml.safe_load(file_)

    variants = []
    for variant in plan['VARIANTS'].keys():
        variants.append(
            dict(
                name=variant,
                python=get_python(variant),
                emoji=get_emoji(variant),
            ))

    tests = []
    for test in util.iterate_tests(plan, args.pipeline):
        if test.shards > 1:
            shard = dict(id=test.shard_id, count=test.shards)
            shard_emoji = get_shard_emoji(test.shard_id)
        else:
            shard = None
            shard_emoji = ''
        tests.append(
            dict(suite=test.suite_name,
                 workload=test.workload_name,
                 platform=test.platform_name,
                 batch_size=test.batch_size,
                 variant=test.variant,
                 timeout=test.timeout,
                 retry=test.retry,
                 soft_fail=test.soft_fail,
                 python=get_python(test.variant),
                 shard=shard,
                 shard_emoji=shard_emoji,
                 emoji=get_emoji(test.variant),
                 engine=get_engine(test.platform_name)))

    if args.count:
        util.printf('variants: {}'.format(len(variants)))
        util.printf('tests   : {}'.format(len(tests)))
        util.printf('total   : {}'.format(len(variants) + len(tests)))
    else:
        ctx = dict(variants=variants, tests=tests)
        yml = pystache.render(load_template('pipeline.yml'), ctx)
        util.printf(yml)
def check_for_download(self, row, name):
    """ check to see if we should download any images from this row """
    download_image_increment = 0
    images = []
    for val in row:
        if not val.startswith('http'):
            continue
        download_img = name
        if download_image_increment:
            download_img += '_' + str(download_image_increment)
        download_img += self.IMG_EXTENSION
        save_file = os.path.join(self.image_dir, download_img)
        images.append(download_img)
        download_image_increment += 1
        if os.path.isfile(save_file):
            util.printf(
                'Already downloaded {}, skipping...'.format(download_img))
            continue
        response = urllib2.urlopen(val)
        util.printf('Downloading {}...'.format(download_img))
        with open(save_file, 'wb') as outfile:
            outfile.write(response.read())
    return images
def read_image_label_names(self):
    dataset_images = []
    dataset_labels = []
    number_loaded = 0
    number_of_files = len(
        glob('{}/*.{}'.format(self.dp.dataset_location,
                              self.dp.input_file_ext)))
    for i in range(number_of_files):
        number_loaded += 1
        file_name = '{}/{}_{}.{}'.format(self.dp.dataset_location,
                                         self.dp.image_prefix, i + 1,
                                         self.dp.input_file_ext)
        dataset_images.append(file_name)
        file_name = '{}/{}_{}.{}'.format(self.dp.dataset_location,
                                         self.dp.image_prefix, i + 1, "txt")
        dataset_labels.append(file_name)
        # chained comparison: stop early only when samples_to_load is non-zero
        if number_loaded >= self.dp.samples_to_load != 0:
            break
        if number_loaded % self.dp.display == 0 and self.dp.display != 0:
            u.printf("Loaded {} file names".format(number_loaded))
    # number_loaded is safe even when no files matched (i would be undefined)
    u.printf("Total file names: {}".format(number_loaded))
    return np.array(dataset_images), np.array(dataset_labels)
def usage(prog):
    printf(
        "Usage: configure several json configs to build multiple loadbalancer request objects (will pull attributes from reqConfig.json)\n"
    )
def updateNotes( allDb ):
    t_0, now, db, TAG = time.time(), intTime(), mw.col.db, mw.col.tags
    ds, nid2mmi = [], {}
    N_notes = db.scalar( 'select count() from notes' )
    mw.progress.start( label='Updating data', max=N_notes, immediate=True )
    fidDb = allDb.fidDb()
    locDb = allDb.locDb( recalc=False ) # fidDb() already forces locDb recalc

    # handle secondary databases
    mw.progress.update( label='Creating seen/known/mature from all.db' )
    seenDb = filterDbByMat( allDb, cfg1('threshold_seen') )
    knownDb = filterDbByMat( allDb, cfg1('threshold_known') )
    matureDb = filterDbByMat( allDb, cfg1('threshold_mature') )
    mw.progress.update( label='Loading priority.db' )
    priorityDb = MorphDb( cfg1('path_priority'), ignoreErrors=True ).db
    if cfg1('saveDbs'):
        mw.progress.update( label='Saving seen/known/mature dbs' )
        seenDb.save( cfg1('path_seen') )
        knownDb.save( cfg1('path_known') )
        matureDb.save( cfg1('path_mature') )

    mw.progress.update( label='Calculating frequency information' )
    pops = [ len( locs ) for locs in allDb.db.values() ]
    pops = [ n for n in pops if n > 1 ]

    mw.progress.update( label='Updating notes' )
    for i,( nid, mid, flds, guid, tags ) in enumerate( db.execute( 'select id, mid, flds, guid, tags from notes' ) ):
        if i % 500 == 0:    mw.progress.update( value=i )
        C = partial( cfg, mid, None )
        if not C('enabled'): continue

        # Get all morphemes for note
        ms = set()
        for fieldName in C('morph_fields'):
            try:
                loc = fidDb[ ( nid, guid, fieldName ) ]
                ms.update( locDb[ loc ] )
            except KeyError: continue
        ms = [ m for m in ms if m.pos not in C('morph_blacklist') ]

        # Determine un-seen/known/mature and i+N
        unseens, unknowns, unmatures, newKnowns = set(), set(), set(), set()
        for m in ms:
            if m not in seenDb.db: unseens.add( m )
            if m not in knownDb.db: unknowns.add( m )
            if m not in matureDb.db: unmatures.add( m )
            if m not in matureDb.db and m in knownDb.db:
                newKnowns.add( m )

        # Determine MMI - Morph Man Index
        N, N_s, N_k, N_m = len( ms ), len( unseens ), len( unknowns ), len( unmatures )

        # Bail early for lite update
        if N_k > 2 and C('only update k+2 and below'): continue

        # average frequency of unknowns (ie. how common the word is within your collection)
        F_k = 0
        for focusMorph in unknowns: # focusMorph used outside loop
            F_k += len( allDb.db[ focusMorph ] )
        F_k_avg = F_k / N_k if N_k > 0 else F_k
        usefulness = F_k_avg

        # add bonus for morphs in priority.db
        isPriority = False
        for focusMorph in unknowns:
            if focusMorph in priorityDb:
                isPriority = True
                usefulness += C('priority.db weight')

        # add bonus for studying recent learned knowns (reinforce)
        for m in newKnowns:
            locs = allDb.db[ m ]
            if locs:
                ivl = min( 1, max( loc.maturity for loc in locs ) )
                usefulness += C('reinforce new vocab weight') / ivl #TODO: maybe average this so it doesnt favor long sentences

        if any( m.pos == u'動詞' for m in unknowns ): #FIXME: this isn't working???
            usefulness += C('verb bonus')

        usefulness = 999 - min( 999, usefulness )

        # difference from optimal length (too little context vs long sentence)
        lenDiff = max( 0, min( 9, abs( C('optimal sentence length') - N ) - 2 ) )

        # calculate mmi
        mmi = 10000*N_k + 1000*lenDiff + usefulness
        if C('set due based on mmi'):
            nid2mmi[ nid ] = mmi

        # Fill in various fields/tags on the note based on cfg
        ts, fs = TAG.split( tags ), splitFields( flds )

        # determine card type
        compTag, vocabTag, notReadyTag, alreadyKnownTag, priorityTag = tagNames = C('tag_comprehension'), C('tag_vocab'), C('tag_notReady'), C('tag_alreadyKnown'), C('tag_priority')
        if N_m == 0:    # sentence comprehension card, m+0
            ts = [ compTag ] + [ t for t in ts if t not in [ vocabTag, notReadyTag ] ]
            setField( mid, fs, C('focusMorph'), u'' )
        elif N_k == 1:  # new vocab card, k+1
            ts = [ vocabTag ] + [ t for t in ts if t not in [ compTag, notReadyTag ] ]
            setField( mid, fs, C('focusMorph'), u'%s' % focusMorph.base )
        elif N_k > 1:   # M+1+ and K+2+
            ts = [ notReadyTag ] + [ t for t in ts if t not in [ compTag, vocabTag ] ]

        # set type agnostic fields
        setField( mid, fs, C('k+N'), u'%d' % N_k )
        setField( mid, fs, C('m+N'), u'%d' % N_m )
        setField( mid, fs, C('morphManIndex'), u'%d' % mmi )
        setField( mid, fs, C('unknowns'), u', '.join( u.base for u in unknowns ) )
        setField( mid, fs, C('unmatures'), u', '.join( u.base for u in unmatures ) )
        setField( mid, fs, C('unknownFreq'), u'%d' % F_k_avg )

        # other tags
        if priorityTag in ts: ts.remove( priorityTag )
        if isPriority: ts.append( priorityTag )

        # update sql db
        tags_ = TAG.join( TAG.canonify( ts ) )
        flds_ = joinFields( fs )
        if flds != flds_ or tags != tags_:  # only update notes that have changed
            csum = fieldChecksum( fs[0] )
            sfld = stripHTML( fs[ getSortFieldIndex( mid ) ] )
            ds.append( { 'now':now, 'tags':tags_, 'flds':flds_, 'sfld':sfld, 'csum':csum, 'usn':mw.col.usn(), 'nid':nid } )

    mw.progress.update( value=i, label='Updating anki database...' )
    mw.col.db.executemany( 'update notes set tags=:tags, flds=:flds, sfld=:sfld, csum=:csum, mod=:now, usn=:usn where id=:nid', ds )
    TAG.register( tagNames )

    # Now reorder new cards based on MMI
    mw.progress.update( value=i, label='Updating new card ordering...' )
    ds = []
    for ( cid, nid, due ) in db.execute( 'select id, nid, due from cards where type = 0' ):
        if nid in nid2mmi: # owise it was disabled
            due_ = nid2mmi[ nid ]
            if due != due_: # only update cards that have changed
                ds.append( { 'now':now, 'due':due_, 'usn':mw.col.usn(), 'cid':cid } )
    mw.col.db.executemany( 'update cards set due=:due, mod=:now, usn=:usn where id=:cid', ds )
    mw.reset()

    printf( 'Updated notes in %f sec' % ( time.time() - t_0 ) )
    mw.progress.finish()
    return knownDb # main() expects this for stats.updateStats()
def updateNotes( allDb ):
    t_0, now, db, TAG = time.time(), intTime(), mw.col.db, mw.col.tags
    ds, nid2mmi = [], {}
    N_notes = db.scalar( 'select count() from notes' )
    mw.progress.start( label='Updating data', max=N_notes, immediate=True )
    fidDb = allDb.fidDb()
    locDb = allDb.locDb( recalc=False ) # fidDb() already forces locDb recalc

    # read tag names
    compTag, vocabTag, freshTag, notReadyTag, alreadyKnownTag, priorityTag, tooShortTag, tooLongTag = tagNames = jcfg('Tag_Comprehension'), jcfg('Tag_Vocab'), jcfg('Tag_Fresh'), jcfg('Tag_NotReady'), jcfg('Tag_AlreadyKnown'), jcfg('Tag_Priority'), jcfg('Tag_TooShort'), jcfg('Tag_TooLong')
    TAG.register( tagNames )
    badLengthTag = jcfg2().get('Tag_BadLength')

    # handle secondary databases
    mw.progress.update( label='Creating seen/known/mature from all.db' )
    seenDb = filterDbByMat( allDb, cfg1('threshold_seen') )
    knownDb = filterDbByMat( allDb, cfg1('threshold_known') )
    matureDb = filterDbByMat( allDb, cfg1('threshold_mature') )
    mw.progress.update( label='Loading priority.db' )
    priorityDb = MorphDb( cfg1('path_priority'), ignoreErrors=True ).db
    if cfg1('saveDbs'):
        mw.progress.update( label='Saving seen/known/mature dbs' )
        seenDb.save( cfg1('path_seen') )
        knownDb.save( cfg1('path_known') )
        matureDb.save( cfg1('path_mature') )

    mw.progress.update( label='Updating notes' )
    for i,( nid, mid, flds, guid, tags ) in enumerate( db.execute( 'select id, mid, flds, guid, tags from notes' ) ):
        if i % 500 == 0:    mw.progress.update( value=i )
        C = partial( cfg, mid, None )

        note = mw.col.getNote(nid)
        notecfg = getFilter(note)
        if notecfg is None or not notecfg['Modify']: continue

        # Get all morphemes for note
        morphemes = set()
        for fieldName in notecfg['Fields']:
            try:
                loc = fidDb[ ( nid, guid, fieldName ) ]
                morphemes.update( locDb[ loc ] )
            except KeyError: continue

        # Determine un-seen/known/mature and i+N
        unseens, unknowns, unmatures, newKnowns = set(), set(), set(), set()
        for morpheme in morphemes:
            if morpheme not in seenDb.db: unseens.add( morpheme )
            if morpheme not in knownDb.db: unknowns.add( morpheme )
            if morpheme not in matureDb.db: unmatures.add( morpheme )
            if morpheme not in matureDb.db and morpheme in knownDb.db:
                newKnowns.add( morpheme )

        # Determine MMI - Morph Man Index
        N, N_s, N_k, N_m = len( morphemes ), len( unseens ), len( unknowns ), len( unmatures )

        # Bail early for lite update
        if N_k > 2 and C('only update k+2 and below'): continue

        # average frequency of unknowns (ie. how common the word is within your collection)
        F_k = 0
        for focusMorph in unknowns: # focusMorph used outside loop
            F_k += allDb.frequency(focusMorph)
        F_k_avg = F_k // N_k if N_k > 0 else F_k
        usefulness = F_k_avg

        # add bonus for morphs in priority.db
        isPriority = False
        for focusMorph in unknowns:
            if focusMorph in priorityDb:
                isPriority = True
                usefulness += C('priority.db weight')

        # add bonus for studying recent learned knowns (reinforce)
        for morpheme in newKnowns:
            locs = allDb.db[ morpheme ]
            if locs:
                ivl = min( 1, max( loc.maturity for loc in locs ) )
                usefulness += C('reinforce new vocab weight') // ivl #TODO: maybe average this so it doesnt favor long sentences

        if any( morpheme.pos == u'動詞' for morpheme in unknowns ): #FIXME: this isn't working???
            usefulness += C('verb bonus')

        usefulness = 999 - min( 999, usefulness )

        # difference from optimal length range (too little context vs long sentence)
        lenDiffRaw = min( N - C('min good sentence length'), max( 0, N - C('max good sentence length') ) )
        lenDiff = min( 9, abs( lenDiffRaw ) )

        # calculate mmi
        mmi = 10000*N_k + 1000*lenDiff + usefulness
        if C('set due based on mmi'):
            nid2mmi[ nid ] = mmi

        # Fill in various fields/tags on the note based on cfg
        ts, fs = TAG.split( tags ), splitFields( flds )

        # clear any 'special' tags; the appropriate one is set in the next few lines
        ts = [ t for t in ts if t not in [ notReadyTag, compTag, vocabTag, freshTag ] ]

        # determine card type
        if N_m == 0:    # sentence comprehension card, m+0
            ts = ts + [ compTag ]
            setField( mid, fs, jcfg('Field_FocusMorph'), u'' )
        elif N_k == 1:  # new vocab card, k+1
            ts = ts + [ vocabTag ]
            setField( mid, fs, jcfg('Field_FocusMorph'), u'%s' % focusMorph.base )
        elif N_k > 1:   # M+1+ and K+2+
            ts = ts + [ notReadyTag ]
            setField( mid, fs, jcfg('Field_FocusMorph'), u'' )
        elif N_m == 1:  # k+0 and m+1: no new vocabulary, card for a newly learned morpheme
            ts = ts + [ freshTag ]
            setField( mid, fs, jcfg('Field_FocusMorph'), u'%s' % list(unmatures)[0].base )
        else:           # only case left: k+0 but m+2 or higher; also a card for newly learned morphemes
            ts = ts + [ freshTag ]
            setField( mid, fs, jcfg('Field_FocusMorph'), u'' )

        # set type agnostic fields
        setField( mid, fs, jcfg('Field_UnknownMorphCount'), u'%d' % N_k )
        setField( mid, fs, jcfg('Field_UnmatureMorphCount'), u'%d' % N_m )
        setField( mid, fs, jcfg('Field_MorphManIndex'), u'%d' % mmi )
        setField( mid, fs, jcfg('Field_Unknowns'), u', '.join( u.base for u in unknowns ) )
        setField( mid, fs, jcfg('Field_Unmatures'), u', '.join( u.base for u in unmatures ) )
        setField( mid, fs, jcfg('Field_UnknownFreq'), u'%d' % F_k_avg )

        # remove deprecated tag
        if badLengthTag is not None and badLengthTag in ts:
            ts.remove( badLengthTag )

        # other tags
        if priorityTag in ts: ts.remove( priorityTag )
        if isPriority: ts.append( priorityTag )

        if tooShortTag in ts: ts.remove( tooShortTag )
        if lenDiffRaw < 0: ts.append( tooShortTag )

        if tooLongTag in ts: ts.remove( tooLongTag )
        if lenDiffRaw > 0: ts.append( tooLongTag )

        # remove unnecessary tags
        if not jcfg('Option_SetNotRequiredTags'):
            unnecessary = [ priorityTag, tooShortTag, tooLongTag ]
            ts = [ tag for tag in ts if tag not in unnecessary ]

        # update sql db
        tags_ = TAG.join( TAG.canonify( ts ) )
        flds_ = joinFields( fs )
        if flds != flds_ or tags != tags_:  # only update notes that have changed
            csum = fieldChecksum( fs[0] )
            sfld = stripHTML( fs[ getSortFieldIndex( mid ) ] )
            ds.append( { 'now':now, 'tags':tags_, 'flds':flds_, 'sfld':sfld, 'csum':csum, 'usn':mw.col.usn(), 'nid':nid } )

    mw.progress.update( value=i, label='Updating anki database...' )
    mw.col.db.executemany( 'update notes set tags=:tags, flds=:flds, sfld=:sfld, csum=:csum, mod=:now, usn=:usn where id=:nid', ds )

    # Now reorder new cards based on MMI
    mw.progress.update( value=i, label='Updating new card ordering...' )
    ds = []
    # "type = 0": new cards
    # "type = 1": learning cards [supposedly; in practice no learning card had this type]
    # "type = 2": review cards
    for ( cid, nid, due ) in db.execute( 'select id, nid, due from cards where type = 0' ):
        if nid in nid2mmi: # owise it was disabled
            due_ = nid2mmi[ nid ]
            if due != due_: # only update cards that have changed
                ds.append( { 'now':now, 'due':due_, 'usn':mw.col.usn(), 'cid':cid } )
    mw.col.db.executemany( 'update cards set due=:due, mod=:now, usn=:usn where id=:cid', ds )
    mw.reset()

    printf( 'Updated notes in %f sec' % ( time.time() - t_0 ) )
    mw.progress.finish()
    return knownDb
def mkAllDb( allDb=None ):
    import config; reload(config)
    t_0, db, TAG = time.time(), mw.col.db, mw.col.tags
    N_notes = db.scalar( 'select count() from notes' )
    N_enabled_notes = 0 # for providing an error message if there is no note that is used for processing
    mw.progress.start( label='Prep work for all.db creation', max=N_notes, immediate=True )

    if not allDb: allDb = MorphDb()
    fidDb = allDb.fidDb()
    locDb = allDb.locDb( recalc=False ) # fidDb() already forces locDb recalc

    mw.progress.update( label='Generating all.db data' )
    for i,( nid, mid, flds, guid, tags ) in enumerate( db.execute( 'select id, mid, flds, guid, tags from notes' ) ):
        if i % 500 == 0:    mw.progress.update( value=i )
        C = partial( cfg, mid, None )

        note = mw.col.getNote(nid)
        notecfg = getFilter(note)
        if notecfg is None: continue
        morphemizer = getMorphemizerByName(notecfg['Morphemizer'])

        N_enabled_notes += 1

        mats = [ ( 0.5 if ivl == 0 and ctype == 1 else ivl ) for ivl, ctype in db.execute( 'select ivl, type from cards where nid = :nid', nid=nid ) ]
        if C('ignore maturity'):
            mats = [ 0 for mat in mats ]
        ts, alreadyKnownTag = TAG.split( tags ), jcfg('Tag_AlreadyKnown')
        if alreadyKnownTag in ts:
            mats += [ C('threshold_mature')+1 ]

        for fieldName in notecfg['Fields']:
            try: # if doesn't have field, continue
                #fieldValue = normalizeFieldValue( getField( fieldName, flds, mid ) )
                fieldValue = extractFieldData( fieldName, flds, mid )
            except KeyError: continue
            except TypeError:
                mname = mw.col.models.get( mid )[ 'name' ]
                errorMsg( u'Failed to get field "{field}" from a note of model "{model}". Please fix your config.py file to match your collection appropriately and ignore the following error.'.format( model=mname, field=fieldName ) )
                raise

            loc = fidDb.get( ( nid, guid, fieldName ), None )
            if not loc:
                loc = AnkiDeck( nid, fieldName, fieldValue, guid, mats )
                ms = getMorphemes(morphemizer, fieldValue, ts)
                if ms: #TODO: this needed? should we change below too then?
                    #printf( ' .loc for %d[%s]' % ( nid, fieldName ) )
                    locDb[ loc ] = ms
            else:
                # mats changed -> new loc (new mats), move morphs
                if loc.fieldValue == fieldValue and loc.maturities != mats:
                    #printf( ' .mats for %d[%s]' % ( nid, fieldName ) )
                    newLoc = AnkiDeck( nid, fieldName, fieldValue, guid, mats )
                    locDb[ newLoc ] = locDb.pop( loc )
                # field changed -> new loc, new morphs
                elif loc.fieldValue != fieldValue:
                    #printf( ' .morphs for %d[%s]' % ( nid, fieldName ) )
                    newLoc = AnkiDeck( nid, fieldName, fieldValue, guid, mats )
                    ms = getMorphemes(morphemizer, fieldValue, ts)
                    locDb.pop( loc )
                    locDb[ newLoc ] = ms

    if N_enabled_notes == 0:
        mw.progress.finish()
        errorMsg( u'There is no card that can be analyzed or be moved. Add cards or (re-)check your configuration under "Tools -> MorphMan Preferences" or in "Anki/addons/morph/config.py" for mistakes.' )
        return None

    printf( 'Processed all %d notes in %f sec' % ( N_notes, time.time() - t_0 ) )
    mw.progress.update( value=i, label='Creating all.db object' )
    allDb.clear()
    allDb.addFromLocDb( locDb )
    if cfg1('saveDbs'):
        mw.progress.update( value=i, label='Saving all.db to disk' )
        allDb.save( cfg1('path_all') )
        printf( 'Processed all %d notes + saved all.db in %f sec' % ( N_notes, time.time() - t_0 ) )
    mw.progress.finish()
    return allDb
def usage(prog):
    printf("Usage is %s <imageId> <flavorId> <serverName> <skelPath>\n", prog)
    printf("\n")
    printf("Build a cloud server based on credentials supplied in the\n")
    printf("auth_headers.db. Use the getauth script to populate the\n")
    printf(".db file\n")
def usage(prog):
    printf("usage is %s <json_config_file> [delay_secs]\n", prog)
    printf("\n")
    printf("if delay_secs is specified then the script will wait for the\n")
    printf("specified number of seconds in between lb builds\n")