def _attributes(self, resource_root_path, resource_root_rel, force_fix_to_abs):
    """Serialize ``self.attributes`` into an HTML attribute string.

    Each attribute is rendered as ``' key="value"'`` (leading space included);
    ``None`` values render as a bare valueless attribute (``' key'``).

    :param resource_root_path: root path merged into magic-link paths
    :param resource_root_rel: relative root joined with rel-links when not forcing abs
    :param force_fix_to_abs: when True, resolve rel-links against resource_root_path
    :return: concatenated attribute string (empty string when there are no attributes)
    """
    a = ''
    for k, v in self.attributes.items():
        if k in ('onclick', 'onchange', 'onmouseover', 'onmouseout', 'onkeydown', 'onload'):
            # not sure why i cared so much to make this an err before
            # (message typo 'even handlers' kept verbatim; it is a runtime string)
            warn('JS pipeline not prepared for html even handlers')
        if v is None:
            a += f' {k}'  # valueless attribute
        elif isinstsafe(v, CSS_Style_Attribute):
            a += f' {k}="{repr(v)}"'
        elif isinstance(v, MagicLink.TempAbsPath) or not isblankstr(v):
            if isinstance(v, MagicLink.TempAbsPath):
                # FIX (dead branch): both arms of the former `if force_fix_to_abs`
                # computed this exact same value, so the branch was collapsed.
                # NOTE(review): the non-forced arm was presumably meant to use
                # resource_root_rel like TempRelToResPath does — confirm intent
                # before changing behavior.
                v = merge_overlapping(resource_root_path, v.abs)
            elif isinstance(v, MagicLink.TempRelToResPath):
                if force_fix_to_abs:
                    v = merge_overlapping(resource_root_path, v.rel)
                else:
                    v = os.path.join(resource_root_rel, v.rel)
            a += f' {k}="{v}"'
    return a
def gen():
    """Yield batches of ``([data, ...], [label, ...])`` built from ``self.imds``.

    Batch size comes from ``nnstate.FLAGS.batchsize``. Each item is either
    preprocessed in-line (salience mode) or produced by ``getReal`` (normal
    mode). NOTE(review): closes over ``self``, ``HW`` and ``pp_type`` from the
    enclosing scope — their exact semantics are not visible here.
    """
    # twentyData = []
    # twentyLabel = []
    twentyPairs = []  # accumulates items for the current batch
    i = 0  # position within the current batch (1-based once incremented)
    # did this?
    warn('NEED TO MERGE getReal and PREPROCESSOR CODE. USE PREPROCESSOR.')
    sfilt = MattSalienceFilter()
    with Progress(len(self.imds)) as prog:
        for imd in self.imds:
            i += 1
            if i <= nnstate.FLAGS.batchsize:
                if nnstate.FLAGS.salience:
                    # salience path: load + (optionally) filter + preprocess here
                    the_new = imd
                    data = File(imd.file).load()
                    if nnstate.FLAGS.cfg_cfg['full_cfg']['SFILT']:
                        data = sfilt.transform(data)
                    the_new.data = preprocessors(HW)[pp_type].preprocess(data)
                    # I think I fixed this. problem was preprocess resize was not resizing if one of the dimensions was right but not the other. Used an 'and' when I should have used an 'or'.
                    # if (str(type(the_new.data)) != "<class 'numpy.ndarray'>") or (
                    #         str(the_new.data.dtype) != "float32") or str(
                    #     the_new.data.shape) != '(299, 299, 3)':
                    #     debug
                    #     breakpoint()
                    # log('finished preprocess')
                    the_new.label = self.class_label_map[imd.clazz]
                else:
                    # normal path: delegate loading/preprocessing to getReal
                    the_new = getReal(
                        (imd, HW),
                        self.class_label_map,
                        self.normalize_single_ims,
                        self.std_d,
                        self.USING_STD_DIR
                    )
                twentyPairs += [the_new]
                # twentyData.append(imd.data)
                # twentyLabel.append(imd.label)
            if i == nnstate.FLAGS.batchsize:
                # batch is full: emit it and reset the accumulator
                # batch = SimpleNamespace()
                # batch.data = twentyData
                # batch.label = twentyLabel
                yield (
                    [imd.data for imd in twentyPairs],
                    [imd.label for imd in twentyPairs]
                )
                twentyPairs.clear()
                # twentyData = []
                # twentyLabel = []
                i = 0
            # this is maybe better than logging in fill_cmat because it also works during net.predict()
            prog.tick()
def _save(self, pretrained=False):
    """Save ``self.net`` under ``_arch/`` in both SavedModel and .h5 form.

    :param pretrained: when True, append ``_pretrained`` to the save name.

    A known TF ``TypeError`` during save is tolerated: the warning is logged
    and any partially written files are removed.
    """
    suffix = '_pretrained' if pretrained else ''
    base_path = f'_arch/{self.ARCH_LABEL}{suffix}'
    h5_path = f'{base_path}.h5'
    try:
        self.net.save(base_path)
        self.net.save(h5_path)
        log('saved model')
    except TypeError:
        warn(f'could not save model due to tf bug')
        # clean up whatever the failed save may have left behind
        File(base_path).deleteIfExists()
        File(h5_path).deleteIfExists()
def backup(self):
    """Copy this file into a sibling ``backups`` folder.

    The copy is named ``<name>.backup<n>`` where ``n`` is the first unused
    index, so prior backups are never overwritten. No-op (with a warning)
    when the file itself does not exist.
    """
    if not self.exists:
        warn(f'cannot back up {self}, which does not exist')
        return
    folder = self.parent['backups'].mkdir()
    assert folder.isdir
    # probe .backup1, .backup2, ... until a free slot is found
    n = 0
    while True:
        n += 1
        candidate = folder[f'{self.name}.backup{n}']
        if not candidate.exists:
            candidate.write(self.read())
            break
def write_weight_reports(self):
    """Write two weight reports into ``self.arch_summary_folder``:

    - ``<ARCH_LABEL>_weights.txt``: an indented tree of the HDF5 weights file
      (group names, dataset shapes and dtypes)
    - ``<ARCH_LABEL>_weights_matlab.txt``: ``repr`` of the ONNX graph nodes

    FIX: the h5py file handle was previously never closed (resource leak);
    it is now managed with a ``with`` block.
    """
    import h5py
    weights_report_file = self.arch_summary_folder[
        f'{self.ARCH_LABEL}_weights.txt']
    o_weights_report_file = self.arch_summary_folder[
        f'{self.ARCH_LABEL}_weights_matlab.txt']
    weights_report_file.write('')  # truncate any previous report

    def processGroup(group, rep, indent=0):
        # Recursively append one line per key; groups recurse one indent deeper.
        for ke in listkeys(group):
            rep += '\t' * indent
            rep += ke
            item = group[ke]
            if 'Dataset' in cn(item):
                # c = 'Dataset'
                rep += f'\t\t{item.shape} {item.dtype}\n'
            elif 'Group' in cn(item):
                # c = 'Group'
                rep += '\n'
                rep = processGroup(item, rep, indent + 1)
                # sub = f'{item.shape} {item.dtype}'
            else:
                err(f'what is this: {cn(item)}')
        return rep

    report = ''
    # context manager guarantees the HDF5 handle is released even if
    # processGroup raises (e.g. via err())
    with h5py.File(self.weightsf(), "r") as weights_file:
        report = processGroup(weights_file, report)
    log('writing weights report...')
    weights_report_file.write(report)
    log('finished writing weights report')
    log('writing matlab weight report...')
    warn(
        'THERE ARE 2 VERSIONS OF THE ONNX FILES IN _weights/matlab AND I DONT KNOW THE DIFFERENCE'
    )
    import onnx
    o_model = onnx.load(self.oweightsf())
    o_weights_report_file.write(repr(o_model.graph.node))
    log('finished writing matlab weight report...')
def __init__(self, file, just_sync_at_end=True, offline=False):
    """Initialize a cloud-backed database file and perform the initial sync.

    :param file: backing file object (project File type — has .wc, .exists,
        .abspath, .wcurl; TODO confirm exact contract)
    :param just_sync_at_end: when True, register a push at interpreter exit
    :param offline: when True, flip the CLASS-level offline switch (affects
        all Database instances, not just this one)
    """
    if offline:
        # NOTE: mutates the class attribute, so offline mode is global
        Database.offline_mode = True
    # write through __dict__ to bypass any __setattr__ defined on this class
    self.__dict__['just_sync_at_end'] = just_sync_at_end
    super().__init__(file)
    if not self.offline_mode:
        # initial sync: prefer the cloud copy if it exists, otherwise push
        # ours (creating a fresh local file first if needed)
        if file.wc.exists:
            self.pull()
        else:
            if not file.exists:
                self._hard_reset()
            self.push()
    else:
        # NOTE(review): 'preforming' is a typo for 'performing' in this
        # runtime message — left verbatim here
        warn(
            f'{self} is not preforming initial sync since {self.offline_mode=}'
        )
    # drop a .webloc shortcut next to the .json pointing at the cloud copy
    write_webloc(file.abspath.replace('.json', '.webloc'), file.wcurl)
    log(f'database url: {file.wcurl=}')
    if just_sync_at_end:
        # final sync happens automatically at interpreter shutdown
        atexit.register(self.push)
def __init__(self, database, *args, allow_get=True, allow_set=True,
             password=None, **kwargs):
    """Set up a cloud API endpoint for *database* and register its password.

    :param database: the Database this API fronts
    :param allow_get: permit read access through the API
    :param allow_set: permit write access through the API
    :param password: plaintext password stored into the cloud
        ``APIPasswords`` symbol, keyed by the api file's abspath
    """
    apiFile = self.apifile_for(database.file)
    self.database = database
    self.allow_get = allow_get
    self.allow_set = allow_set
    self.password = password
    super().__init__(apiFile, *args, **kwargs)
    if not self.offline_mode:
        # ensure the cloud-side APIPasswords association exists
        weval(
            If(
                wl.Not(
                    wl.FileExistsQ(
                        wl.CloudObject("APIPasswords",
                                       wlexpr("$CloudSymbolBase")))),
                wlexpr('CloudSymbol["APIPasswords"]=<||>')))
        # SECURITY(review): the abspath and plaintext password are spliced
        # into a Wolfram Language expression via f-string — a quote or
        # backslash in either would break/alter the expression (injection
        # risk), and the password is stored in plaintext cloud state.
        weval(
            wlexpr(
                f'pwds = CloudSymbol["APIPasswords"]; pwds["{apiFile.abspath}"] = "{self.password}"; CloudSymbol["APIPasswords"] = pwds;'
            ))
    else:
        # NOTE(review): 'switched one' is a typo for 'switched on' in this
        # runtime message — left verbatim here
        warn(
            f'not pushing password for {self} because offline mode is switched one'
        )
    # write a small human-readable usage note next to the api file
    apiFile.parent[f"{apiFile.name_pre_ext}_api_doc.text"].write(
        f"HTTP GET: {apiFile.wcurl}?<url encoded json obj>")
def push(self):
    """Push the backing file to its cloud copy, unless offline mode is on.

    In offline mode the push is skipped with a warning.
    """
    if not self.offline_mode:
        self.file.wc.push()
    else:
        # FIX: message typo corrected ('switched one' -> 'switched on')
        warn(f'not pushing {self} because offline mode is switched on')
def __post_init__(self):
    """Build and (unless offline) deploy the Wolfram Cloud API function.

    Assembles a WL ``APIFunction`` that reassembles chunked messages: each
    HTTP call carries (message, index, total, messageID); chunks accumulate
    in the cloud symbol ``APIMessages`` and, when the last chunk arrives,
    the joined message is parsed as JSON and handed to the service
    expressions. The formatted WL source is written to ``self.apiFile``.
    """
    self.service = WolframService()
    self.build_api_fun(self.service)
    expression = APIFunction(
        [
            APIRule("message", "String"),
            APIRule("index", "Integer"),
            APIRule("total", "Integer"),
            APIRule("messageID", "String"),
        ],
        Function(
            wlexprc('xx'),
            wlblock(
                wlexpr('CloudSymbol["APILog"] = "started api base"'),
                # lazily create the APIMessages association in the cloud
                If(
                    wl.Not(
                        wl.FileExistsQ(
                            wl.CloudObject("APIMessages",
                                           wlexpr("$CloudSymbolBase")))),
                    wlexpr('CloudSymbol["APIMessages"]=<||>')),
                # garbage-collect stale entries: keys embed a unix-ms
                # timestamp after '***'; drop entries older than 60s
                wl.For(
                    wlexpr('i=1'),
                    wlexpr('i<=Length@Keys[CloudSymbol["APIMessages"]]'),
                    wlexpr('i++'),
                    If(
                        wlexpr(
                            '((UnixTime[] * 1000) - ToExpression[StringSplit[(Keys[CloudSymbol["APIMessages"]])[[i]],"***"][[2]]]) > 60000'
                        ),
                        wlexpr(
                            'apiMessages = CloudSymbol["APIMessages"]; apiMessages = KeyDrop[apiMessages,Keys[CloudSymbol["APIMessages"]][[i]]]; CloudSymbol["APIMessages"] = apiMessages;'
                        ))),
                # first chunk for this messageID: start an empty list
                If(
                    wl.Not(
                        wl.KeyExistsQ(wl.CloudSymbol("APIMessages"),
                                      wlexpr('xx["messageID"]'))),
                    wlexpr(
                        'APIMessages=CloudSymbol["APIMessages"]; APIMessages[xx["messageID"]] = {}; CloudSymbol["APIMessages"] = APIMessages;'
                    )),
                # append this chunk (index, total, message body)
                wlexpr(
                    'thisMessageData = <|"i"->xx["index"],"t"->xx["total"],"m"->xx["message"]|>; APIMessages=CloudSymbol["APIMessages"]; myMessages = APIMessages[xx["messageID"]]; myMessages = Append[myMessages,thisMessageData]; APIMessages[xx["messageID"]] = myMessages; CloudSymbol["APIMessages"] = APIMessages;'
                ),
                # on the final chunk: join, parse as JSON, run the service
                If(
                    wlexpr('thisMessageData["i"]==thisMessageData["t"]'),
                    wlblock(
                        wlexpr(
                            'fullMessage = StringJoin[Map[Function[xxxx,xxxx["m"]],CloudSymbol["APIMessages"][xx["messageID"]]]]'
                        ),
                        wlexpr(
                            'fullMessage = ImportString[fullMessage,"RawJSON"]'
                        ),
                        *self.service.build_expressions(),
                    )),
            )))
    expression = FormatWLInput(inputForm(expression))
    self.apiFile = File(self.apiFile)
    assert self.apiFile.ext == 'wl', f'extension of {self.apiFile} is {self.apiFile.ext}'
    self.apiURL = self.apiFile.wcurl
    self.apiFile.write(expression)
    if not self.offline_mode:
        MWL.cloud_deploy(expression, self.apiFile, PERMISSIONS.PUBLIC)
    else:
        # NOTE(review): 'switched one' is a typo for 'switched on' in this
        # runtime message — left verbatim here
        warn(f'not deploying {self} because offline mode is switched one')
    log(f'{self.apiURL=}')
def sendlongpass(self):
    """Read ``/Users/matt/.passlong``, reverse it, and send it on ``self.p``.

    The file holds the secret stored reversed; reversing restores it.
    """
    warn('huge security risk 2')
    with open('/Users/matt/.passlong', 'r') as fh:
        reversed_secret = ''.join(reversed(fh.read()))
    self.p.sendline(reversed_secret)
def sendpass(self):
    """Read ``/Users/matt/.pass``, reverse it, and send it on ``self.p``.

    The file holds the secret stored reversed; reversing restores it.
    """
    warn('huge security risk 1')
    with open('/Users/matt/.pass', 'r') as fh:
        secret = fh.read()
        self.p.sendline(secret[::-1])