def fit(self, reporter=None) -> typing.Collection[Task]:
    """Build the collection of Tasks that executes this experiment.

    For a composite experiment (more than one sub-experiment) the tasks of
    every sub-experiment are gathered, followed by one task that computes
    the aggregated result.  For a plain experiment the parsed config
    produces the fit tasks, followed by a result task and an
    on-finished bookkeeping task.

    Returns an empty list when *reporter* signals cancellation.  On any
    exception the error state is persisted and None is returned implicitly
    (preserved original behavior).
    """
    subExps = self.apply(True)

    def _cancelled():
        # Cooperative cancellation, checked between sub-experiment fits.
        return reporter is not None and reporter.isCanceled()

    try:
        if len(subExps) > 1:
            all_units_of_work = []
            for x in subExps:
                m = x.config()
                # presumably prevents nested seed expansion in
                # sub-experiments — TODO confirm; mutates the dict
                # returned by config().
                if "num_seeds" in m:
                    del m["num_seeds"]
                if _cancelled():
                    save_yaml(self.getSummaryYamlPath(), "Cancelled")
                    return []
                all_units_of_work.extend(x.fit(reporter))
                if _cancelled():
                    save_yaml(self.getSummaryYamlPath(), "Cancelled")
                    return []

            def c():
                self.result()

            # Aggregation task runs after every sub-experiment task.
            t = Task(c)
            t.deps = all_units_of_work.copy()
            all_units_of_work.append(t)
            return all_units_of_work

        if not self.onlyReports:
            self.cleanup()
        self.setInProgress(True)
        cfg = self.parse_config()
        cfg.gpus = self.gpus
        cfg._reporter = reporter
        units_of_work = []
        if self.time > 0:
            # Enforce a wall-clock budget on the run.
            cfg.callbacks = cfg.callbacks + [TimeoutCallback(self, cfg)]
        if self.onlyReports:
            units_of_work.append(Task(lambda: cfg.generateReports()))
        else:
            if hasattr(self, "folds") and self.folds:
                units_of_work += cfg.fit(parallel=True,
                                         foldsToExecute=self.folds)
            else:
                units_of_work += cfg.fit(parallel=True)
        r = Task(lambda: self.result())
        r.deps = units_of_work.copy()
        units_of_work.append(r)
        # Bookkeeping task must run even when earlier tasks errored.
        r = Task(lambda ts: self.onExperimentFinished(ts), runOnErrors=True,
                 needs_tasks=True)
        r.deps = units_of_work.copy()
        units_of_work.append(r)
        return units_of_work
    except Exception:
        # Was a bare `except:`, which also trapped KeyboardInterrupt and
        # SystemExit. NOTE(review): the exception is still swallowed and
        # None returned — callers appear to rely on the error-state files.
        self.onExperimentFinished()
        self._onErrors([Error()])
def perform(self, server, reporter: ProgressMonitor):
    """Introspect the project at ``self.path`` on *server* and return the result.

    When ``self.outPath`` is set, the introspection result is also written
    there as YAML.  *reporter* is accepted for interface compatibility but
    is not used here.
    """
    project = server.project(self.path)
    r = project.introspect()
    # Best-effort import — presumably pre-loads segmentation_models so its
    # types are registered for introspection; TODO confirm. Absence is
    # tolerated. Narrowed from a bare `except:` (which also trapped
    # KeyboardInterrupt/SystemExit).
    try:
        import segmentation_models
    except Exception:
        pass
    if self.outPath is not None:
        save_yaml(self.outPath, r)
    return r
def init_shapes(self, dataset):
    """Resolve the working dataset, persist its contribution (if any) and
    the x/y shapes of its first item, then return the dataset.

    When *dataset* is None it is obtained from ``get_dataset()``; when a
    preprocessing config is present the dataset is wrapped accordingly.
    """
    if dataset is None:
        dataset = self.get_dataset()
    self._dataset = dataset
    if self.preprocessing is not None:
        dataset = net.create_preprocessor_from_config(
            self.declarations, dataset, self.preprocessing, self.imports)
    sample = dataset[0]
    # Either attribute spelling is saved under the same ".contribution" file;
    # the singular form wins when both exist.
    for attr in ("contribution", "contributions"):
        if hasattr(dataset, attr):
            utils.save(self.path + ".contribution", getattr(dataset, attr))
            break
    utils.save_yaml(self.path + ".shapes",
                    (_shape(sample.x), _shape(sample.y)))
    return dataset
def dumpTo(self, path, extra, remove=()):
    """Serialize this experiment's config to YAML at *path*.

    The config is copied, overlaid with the *extra* mapping, and stripped
    of every key in *remove* before saving.

    Raises KeyError when a key in *remove* is absent (caller contract,
    unchanged).
    """
    c = self.config().copy()
    c.update(extra)  # idiomatic replacement for the manual key-copy loop
    for r in remove:
        del c[r]
    return save_yaml(constructConfigYamlConcretePath(path), c)
def mark_loaded(root, url):
    """Record *url* in the downloaded-dependencies list under *root*/.metadata.

    Loads (or initializes) ``downloaded_deps.yaml``, appends *url* to its
    ``dependencies`` list, and writes it back.
    """
    fullPath = os.path.join(root, ".metadata")
    utils.ensure(fullPath)
    fullPath = os.path.join(fullPath, "downloaded_deps.yaml")
    try:
        loaded_yaml = load_yaml(fullPath)
    except Exception:
        # First run / unreadable file: start fresh. Narrowed from a bare
        # `except:` (which also trapped KeyboardInterrupt/SystemExit).
        loaded_yaml = {"dependencies": []}
    # Robustness: tolerate a pre-existing file that lacks the key
    # (previously raised KeyError).
    deps = loaded_yaml.setdefault("dependencies", [])
    deps.append(url)
    utils.save_yaml(fullPath, loaded_yaml)
def save(self, path):
    """Write this object's fold layout to *path* as YAML."""
    folds = self.folds
    utils.save_yaml(path, folds)
def _onErrors(self, errors):
    """Persist the logs of *errors* and mark the experiment summary as failed."""
    error_logs = [err.log() for err in errors]
    save_yaml(self.getErrorYamlPath(), {"errors": error_logs})
    save_yaml(self.getSummaryYamlPath(), "Error")
def setInProgress(self, val: bool):
    """Sync the on-disk in-progress marker with *val*.

    Creates the marker file when turning the flag on, deletes it when
    turning it off; does nothing when the marker already matches.
    """
    currently = self.isInProgress()
    if val and not currently:
        save_yaml(self.getInProgressYamlPath(), True)
    elif not val and currently:
        delete_file(self.getInProgressYamlPath())
def result(self, forseRecalc=False, use_primary_metric=False):
    """Return the scalar result of the experiment.

    For a composite experiment the per-sub-experiment metric values are
    averaged (mean/max/min/std are also written to the summary YAML).
    For a single experiment the primary metric (or the configured
    ``experiment_result``) is extracted from the metrics dict.

    Returns None when the run was cancelled by the timer, has no metrics,
    is a hyperparameter search, or no metric value can be resolved;
    returns 1000000 when the metrics object has an unrecognized type.
    ``use_primary_metric`` is currently unused (kept for interface
    compatibility).
    """
    if self.canceled_by_timer:
        return
    pi = self.apply(True)
    if forseRecalc:
        self.cleanup()
    m = self.metrics()
    if m is None:
        return None
    if self.hyperparameters() is not None:
        # Hyperparameter searches do not reduce to a single scalar.
        return
    if len(pi) > 1:
        vals = []
        for i in pi:
            # NOTE(review): the guard `i.isCompleted() or True` is always
            # taken; kept verbatim so the isCompleted() call still happens.
            if i.isCompleted() or True:
                # NOTE(review): nesting of generateMetrics() under
                # forseRecalc reconstructed from flattened source — confirm.
                if forseRecalc:
                    i.cleanup()
                    i.generateMetrics()
                m = i.metrics()
                pm = i.config()["primary_metric"]
                # NOTE(review): substring test, not startswith — presumably
                # meant to strip a "val_" prefix; confirm.
                if "val_" in pm:
                    pm = pm[4:]
                mv = pm
                if pm + "_holdout" in m["allStages"]:
                    mv = m["allStages"][pm + "_holdout"]
                if "experiment_result" in i.config():
                    mv = m["allStages"][i.config()["experiment_result"]]
                vals.append(mv)
        m = np.mean(vals)
        save_yaml(
            self.getSummaryYamlPath(), {
                "mean": float(m),
                "max": float(np.max(vals)),
                "min": float(np.min(vals)),
                "std": float(np.std(vals))
            })
        return float(m)
    else:
        m = self.metrics()
        if isinstance(m, dict):
            pm = None
            if "primary_metric" in self.config():
                pm = self.config()["primary_metric"]
            else:
                cfg = self.parse_config()
                if cfg.final_metrics is not None and len(
                        cfg.final_metrics) > 0:
                    pm = cfg.final_metrics[0]
            # Fix: mv was previously unbound on the fall-through paths
            # below, raising NameError at `return mv`.
            mv = None
            if pm is not None:
                if "val_" in pm:
                    pm = pm[4:]
                mv = pm
                if pm + "_holdout" in m["allStages"]:
                    mv = m["allStages"][pm + "_holdout"]
                return mv
            elif "experiment_result" in self.config():
                am = self.config()["experiment_result"]
                if am in m["allStages"]:
                    return m["allStages"][am]
            elif True:
                # NOTE(review): pm is None on this branch, so the lookup can
                # only match a literal None key; kept for behavior parity.
                if pm in m["allStages"]:
                    return m["allStages"][pm]["mean"]
            return mv
        if isinstance(m, float):
            return m
        if isinstance(m, int):
            return m
        return 1000000