def htmlRepr(self, obj, top_rec_obj):
    """Return an (html_text, css_kind) pair representing this attribute of obj.

    Looks up self.mName in obj; sequences (self.mIsSeq) are rendered as a
    comma-joined list of item representations.  Missing/None values render
    as ("-", "none"), empty representations as (" ", "none").  Any failure
    is logged and rendered as ("???", "none") so one bad attribute cannot
    break the whole record view.  (top_rec_obj is accepted for interface
    compatibility; it is not used here.)
    """
    # Pre-bind so the except-handler can always format it, even if the
    # very first statement raises (original left it possibly unbound).
    val_obj = None
    try:
        val_obj = obj.get(self.mName) if obj else None
        repr_text = None
        # Integer zero is falsy, so the truthiness branch below would show
        # it as missing; render it explicitly as "0".  The original used
        # `val_obj is 0`, which relies on CPython small-int caching and is
        # a SyntaxWarning since Python 3.8; this check keeps the exact
        # semantics (int 0 only — bool False is excluded, as before).
        if type(val_obj) is int and val_obj == 0:
            return ("0", self.getMainKind())
        if val_obj:
            if self.mIsSeq:
                seq = []
                for it_obj in val_obj:
                    it_repr = self._htmlRepr(it_obj)
                    if it_repr:
                        seq.append(it_repr)
                repr_text = ', '.join(seq)
            else:
                repr_text = self._htmlRepr(val_obj)
        if repr_text is None:
            return ("-", "none")
        if not repr_text:
            return (" ", "none")
        return (repr_text, self.getMainKind())
    except Exception:
        logException("Problem with attribute %s: obj = %r"
            % (self.getFullName(), val_obj))
        return ("???", "none")
def parseSeq(self, cond_seq):
    """Parse a sequence of condition instructions into self.mSeq.

    First drops entries of self.mCompData whose unit is no longer imported
    by cond_seq.  Then walks the instructions: each "import" is resolved
    via self.importUnit (a duplicate import is recorded in self.mBadIdxs);
    every other instruction is compiled with self.parse, failures being
    logged and recorded in self.mBadIdxs.
    """
    imported_names = {
        instr[1] for instr in cond_seq if instr[0] == "import"}
    # Purge compiled data for units that this sequence no longer imports.
    for stale_name in set(self.mCompData.keys()) - imported_names:
        del self.mCompData[stale_name]
    seen_imports = set()
    for pos, instr in enumerate(cond_seq):
        if instr[0] == "import":
            unit_name = instr[1]
            if unit_name in seen_imports:
                # A unit may be imported only once per sequence.
                self.mBadIdxs.append(pos)
            else:
                # The import context is the conjunction of all conditions
                # parsed so far.
                self.importUnit(pos, unit_name,
                    self.mCondEnv.joinAnd(self.mSeq))
                seen_imports.add(unit_name)
            continue
        try:
            self.mSeq.append(self.parse(instr))
        except Exception:
            logException("Bad instruction: %r" % instr, error_mode=False)
            self.mBadIdxs.append(pos)
def __init__(self, cond_env, code):
    """Parse *code* (Python source text) into fragments for cond_env.

    On a syntax error, self.mError is set to
    ("Syntax error", lineno, offset) and self.mFragments becomes None.
    On any later processing error: if no error was recorded the exception
    is logged and re-raised; otherwise self.mFragments is set to None.
    """
    self.mCondEnv = cond_env
    self.mFragments = []
    self.mCode = code
    self.mError = None
    self.mImportFragments = {}
    try:
        top_d = ast.parse(self.mCode)
    except SyntaxError as err:
        # err.text / err.lineno / err.offset may be None for some syntax
        # errors; guard before arithmetic (the original crashed on None).
        txt_len = len((err.text or "").rstrip())
        self.mError = ("Syntax error",
            max(0, err.lineno or 0),
            max(0, min(err.offset or 0, txt_len - 1)))
        self.mFragments = None
        return
    try:
        last_instr = len(top_d.body) - 1
        for idx, instr_d in enumerate(top_d.body):
            self.processInstr(instr_d, idx == last_instr)
        if self.mError is None:
            self.arrangeDiapasons()
    except Exception:
        if self.mError is None:
            logException("Exception on parse tree code")
            # Bare raise preserves the original traceback
            # (the original `raise err` re-wrapped it).
            raise
        self.mFragments = None
def cacheFilter(self, filter_name, cond_seq, time_label):
    """Compile cond_seq into an operational environment and cache it
    under filter_name.

    Returns True on success; on a compilation failure the problem is
    logged (best-effort, non-fatal) and False is returned without
    touching the cache.
    """
    try:
        op_env = CondOpEnv(self.mCondEnv, None, cond_seq, name=filter_name)
        cond_entry = (op_env, self.checkResearchBlock(cond_seq), time_label)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behavior for real errors.
        logException("Bad filter %s compilation for ws=%s"
            % (filter_name, self.mWS.getName()), error_mode=False)
        return False
    self.mFilterCache[filter_name] = cond_entry
    return True
def __init__(self, application, vault_dir):
    """Scan vault_dir and load every active dataset.

    A dataset directory qualifies when it contains an "active" marker
    file and a "dsinfo.json" descriptor.  XL-datasets are loaded first,
    then workspaces (collected in a second pass, since they may refer to
    already-loaded datasets).  Loading is best-effort: a dataset that
    fails to load is logged and skipped.
    """
    self.mApp = application
    self.mVaultDir = os.path.abspath(vault_dir)
    self.mLock = Lock()
    self.mDataSets = dict()
    workspaces = []
    names = [[], []]  # names[0]: XL dataset names, names[1]: WS names
    for active_path in glob(self.mVaultDir + "/*/active"):
        ds_path = os.path.dirname(active_path)
        info_path = ds_path + "/dsinfo.json"
        if not os.path.exists(info_path):
            continue
        with open(info_path, "r", encoding="utf-8") as inp:
            # json.load reads the stream directly (was json.loads(inp.read()))
            ds_info = json.load(inp)
        if ds_info["kind"] == "xl":
            assert ds_info["name"] not in self.mDataSets
            try:
                ds_h = XLDataset(self, ds_info, ds_path)
            except Exception:
                # Was a bare `except:`; keep best-effort loading without
                # swallowing SystemExit/KeyboardInterrupt.
                logException("Bad XL-dataset load: " + ds_info["name"])
                continue
            self.mDataSets[ds_info["name"]] = ds_h
            names[0].append(ds_info["name"])
        else:
            assert ds_info["kind"] == "ws"
            workspaces.append((ds_info, ds_path))
    for ds_info, ds_path in workspaces:
        assert ds_info["name"] not in self.mDataSets
        try:
            ws_h = Workspace(self, ds_info, ds_path)
        except Exception:
            # Was a bare `except:` (see above).
            logException("Bad WS-dataset load: " + ds_info["name"])
            continue
        self.mDataSets[ds_info["name"]] = ws_h
        names[1].append(ds_info["name"])
    logging.info("Vault %s started with %d/%d datasets"
        % (self.mVaultDir, len(names[0]), len(names[1])))
    if len(names[0]) > 0:
        logging.info("XL-datasets: " + " ".join(names[0]))
    if len(names[1]) > 0:
        logging.info("WS-datasets: " + " ".join(names[1]))
def finishRequest(cls, serv_h, rq_path, rq_args, data_vault):
    """Top-level request boundary: delegate to cls._finishRequest and map
    any failure to the not-found response.

    The exception is logged before falling back, so errors stay visible.
    """
    try:
        return cls._finishRequest(serv_h, rq_path, rq_args, data_vault)
    except Exception:
        # Was a bare `except:`, which would also catch SystemExit and
        # KeyboardInterrupt and keep the server from shutting down cleanly.
        logException("Exception on evaluation request")
        return cls.notFoundResponse(serv_h)