def save(self, knowl, who, most_recent=None, minor=False):
    """``who`` is the ID of the user who wants to save the knowl"""
    if most_recent is None:
        most_recent = self.get_knowl(knowl.id, ['id'] + self._default_fields, allow_deleted=False)
    new_knowl = most_recent is None
    if new_knowl:
        authors = []
    else:
        authors = most_recent.pop('authors', [])
    if not minor and who and who not in authors:
        authors = authors + [who]
    search_keywords = make_keywords(knowl.content, knowl.id, knowl.title)
    cat = extract_cat(knowl.id)
    # When renaming, source is set explicitly on the knowl
    if knowl.type == 0 and knowl.source is not None:
        typ, source, name = 0, knowl.source, knowl.source_name
    else:
        typ, source, name = extract_typ(knowl.id)
    links = extract_links(knowl.content)
    defines = extract_defines(knowl.content)
    # id, authors, cat, content, last_author, timestamp, title, status, type,
    # links, defines, source, source_name
    values = (knowl.id, authors, cat, knowl.content, who, knowl.timestamp,
              knowl.title, knowl.status, typ, links, defines, source, name,
              search_keywords)
    with DelayCommit(self):
        inserter = SQL("INSERT INTO kwl_knowls (id, {0}, _keywords) VALUES ({1})")
        inserter = inserter.format(
            SQL(', ').join(map(Identifier, self._default_fields)),
            SQL(", ").join(Placeholder() * (len(self._default_fields) + 2)))
        self._execute(inserter, values)
    self.cached_titles[knowl.id] = knowl.title
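For reference, a minimal standalone sketch of what the `Placeholder() * n` composition above produces; the three-field list is a hypothetical stand-in for `self._default_fields`:

from psycopg2.sql import SQL, Identifier, Placeholder

fields = ["authors", "cat", "content"]  # hypothetical stand-in for self._default_fields
inserter = SQL("INSERT INTO kwl_knowls (id, {0}, _keywords) VALUES ({1})").format(
    SQL(", ").join(map(Identifier, fields)),
    SQL(", ").join(Placeholder() * (len(fields) + 2)),  # +2 covers id and _keywords
)
# With a live connection conn, inserter.as_string(conn) renders as:
# INSERT INTO kwl_knowls (id, "authors", "cat", "content", _keywords) VALUES (%s, %s, %s, %s, %s)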
def save(self, knowl, who):
    """``who`` is the ID of the user who wants to save the knowl"""
    new_history_item = self.get_knowl(knowl.id, ['id'] + self._default_fields + ['history'])
    new_knowl = new_history_item is None
    if new_knowl:
        history = []
        authors = []
    else:
        history = new_history_item.pop('history')
        if history is not None:
            history += [new_history_item]
        else:
            history = []
        authors = new_history_item.pop('authors', [])
        if authors is None:
            authors = []
    if who and who not in authors:
        authors = authors + [who]
    search_keywords = make_keywords(knowl.content, knowl.id, knowl.title)
    cat = extract_cat(knowl.id)
    values = (authors, cat, knowl.content, who, knowl.quality, knowl.timestamp,
              knowl.title, history, search_keywords)
    with DelayCommit(self):
        inserter = SQL(
            "INSERT INTO kwl_knowls (id, {0}, history, _keywords) "
            "VALUES (%s, {1}) "
            "ON CONFLICT (id) DO UPDATE SET ({0}, history, _keywords) = ({1})"
        )
        inserter = inserter.format(
            SQL(', ').join(map(Identifier, self._default_fields)),
            SQL(", ").join(Placeholder() * (len(self._default_fields) + 2)))
        self._execute(inserter, (knowl.id,) + values + values)
    self.save_history(knowl, who)
    self.cached_titles[knowl.id] = knowl.title
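The upsert variant binds the same placeholder group twice, once for the INSERT arm and once for the ON CONFLICT update arm, which is why the parameters are `(knowl.id,) + values + values`. A sketch under the same hypothetical field list:

from psycopg2.sql import SQL, Identifier, Placeholder

fields = ["authors", "cat"]  # hypothetical stand-in for self._default_fields
upsert = SQL(
    "INSERT INTO kwl_knowls (id, {0}, history, _keywords) VALUES (%s, {1}) "
    "ON CONFLICT (id) DO UPDATE SET ({0}, history, _keywords) = ({1})"
).format(
    SQL(", ").join(map(Identifier, fields)),
    SQL(", ").join(Placeholder() * (len(fields) + 2)),  # fields plus history and _keywords
)
# The {1} group appears in both arms, so the value tuple must be supplied twice.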
def search_distinct(
    table,
    selecter,
    counter,
    iterator,
    query={},
    projection=1,
    limit=None,
    offset=0,
    sort=None,
    info=None,
    include_deleted=False,
    include_pending=False,
    more=False,
):
    """
    Replacement for db.*.search to account for versioning, return Web* objects.

    Doesn't support split_ors, raw or extra tables.
    Always computes count.

    INPUT:

    - ``table`` -- a search table, such as db.seminars or db.talks
    - ``counter`` -- an SQL object counting distinct entries
    - ``selecter`` -- an SQL object selecting distinct entries
    - ``iterator`` -- an iterator taking the same arguments as ``_search_iterator``
    """
    if offset < 0:
        raise ValueError("Offset cannot be negative")
    query = dict(query)
    if not include_deleted:
        query["deleted"] = {"$or": [False, {"$exists": False}]}
    all_cols = SQL(", ").join(map(IdentifierWrapper, ["id"] + table.search_cols))
    search_cols, extra_cols = table._parse_projection(projection)
    tbl = IdentifierWrapper(table.search_table)
    if limit is None:
        qstr, values = table._build_query(query, sort=sort)
    else:
        qstr, values = table._build_query(query, limit, offset, sort)
    prequery = {} if include_pending else {'$or': [{'display': True}, {'by_api': False}]}
    if prequery:
        # We filter the records before finding the most recent
        # (normal queries filter after finding the most recent).
        # This is mainly used for setting display=False or display=True.
        # We take advantage of the fact that the WHERE clause occurs just
        # after the table name in all of our query constructions.
        pqstr, pqvalues = table._parse_dict(prequery)
        if pqstr is not None:
            tbl = tbl + SQL(" WHERE {0}").format(pqstr)
            values = pqvalues + values
    if more is not False:  # might be an empty dictionary
        more, moreval = table._parse_dict(more)
        if more is None:
            more = Placeholder()
            moreval = [True]
        cols = SQL(", ").join(list(map(IdentifierWrapper, search_cols + extra_cols)) + [more])
        extra_cols = extra_cols + ("more",)
        values = moreval + values
    else:
        cols = SQL(", ").join(map(IdentifierWrapper, search_cols + extra_cols))
    fselecter = selecter.format(cols, all_cols, tbl, qstr)
    cur = table._execute(
        fselecter,
        values,
        buffered=False,
        slow_note=(
            table.search_table,
            "analyze",
            query,
            repr(projection),
            limit,
            offset,
        ),
    )
    results = iterator(cur, search_cols, extra_cols, projection)
    if info is not None:
        # caller is requesting count data
        nres = count_distinct(table, counter, query)
        if limit is None:
            info["number"] = nres
            return results
        if offset >= nres > 0:
            # We're passing in an info dictionary, so this is a front end query,
            # and the user has requested a start location larger than the number
            # of results. We adjust the results to be the last page instead.
            offset -= (1 + (offset - nres) // limit) * limit
            if offset < 0:
                offset = 0
            return search_distinct(
                table,
                selecter,
                counter,
                iterator,
                query,
                projection,
                limit,
                offset,
                sort,
                info,
            )
        info["query"] = dict(query)
        info["number"] = nres
        info["count"] = limit
        info["start"] = offset
        info["exact_count"] = True
    res = list(results)
    return res
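The prequery step above relies on psycopg2 Composable addition: appending a WHERE clause to the table expression pre-filters rows before the DISTINCT-ON selection runs. A minimal sketch with plain `Identifier` standing in for the project's `IdentifierWrapper`, and illustrative names (`talks`, `display`):

from psycopg2.sql import SQL, Identifier

tbl = Identifier("talks")  # illustrative table name
pqstr, pqvalues = SQL("{0} = %s").format(Identifier("display")), [True]
tbl = tbl + SQL(" WHERE {0}").format(pqstr)  # Composed: "talks" WHERE "display" = %s
# A selecter such as SQL("SELECT DISTINCT ON (id) {0} FROM {1}") then receives the
# pre-filtered table expression, and pqvalues must precede the other query parameters.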
def _reload_meta(self, meta_name, filename, search_table, sep="|"):
    meta_cols, _, jsonb_idx = _meta_cols_types_jsonb_idx(meta_name)
    # the column which will match search_table
    table_name = _meta_table_name(meta_name)
    table_name_idx = meta_cols.index(table_name)
    table_name_sql = Identifier(table_name)
    meta_name_sql = Identifier(meta_name)
    meta_name_hist_sql = Identifier(meta_name + "_hist")

    with open(filename, "r") as F:
        lines = [line for line in csv.reader(F, delimiter=str(sep))]

    if len(lines) == 0:
        return

    for line in lines:
        if line[table_name_idx] != search_table:
            raise RuntimeError(
                "in %s column %d (= %s) in the file "
                "doesn't match the search table name %s"
                % (filename, table_name_idx, line[table_name_idx], search_table))

    with DelayCommit(self, silence=True):
        # delete the current columns
        self._execute(
            SQL("DELETE FROM {} WHERE {} = %s").format(meta_name_sql, table_name_sql),
            [search_table],
        )

        # insert new columns
        with open(filename, "r") as F:
            try:
                cur = self._db.cursor()
                cur.copy_from(F, meta_name, columns=meta_cols, sep=sep)
            except Exception:
                self.conn.rollback()
                raise

        version = self._get_current_meta_version(meta_name, search_table) + 1

        # copy the new rows to history
        cols_sql = SQL(", ").join(map(Identifier, meta_cols))
        rows = self._execute(
            SQL("SELECT {} FROM {} WHERE {} = %s").format(
                cols_sql, meta_name_sql, table_name_sql),
            [search_table],
        )

        cols = meta_cols + ("version",)
        cols_sql = SQL(", ").join(map(Identifier, cols))
        place_holder = SQL(", ").join(Placeholder() * len(cols))
        query = SQL("INSERT INTO {} ({}) VALUES ({})").format(
            meta_name_hist_sql, cols_sql, place_holder)

        for row in rows:
            row = [Json(elt) if i in jsonb_idx else elt for i, elt in enumerate(row)]
            self._execute(query, row + [version])
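The bulk-reload step uses psycopg2's `cursor.copy_from`, which streams a delimited file directly into a table. A minimal standalone sketch; the connection parameters, file name, and column names are assumptions for illustration:

import psycopg2

conn = psycopg2.connect(dbname="lmfdb")  # hypothetical connection parameters
with open("meta_indexes.txt") as F:
    cur = conn.cursor()
    # stream pipe-separated rows straight into the named columns of meta_indexes
    cur.copy_from(F, "meta_indexes", columns=("index_name", "table_name"), sep="|")
conn.commit()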
def update(self, an_id: int = None, where_key: str = None, name: str = None,
           data=None, notes: str = None, modified_by: str = None,
           created_by: str = None, my_conn: Optional[dict] = None,
           t_log: Optional[TimeLogger] = None, verbose: bool = None):
    """Update an existing record in grouped_physics_object."""
    if my_conn is None:
        my_conn = self.my_conn
    else:
        self.my_conn = my_conn

    if verbose is True and t_log is None:
        t_log = TimeLogger()

    my_conn = my_connect(my_conn=my_conn, t_log=t_log, verbose=verbose)
    conn = my_conn['conn']
    db_params = my_conn['db_params']

    if where_key is None:
        where_key = self.id_name()

    if an_id is None:
        warn("No Record ID Specified", NoRecordIDError)
    else:
        if data is None:
            data = {}

        data.update(add_field('name', name))
        data.update(add_field('notes', notes))
        data.update(add_field('created_by', created_by))

        # If there is no data, then skip. Of course one could still change modified_by:
        if len(data) > 0 or modified_by is not None:
            # Always require a modified_by: since one can change data without
            # specifying a modifier, this is necessary. We don't check it before
            # the previous if, because we don't want to create a modified_by
            # entry if no data and no modified_by were set.
            if modified_by is None:
                modified_by = db_params['user']
            data.update(modified_by=modified_by)

            fields = data.keys()
            sql = "UPDATE {table} SET {fields} WHERE {pkey} = {a_value}"

            if verbose:
                print('Data:\n', data)
                print('\nFields:\n', fields)

            query = SQL(sql).format(
                table=Identifier(self.table_name),
                fields=SQL(', ').join(
                    Composed([Identifier(k), SQL(' = '), Placeholder(k)])
                    for k in fields
                ),
                pkey=Identifier(where_key),
                a_value=Placeholder('where_key')
            )

            data.update(where_key=an_id)

            cur = conn.cursor(cursor_factory=NamedTupleCursor)

            if verbose:
                print(query.as_string(conn))
                print(cur.mogrify(query, data))

            try:
                cur.execute(query, data)
            except OperationalError as error:
                print(error)

            conn.commit()
            cur.close()
            self.pull_data()
def save(self, data):
    if not self._rw_userdb:
        logger.info("no attempt to save, not enough privileges")
        return
    data = dict(data)  # copy
    uid = data.pop("username", None)
    if not uid:
        raise ValueError("data must contain username")
    if not self.user_exists(uid):
        raise ValueError("user does not exist")
    if not data:
        raise ValueError("no data to save")
    fields, values = zip(*data.items())
    updater = SQL("UPDATE userdb.users SET ({0}) = ({1}) WHERE username = %s").format(
        SQL(", ").join(map(Identifier, fields)),
        SQL(", ").join(Placeholder() * len(values)))
    self._execute(updater, list(values) + [uid])
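The `SET ({0}) = ({1})` form updates several columns in one statement. A sketch of how the composition renders, with hypothetical column names and values:

from psycopg2.sql import SQL, Identifier, Placeholder

fields, values = ("full_name", "email"), ("Ada Lovelace", "ada@example.org")
updater = SQL("UPDATE userdb.users SET ({0}) = ({1}) WHERE username = %s").format(
    SQL(", ").join(map(Identifier, fields)),
    SQL(", ").join(Placeholder() * len(values)),
)
# updater.as_string(conn) would render as:
# UPDATE userdb.users SET ("full_name", "email") = (%s, %s) WHERE username = %s
# so the execute parameters are the new column values followed by the username.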
def rename_table(self, old_name, new_name, commit=True):
    """
    Rename a table.

    INPUT:

    - ``old_name`` -- the current name of the table, as a string
    - ``new_name`` -- the new name of the table, as a string
    """
    assert old_name != new_name
    assert new_name not in self.tablenames
    with DelayCommit(self, commit, silence=True):
        table = self[old_name]
        # first rename indexes and constraints
        icols = [Identifier(s) for s in ["index_name", "table_name"]]
        ccols = [Identifier(s) for s in ["constraint_name", "table_name"]]
        rename_index = SQL("ALTER INDEX IF EXISTS {0} RENAME TO {1}")
        rename_constraint = SQL("ALTER TABLE {0} RENAME CONSTRAINT {1} TO {2}")
        for meta, mname, cols in [
            ("meta_indexes", "index_name", icols),
            ("meta_indexes_hist", "index_name", icols),
            ("meta_constraints", "constraint_name", ccols),
            ("meta_constraints_hist", "constraint_name", ccols),
        ]:
            indexes = list(self._execute(
                SQL("SELECT {0} FROM {1} WHERE table_name = %s").format(
                    Identifier(mname), Identifier(meta)
                ),
                [old_name],
            ))
            if indexes:
                rename_index_in_meta = SQL("UPDATE {0} SET ({1}) = ({2}) WHERE {3} = {4}")
                rename_index_in_meta = rename_index_in_meta.format(
                    Identifier(meta),
                    SQL(", ").join(cols),
                    SQL(", ").join(Placeholder() * len(cols)),
                    cols[0],
                    Placeholder(),
                )
                for old_index_name in indexes:
                    old_index_name = old_index_name[0]
                    new_index_name = old_index_name.replace(old_name, new_name)
                    self._execute(rename_index_in_meta, [new_index_name, new_name, old_index_name])
                    if meta == "meta_indexes":
                        self._execute(rename_index.format(
                            Identifier(old_index_name),
                            Identifier(new_index_name),
                        ))
                    elif meta == "meta_constraints":
                        self._execute(rename_constraint.format(
                            Identifier(old_name),
                            Identifier(old_index_name),
                            Identifier(new_index_name),
                        ))
        else:
            print("Renamed all indexes, constraints and the corresponding metadata")

        # rename meta_tables and meta_tables_hist
        rename_table_in_meta = SQL("UPDATE {0} SET name = %s WHERE name = %s")
        for meta in ["meta_tables", "meta_tables_hist"]:
            self._execute(rename_table_in_meta.format(Identifier(meta)), [new_name, old_name])
        else:
            print("Renamed all entries in meta_tables(_hist)")

        rename = SQL("ALTER TABLE {0} RENAME TO {1}")
        # rename extra table
        if table.extra_table is not None:
            old_extra = table.extra_table
            assert old_extra == old_name + "_extras"
            new_extra = new_name + "_extras"
            self._execute(rename.format(Identifier(old_extra), Identifier(new_extra)))
            print("Renamed {0} to {1}".format(old_extra, new_extra))
        for suffix in ["", "_counts", "_stats"]:
            self._execute(rename.format(Identifier(old_name + suffix), Identifier(new_name + suffix)))
            print("Renamed {0} to {1}".format(old_name + suffix, new_name + suffix))

        # rename oldN tables
        for backup_number in range(table._next_backup_number()):
            for ext in ["", "_extras", "_counts", "_stats"]:
                old_name_old = "{0}{1}_old{2}".format(old_name, ext, backup_number)
                new_name_old = "{0}{1}_old{2}".format(new_name, ext, backup_number)
                if self._table_exists(old_name_old):
                    self._execute(rename.format(Identifier(old_name_old), Identifier(new_name_old)))
                    print("Renamed {0} to {1}".format(old_name_old, new_name_old))
        for ext in ["", "_extras", "_counts", "_stats"]:
            old_name_tmp = "{0}{1}_tmp".format(old_name, ext)
            new_name_tmp = "{0}{1}_tmp".format(new_name, ext)
            if self._table_exists(old_name_tmp):
                self._execute(rename.format(Identifier(old_name_tmp), Identifier(new_name_tmp)))
                print("Renamed {0} to {1}".format(old_name_tmp, new_name_tmp))

        # re-initialize the table
        tabledata = self._execute(
            SQL(
                "SELECT name, label_col, sort, count_cutoff, id_ordered, "
                "out_of_order, has_extras, stats_valid, total, include_nones "
                "FROM meta_tables WHERE name = %s"
            ),
            [new_name],
        ).fetchone()
        table = self._search_table_class_(self, *tabledata)
        self.__dict__[new_name] = table
        self.tablenames.append(new_name)
        self.tablenames.remove(old_name)
        self.tablenames.sort()
def update(self, an_id: int = None, where_key: str = None, name: str = None,
           data=None, latex: str = None, notes: str = None, unit_id: int = None,
           image: bytes = None, template_id: int = None, dimensions: int = None,
           modified_by: str = None, created_by: str = None, verbose: bool = None):
    """Update an existing record in math_object."""
    if where_key is None:
        where_key = self.id_name

    if an_id is None:
        warn("No Record ID Specified", NoRecordIDError)
    else:
        if data is None:
            data = {}

        db_params = config()

        data.update(self._add_field('name', name))
        data.update(self._add_field('notes', notes))
        data.update(self._process_latex(latex, image, template_id, verbose))
        data.update(self._add_field('dimensions', dimensions))
        data.update(self._add_field('unit_id', unit_id))
        data.update(self._add_field('created_by', created_by))

        # If there is no data, then skip. Of course one could still change modified_by:
        if len(data) > 0 or modified_by is not None:
            # Always require a modified_by: since one can change data without
            # specifying a modifier, this is necessary. We don't check it before
            # the previous if, because we don't want to create a modified_by
            # entry if no data and no modified_by were set.
            if modified_by is None:
                modified_by = db_params['user']
            data.update(modified_by=modified_by)

            fields = data.keys()
            sql = "UPDATE {table} SET {fields} WHERE {pkey} = {a_value} RETURNING *"
            query = SQL(sql).format(
                table=Identifier(self.table),
                fields=SQL(', ').join(
                    Composed([Identifier(k), SQL(' = '), Placeholder(k)])
                    for k in fields),
                pkey=Identifier(where_key),
                a_value=Placeholder('where_key'))

            data.update(where_key=an_id)

            conn = connect(**db_params)
            cur = conn.cursor(cursor_factory=NamedTupleCursor)

            if verbose:
                print(query.as_string(conn))
                print(cur.mogrify(query, data))

            try:
                cur.execute(query, data)
            except OperationalError as error:
                print(error)

            new_record = cur.fetchall()
            conn.commit()
            cur.close()
            conn.close()

            self.last_inserted = self.as_columns(new_record)
            self.records = self.all_records()
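Unlike the positional `Placeholder()` snippets above, this pattern gives each placeholder a key, so `execute` can bind parameters from a dict. A minimal sketch; the table name, columns, and payload are hypothetical:

from psycopg2.sql import SQL, Identifier, Placeholder, Composed

data = {"name": "velocity", "modified_by": "editor"}  # hypothetical update payload
query = SQL("UPDATE {table} SET {fields} WHERE {pkey} = {a_value} RETURNING *").format(
    table=Identifier("math_object"),
    fields=SQL(", ").join(
        Composed([Identifier(k), SQL(" = "), Placeholder(k)]) for k in data
    ),
    pkey=Identifier("id"),
    a_value=Placeholder("where_key"),
)
data.update(where_key=42)
# Each Placeholder(k) renders as %(k)s, so cur.execute(query, data) binds by key.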