Example #1
File: base.py Project: asym57/lmfdb
    def _revert_meta(self, meta_name, search_table, version=None):
        meta_cols, _, jsonb_idx = _meta_cols_types_jsonb_idx(meta_name)
        # the column which will match search_table
        table_name = _meta_table_name(meta_name)

        table_name_sql = Identifier(table_name)
        meta_name_sql = Identifier(meta_name)
        meta_name_hist_sql = Identifier(meta_name + "_hist")

        # by default, goes back one step
        currentversion = self._get_current_meta_version(
            meta_name, search_table)
        if currentversion == -1:
            raise RuntimeError("No history to revert")
        if version is None:
            version = max(0, currentversion - 1)

        with DelayCommit(self, silence=True):
            # delete current rows
            self._execute(
                SQL("DELETE FROM {} WHERE {} = %s").format(
                    meta_name_sql, table_name_sql),
                [search_table],
            )

            # copy data from history
            cols_sql = SQL(", ").join(map(Identifier, meta_cols))
            rows = self._execute(
                SQL("SELECT {} FROM {} WHERE {} = %s AND version = %s").format(
                    cols_sql, meta_name_hist_sql, table_name_sql),
                [search_table, version],
            )

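            # Composable supports multiplication: Placeholder() * n yields n
            # copies of %s, which SQL(", ").join turns into "%s, %s, ..."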
            place_holder = SQL(", ").join(Placeholder() * len(meta_cols))
            query = SQL("INSERT INTO {} ({}) VALUES ({})").format(
                meta_name_sql, cols_sql, place_holder)

            cols = meta_cols + ("version", )
            cols_sql = SQL(", ").join(map(Identifier, cols))
            place_holder = SQL(", ").join(Placeholder() * len(cols))
            query_hist = SQL("INSERT INTO {} ({}) VALUES ({})").format(
                meta_name_hist_sql, cols_sql, place_holder)
            for row in rows:
                row = [
                    Json(elt) if i in jsonb_idx else elt
                    for i, elt in enumerate(row)
                ]
                self._execute(query, row)
                self._execute(query_hist, row + [currentversion + 1])
Example #2
File: knowl.py Project: sanni85/lmfdb
    def save(self, knowl, who):
        """who is the ID of the user, who wants to save the knowl"""
        new_history_item = self.get_knowl(knowl.id, ['id'] +
                                          self._default_fields + ['history'])
        new_knowl = new_history_item is None
        if new_knowl:
            history = []
            authors = []
        else:
            history = new_history_item.pop('history')
            if history is not None:
                history += [new_history_item]
            else:
                history = []
            authors = new_history_item.pop('authors', [])
            if authors is None:
                authors = []

        if who and who not in authors:
            authors = authors + [who]

        search_keywords = make_keywords(knowl.content, knowl.id, knowl.title)
        cat = extract_cat(knowl.id)
        values = (authors, cat, knowl.content, who, knowl.quality,
                  knowl.timestamp, knowl.title, history, search_keywords)
        with DelayCommit(self):
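            # the same placeholder list fills both the VALUES clause and the
            # ON CONFLICT ... DO UPDATE SET tuple, hence values is passed twice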
            inserter = SQL(
                "INSERT INTO kwl_knowls (id, {0}, history, _keywords) VALUES (%s, {1}) ON CONFLICT (id) DO UPDATE SET ({0}, history, _keywords) = ({1})"
            )
            inserter = inserter.format(
                SQL(', ').join(map(Identifier, self._default_fields)),
                SQL(", ").join(Placeholder() *
                               (len(self._default_fields) + 2)))
            self._execute(inserter, (knowl.id, ) + values + values)
            self.save_history(knowl, who)
Example #3
    def add_feature(self, columns, values, columns_with_types):
        # Validation
        error = False
        for i in range(1, len(columns)):
            value = values[i]
            column_name = columns[i]

            if value is None:
                continue

            if columns_with_types[column_name] == "dict":
                dict_ = Dict.get(layer_id=self.lid, column_name=column_name)
                allowed_values = dict_.get_values()
                if value not in allowed_values:
                    error = f"value '{value}' is not allowed for dict-column '{column_name}'. allowed values: '{allowed_values}'"
                    break

            elif not isinstance(value, TYPES[columns_with_types[column_name]]):
                error = f"value '{value}' has invalid type for column '{column_name}' ({columns_with_types[column_name]})"
                break

            elif columns_with_types[
                    column_name] == 'timestamp without time zone':
                values[i] = datetime.fromtimestamp(value)

        if error:
            raise ValueError(error)
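        # compose the INSERT safely: table/column names via Identifier, values
        # via %s placeholders; RETURNING id yields the new feature's id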
        query_string = SQL(
            "INSERT INTO {} ({}) values ({})  RETURNING id;").format(
                Identifier(self.name),
                SQL(', ').join(map(Identifier, columns)),
                SQL(', ').join(Placeholder() * len(columns)))
        cursor = self.execute(query_string, values)
        return self.as_geojson_by_fid(cursor.fetchone()[0])
Example #4
    def _reload_meta(self, meta_name, filename, search_table, sep="|"):
        meta_cols, _, jsonb_idx = _meta_cols_types_jsonb_idx(meta_name)
        # the column which will match search_table
        table_name = _meta_table_name(meta_name)

        table_name_idx = meta_cols.index(table_name)
        table_name_sql = Identifier(table_name)
        meta_name_sql = Identifier(meta_name)
        meta_name_hist_sql = Identifier(meta_name + "_hist")

        with open(filename, "r") as F:
            lines = [line for line in csv.reader(F, delimiter=str(sep))]
            if len(lines) == 0:
                return
            for line in lines:
                if line[table_name_idx] != search_table:
                    raise RuntimeError(
                        "in %s, column %d (= %s) of the file "
                        "doesn't match the search table name %s"
                        % (filename, table_name_idx, line[table_name_idx], search_table)
                    )

        with DelayCommit(self, silence=True):
            # delete the current columns
            self._execute(
                SQL("DELETE FROM {} WHERE {} = %s").format(meta_name_sql, table_name_sql),
                [search_table],
            )

            # insert new columns
            with open(filename, "r") as F:
                try:
                    cur = self._db.cursor()
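                    # COPY streams the file straight into the table; meta_name
                    # and meta_cols come from internal metadata, not user input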
                    cur.copy_from(F, meta_name, columns=meta_cols, sep=sep)
                except Exception:
                    self.conn.rollback()
                    raise

            version = self._get_current_meta_version(meta_name, search_table) + 1

            # copy the new rows to history
            cols_sql = SQL(", ").join(map(Identifier, meta_cols))
            rows = self._execute(
                SQL("SELECT {} FROM {} WHERE {} = %s").format(cols_sql, meta_name_sql, table_name_sql),
                [search_table],
            )

            cols = meta_cols + ("version",)
            cols_sql = SQL(", ").join(map(Identifier, cols))
            place_holder = SQL(", ").join(Placeholder() * len(cols))
            query = SQL("INSERT INTO {} ({}) VALUES ({})").format(meta_name_hist_sql, cols_sql, place_holder)

            for row in rows:
                row = [
                    Json(elt) if i in jsonb_idx else elt for i, elt in enumerate(row)
                ]
                self._execute(query, row + [version])
Example #5
 def value_to_sql(self, value, column_name=None):
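     # Expressions compose to SQL plus their own args; plain values become a
     # single %s placeholder, converted via the column when one is known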
     if isinstance(value, Expression):
         sql, args = value.to_sql()
         return SQL('{}').format(sql), args
     else:
         if self.relation and column_name:
             column = self.relation.get_column(column_name)
             value = column.to_db_value(value)
         return Placeholder(), (value, )
Example #6
def filter_sql_injection(clause, col, col_type, op, table):
    """
    INPUT:

    - ``clause`` -- a plain string, obtained from the website UI so NOT SAFE
    - ``col`` -- an SQL Identifier for a column in a table
    - ``col_type`` -- a string giving the type of the column
    - ``op`` -- a string giving the operator to use
      (`=` or one of the values in the ``postgres_infix_ops`` dictionary above)
    - ``table`` -- a PostgresTable object for determining which columns are valid
    """
    # Our approach:
    # * strip all whitespace: this makes some of the analysis below easier
    #   and is okay since we support implicit multiplication at a higher level
    # * Identify numbers (using a float regular expression) and call int or float on them as appropriate
    # * whitelist names of columns and wrap them all in identifiers;
    # * no other alphabetic characters allowed: this prevents the use
    #   of any SQL functions or commands
    # * The only other allowed characters are +-*^/().
    # * We also prohibit --, /* and */ since these are comments in SQL
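    # Worked example (illustrative): clause = "2*dim+1" with op "=" on a table
    # whose search columns include "dim" produces
    #   processed = [Placeholder(), SQL("*"), Identifier("dim"), SQL("+"), Placeholder()]
    #   values = [2, 1]
    # which composes to: <col> = %s*"dim"+%s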
    clause = re.sub(r"\s+", "", clause)
    # It's possible that some search columns include numbers (dim1_factor in av_fq_isog for example)
    # However, we don't support columns that are entirely numbers (such as some in smf_dims)
    # since there's no way to distinguish them from integers
    # We also want to include periods as part of the word/number character set, since they can appear in floats
    FLOAT_RE = r"^((\d+([.]\d*)?)|([.]\d+))([eE][-+]?\d+)?$"
    ARITH_RE = r"^[+*/^()-]+$"
    processed = []
    values = []
    pieces = re.split(r"([A-Za-z_.0-9]+)", clause)
    for i, piece in enumerate(pieces):
        if not piece:  # skip empty strings at beginning/end
            continue
        if i % 2:  # a word/number
            if piece in table.search_cols:
                processed.append(Identifier(piece))
            elif re.match(FLOAT_RE, piece):
                processed.append(Placeholder())
                if any(c in piece for c in "Ee."):
                    values.append(float(piece))
                else:
                    values.append(int(piece))
            else:
                raise SearchParsingError("%s: %s is not a column of %s" %
                                         (clause, piece, table.search_table))
        else:
            if re.match(ARITH_RE,
                        piece) and not any(comment in piece
                                           for comment in ["--", "/*", "*/"]):
                processed.append(SQL(piece))
            else:
                raise SearchParsingError(
                    "%s: invalid characters %s (only +*-/^() allowed)" %
                    (clause, piece))
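    # "{0} %s {1}" % op fills in the operator via plain string formatting first
    # (op is assumed to come from the fixed set described in the docstring);
    # the {0}/{1} slots are then composed from psycopg2 Composables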
    return SQL("{0} %s {1}" % op).format(col, SQL("").join(processed)), values
Example #7
def load_table(cursor, tablename, columns, data, unique_key=None):
    unique_key = unique_key or (get_pkey_column(cursor, tablename), )

    # FIXME: Most of the branching here can be replaced
    #        by an upsert in postgres >9.4
    if unique_key == (None, ):
        sql = SQL("""
        INSERT INTO {} ({})
        VALUES({})
        """).format(Identifier(tablename),
                    SQL(', ').join(map(Identifier, columns)),
                    SQL(', ').join(Placeholder() for c in columns))
    else:
        unique_as_idents = list(map(Identifier, unique_key))
        unique_as_placeholders = [Placeholder() for c in unique_key]
        unique_matches_condition = SQL(' AND ').join(
            SQL('{} = {}').format(ident, ph)
            for ident, ph in zip(unique_as_idents, unique_as_placeholders))

        sql = SQL("""
        INSERT INTO {0} ({1})
        SELECT {2}
        WHERE NOT EXISTS (
            SELECT * FROM {0} WHERE {3}
        )""").format(Identifier(tablename),
                     SQL(', ').join(map(Identifier, columns)),
                     SQL(', ').join(Placeholder() for c in columns),
                     unique_matches_condition)

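    # each row's parameters are the column values, plus (for the NOT EXISTS
    # variant) the unique-key values repeated for the inner WHERE clause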
    for row in data:
        if unique_key == (None, ):
            params = row
        else:
            row_data = dict(zip(columns, row))
            unique_values = tuple(
                map(lambda column: row_data[column], unique_key))
            params = row + unique_values
        try:
            cursor.execute(sql, params)
        except psycopg2.errors.UniqueViolation as e:
            logging.error(e)
Example #8
File: knowl.py Project: sanni85/lmfdb
 def delete(self, knowl):
     """deletes this knowl from the db. (DANGEROUS, ADMIN ONLY!)"""
     with DelayCommit(self):
         inserter = SQL(
             "INSERT INTO kwl_deleted (id, {0}) VALUES (%s, {1})").format(
                 SQL(', ').join(map(Identifier, self._default_fields)),
                 SQL(', ').join(Placeholder() * len(self._default_fields)))
         values = self.get_knowl(knowl.id)
         self._execute(inserter, [knowl.id] +
                       [values[i] for i in self._default_fields])
         deletor = SQL("DELETE FROM kwl_knowls WHERE id = %s")
         self._execute(deletor, (knowl.id, ))
Example #9
    def insert(self, subset=None):
        """Build INSERT statement on current table for all columns, or subset if
        specified.

        :param iterable subset: collection of columns to specify in query
        :return: (*psycopg2.sql.SQL*) -- template for insert statement
        """
        n_values = len(subset) if subset is not None else len(self.columns)
        return SQL("INSERT INTO {table} ({fields}) VALUES ({values})").format(
            table=self._table(),
            fields=self._columns(subset),
            values=SQL(",").join(Placeholder() * n_values),
        )
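
A minimal usage sketch (hypothetical names: "loader" is an instance of the class above, "cur" a psycopg2 cursor), pairing the returned template with a matching tuple of values:

    stmt = loader.insert(subset=["id", "name"])  # INSERT INTO <table> ("id", "name") VALUES (%s, %s)
    cur.execute(stmt, (1, "alpha"))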
Example #10
def update_task(user: namedtuple, task_id: int, details: dict) -> dict:
    """
    Update task details such as name, estimation, etc.
    """
    db = get_db()
    with db.cursor() as cursor:
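        # Placeholder(field) renders as %(field)s, so the composed SET items
        # and the raw %(task_id)s / %(user_id)s markers below all draw their
        # values from the single params dict passed to execute()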
        changes = [
            SQL('{field} = {value}').format(field=Identifier(field),
                                            value=Placeholder(field))
            for field in details.keys()
        ]

        query = SQL('''
        WITH prepare_task_for_update(task_id) as (
            SELECT task.task_id
            FROM task
            INNER JOIN user_to_team
                ON user_to_team.team_id = task.team_id
                AND user_to_team.user_id = %(user_id)s
            WHERE task.task_id = %(task_id)s
        )
        UPDATE task
        SET {changes}
        FROM prepare_task_for_update
        WHERE task.task_id = prepare_task_for_update.task_id
        RETURNING task.task_id, 
            task.project_id,
            task.team_id,
            task.name,
            task.description,
            task.estimation,
            task.status,
            task.created_at,
            task.created_by,
            task.due_date,
            task.assignee_id
        ;
        ''').format(changes=SQL(', ').join(changes))

        params = {**details, 'task_id': task_id, 'user_id': user.user_id}
        cursor.execute(query, params)
        db.commit()

        updated_task = cursor.fetchone()
        if not updated_task:
            raise EntityNotFound()

        return updated_task
Example #11
def get_updates(config, conn):
    update_list = []
    with conn.cursor() as cursor:
        query = SQL("""
            select id,
                   satellite_id,
                   full_path
            from downloader_history
            where tiles is null
              and status_id in (2, 5, 6, 7)
              {}
            """)
        if config.site_id is not None:
            query = query.format(SQL("and site_id = {}").format(Placeholder()))
            cursor.execute(query, (config.site_id, ))
        else:
            query = query.format(SQL(""))
            cursor.execute(query)

        products = cursor.fetchall()
        conn.commit()

    for (product_id, satellite_id, full_path) in products:
        try:
            if satellite_id == 1:
                tiles = get_sentinel_tiles(full_path)
            elif satellite_id == 2:
                tiles = get_landsat_tiles(full_path)
            else:
                print("Unknown satellite id {} for product {}".format(
                    satellite_id, full_path))
                continue

            if tiles is None:
                print("Unable to recognize product {}".format(full_path))
            elif len(tiles) > 0:
                update_list.append((product_id, tiles))
            else:
                print("No tiles found for product {}".format(full_path))
                update_list.append((product_id, []))
        except Exception:
            print("While checking product {}".format(full_path))
            traceback.print_exc()

    return update_list
Example #12
    def save(self, data):
        if not self._rw_userdb:
            logger.info("no attempt to save, not enough privileges")
            return

        data = dict(data)  # copy
        uid = data.pop("username", None)
        if not uid:
            raise ValueError("data must contain username")
        if not self.user_exists(uid):
            raise ValueError("user does not exist")
        if not data:
            raise ValueError("no data to save")
        fields, values = zip(*data.items())
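        # multi-column form SET (f1, f2, ...) = (%s, %s, ...): field names are
        # quoted via Identifier, values passed separately as parameters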
        updater = SQL(
            "UPDATE userdb.users SET ({0}) = ({1}) WHERE username = %s"
        ).format(
            SQL(", ").join(map(Identifier, fields)),
            SQL(", ").join(Placeholder() * len(values)))
        self._execute(updater, list(values) + [uid])
Example #13
    def save(self, knowl, who, most_recent=None, minor=False):
        """who is the ID of the user, who wants to save the knowl"""
        if most_recent is None:
            most_recent = self.get_knowl(knowl.id,
                                         ['id'] + self._default_fields,
                                         allow_deleted=False)
        new_knowl = most_recent is None
        if new_knowl:
            authors = []
        else:
            authors = most_recent.pop('authors', [])

        if not minor and who and who not in authors:
            authors = authors + [who]

        search_keywords = make_keywords(knowl.content, knowl.id, knowl.title)
        cat = extract_cat(knowl.id)
        # When renaming, source is set explicitly on the knowl
        if knowl.type == 0 and knowl.source is not None:
            typ, source, name = 0, knowl.source, knowl.source_name
        else:
            typ, source, name = extract_typ(knowl.id)
        links = extract_links(knowl.content)
        if typ == 2:  # column description
            defines = [knowl.id.split(".")[-1]]
        else:
            defines = extract_defines(knowl.content)
        # id, authors, cat, content, last_author, timestamp, title, status, type, links, defines, source, source_name
        values = (knowl.id, authors, cat, knowl.content, who, knowl.timestamp,
                  knowl.title, knowl.status, typ, links, defines, source, name,
                  search_keywords)
        with DelayCommit(self):
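            # columns are id + _default_fields + _keywords, hence
            # len(self._default_fields) + 2 placeholders to match values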
            inserter = SQL(
                "INSERT INTO kwl_knowls (id, {0}, _keywords) VALUES ({1})")
            inserter = inserter.format(
                SQL(', ').join(map(Identifier, self._default_fields)),
                SQL(", ").join(Placeholder() *
                               (len(self._default_fields) + 2)))
            self._execute(inserter, values)
        self.cached_titles[knowl.id] = knowl.title
Example #14
File: database.py Project: ousamg/MegaQC
def postgres_create_user(username, conn, cur, password=None):
    """
    Create a postgres user, including a password if provided.
    """
    from psycopg2.errors import DuplicateObject
    from psycopg2.sql import SQL, Identifier, Placeholder

    # Run the CREATE USER
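    # the username must be composed as an Identifier (identifiers cannot be
    # passed as query parameters); the password is an ordinary literal parameter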
    try:
        if password:
            cur.execute(
                SQL("CREATE USER {} WITH ENCRYPTED PASSWORD {}").format(
                    Identifier(username), Placeholder()),
                [password],
            )
        else:
            cur.execute(SQL("CREATE USER {}").format(Identifier(username)))

        print(f"User {username} created successfully")
    except DuplicateObject:
        # it's okay if user already exists
        print(f"User {username} already exists")
Example #15
 def upsertRecord(self, record):
     assert isinstance(record, records.Record)
     mData = makeData(record)
     table = Identifier(type(record).__name__.lower())
     aFields = []
     mArgs = {}
     for sField, sType, value in mData.values():
         aFields.append(sField)
         mArgs[sField] = value
     fields = SQL(', ').join(Identifier(sField) for sField in aFields)
     values = SQL(', ').join(Placeholder(sField) for sField in aFields)
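     # the named placeholders are reused verbatim in DO UPDATE SET below, so a
     # single mArgs dict supplies both sides of the upsert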
     if record.aPKs:
         action = (SQL('DO UPDATE SET ({}) = ({})').format(fields, values)
                   if aFields else SQL('DO NOTHING'))
         pks = SQL(', ').join(Identifier(sPK) for sPK in record.aPKs)
         conflict = SQL('ON CONFLICT ({}) {}').format(pks, action)
     else:
         conflict = SQL('')
     query = SQL('INSERT INTO {} ({}) VALUES ({}) {};').format(
         table, fields, values, conflict)
     self._execute(query, mArgs)
     log.debug('record "{}" upserted'.format(record))
Example #16
def search_distinct(
    table,
    selecter,
    counter,
    iterator,
    query={},
    projection=1,
    limit=None,
    offset=0,
    sort=None,
    info=None,
    include_deleted=False,
    include_pending=False,
    more=False,
):
    """
    Replacement for db.*.search to account for versioning, returning Web* objects.

    Doesn't support split_ors, raw or extra tables.  Always computes count.

    INPUT:

    - ``table`` -- a search table, such as db.seminars or db.talks
    - ``counter`` -- an SQL object counting distinct entries
    - ``selecter`` -- an SQL object selecting distinct entries
    - ``iterator`` -- an iterator taking the same arguments as ``_search_iterator``
    """
    if offset < 0:
        raise ValueError("Offset cannot be negative")
    query = dict(query)
    if not include_deleted:
        query["deleted"] = {"$or": [False, {"$exists": False}]}
    all_cols = SQL(", ").join(
        map(IdentifierWrapper, ["id"] + table.search_cols))
    search_cols, extra_cols = table._parse_projection(projection)
    tbl = IdentifierWrapper(table.search_table)
    if limit is None:
        qstr, values = table._build_query(query, sort=sort)
    else:
        qstr, values = table._build_query(query, limit, offset, sort)
    prequery = {} if include_pending else {'$or': [{'display': True}, {'by_api': False}]}
    if prequery:
        # We filter the records before finding the most recent (normal queries filter after finding the most recent)
        # This is mainly used for setting display=False or display=True
        # We take advantage of the fact that the WHERE clause occurs just after the table name in all of our query constructions
        pqstr, pqvalues = table._parse_dict(prequery)
        if pqstr is not None:
            tbl = tbl + SQL(" WHERE {0}").format(pqstr)
            values = pqvalues + values
    if more is not False:  # 'more' might be an empty dictionary
        more, moreval = table._parse_dict(more)
        if more is None:
            more = Placeholder()
            moreval = [True]

        cols = SQL(", ").join(
            list(map(IdentifierWrapper, search_cols + extra_cols)) + [more])
        extra_cols = extra_cols + ("more", )
        values = moreval + values
    else:
        cols = SQL(", ").join(map(IdentifierWrapper, search_cols + extra_cols))
    fselecter = selecter.format(cols, all_cols, tbl, qstr)
    cur = table._execute(
        fselecter,
        values,
        buffered=False,
        slow_note=(
            table.search_table,
            "analyze",
            query,
            repr(projection),
            limit,
            offset,
        ),
    )
    results = iterator(cur, search_cols, extra_cols, projection)
    if info is not None:
        # caller is requesting count data
        nres = count_distinct(table, counter, query)
        if limit is None:
            info["number"] = nres
            return results
        if offset >= nres > 0:
            # We're passing in an info dictionary, so this is a front end query,
            # and the user has requested a start location larger than the number
            # of results.  We adjust the results to be the last page instead.
            offset -= (1 + (offset - nres) // limit) * limit
            if offset < 0:
                offset = 0
            return search_distinct(
                table,
                selecter,
                counter,
                iterator,
                query,
                projection,
                limit,
                offset,
                sort,
                info,
            )
        info["query"] = dict(query)
        info["number"] = nres
        info["count"] = limit
        info["start"] = offset
        info["exact_count"] = True
    res = list(results)
    return res
Example #17
File: database.py Project: kwang23/lmfdb
    def rename_table(self, old_name, new_name, commit=True):
        """
        Rename a table.

        INPUT:

        - ``old_name`` -- the current name of the table, as a string
        - ``new_name`` -- the new name of the table, as a string
        """
        assert old_name != new_name
        assert new_name not in self.tablenames
        with DelayCommit(self, commit, silence=True):
            table = self[old_name]
            # first rename indexes and constraints
            icols = [Identifier(s) for s in ["index_name", "table_name"]]
            ccols = [Identifier(s) for s in ["constraint_name", "table_name"]]
            rename_index = SQL("ALTER INDEX IF EXISTS {0} RENAME TO {1}")
            rename_constraint = SQL("ALTER TABLE {0} RENAME CONSTRAINT {1} TO {2}")
            for meta, mname, cols in [
                ("meta_indexes", "index_name", icols),
                ("meta_indexes_hist", "index_name", icols),
                ("meta_constraints", "constraint_name", ccols),
                ("meta_constraints_hist", "constraint_name", ccols),
            ]:
                indexes = list(self._execute(
                    SQL("SELECT {0} FROM {1} WHERE table_name = %s").format(
                        Identifier(mname), Identifier(meta)
                    ),
                    [old_name],
                ))
                if indexes:
                    rename_index_in_meta = SQL("UPDATE {0} SET ({1}) = ({2}) WHERE {3} = {4}")
                    rename_index_in_meta = rename_index_in_meta.format(
                        Identifier(meta),
                        SQL(", ").join(cols),
                        SQL(", ").join(Placeholder() * len(cols)),
                        cols[0],
                        Placeholder(),
                    )
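                    # parameter order matches SET ({1}) = ({2}) WHERE {3} = {4}:
                    # [new index/constraint name, new table name, old name]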
                    for old_index_name in indexes:
                        old_index_name = old_index_name[0]
                        new_index_name = old_index_name.replace(old_name, new_name)
                        self._execute(rename_index_in_meta, [new_index_name, new_name, old_index_name])
                        if meta == "meta_indexes":
                            self._execute(rename_index.format(
                                Identifier(old_index_name),
                                Identifier(new_index_name),
                            ))
                        elif meta == "meta_constraints":
                            self._execute(rename_constraint.format(
                                Identifier(old_name),
                                Identifier(old_index_name),
                                Identifier(new_index_name),
                            ))
            else:
                print("Renamed all indexes, constraints and the corresponding metadata")

            # rename meta_tables and meta_tables_hist
            rename_table_in_meta = SQL("UPDATE {0} SET name = %s WHERE name = %s")
            for meta in ["meta_tables", "meta_tables_hist"]:
                self._execute(rename_table_in_meta.format(Identifier(meta)), [new_name, old_name])
            else:
                print("Renamed all entries meta_tables(_hist)")

            rename = SQL("ALTER TABLE {0} RENAME TO {1}")
            # rename extra table
            if table.extra_table is not None:
                old_extra = table.extra_table
                assert old_extra == old_name + "_extras"
                new_extra = new_name + "_extras"
                self._execute(rename.format(Identifier(old_extra), Identifier(new_extra)))
                print("Renamed {0} to {1}".format(old_extra, new_extra))
            for suffix in ["", "_counts", "_stats"]:
                self._execute(rename.format(Identifier(old_name + suffix), Identifier(new_name + suffix)))
                print("Renamed {0} to {1}".format(old_name + suffix, new_name + suffix))

            # rename oldN tables
            for backup_number in range(table._next_backup_number()):
                for ext in ["", "_extras", "_counts", "_stats"]:
                    old_name_old = "{0}{1}_old{2}".format(old_name, ext, backup_number)
                    new_name_old = "{0}{1}_old{2}".format(new_name, ext, backup_number)
                    if self._table_exists(old_name_old):
                        self._execute(rename.format(Identifier(old_name_old), Identifier(new_name_old)))
                        print("Renamed {0} to {1}".format(old_name_old, new_name_old))
            for ext in ["", "_extras", "_counts", "_stats"]:
                old_name_tmp = "{0}{1}_tmp".format(old_name, ext)
                new_name_tmp = "{0}{1}_tmp".format(new_name, ext)
                if self._table_exists(old_name_tmp):
                    self._execute(rename.format(Identifier(old_name_tmp), Identifier(new_name_tmp)))
                    print("Renamed {0} to {1}".format(old_name_tmp, new_name_old))

            # reinitialize the table object under its new name
            tabledata = self._execute(
                SQL(
                    "SELECT name, label_col, sort, count_cutoff, id_ordered, "
                    "out_of_order, has_extras, stats_valid, total, include_nones "
                    "FROM meta_tables WHERE name = %s"
                ),
                [new_name],
            ).fetchone()
            table = self._search_table_class_(self, *tabledata)
            self.__dict__[new_name] = table
            self.tablenames.append(new_name)
            self.tablenames.remove(old_name)
            self.tablenames.sort()
Example #18
    def update(self,
               an_id: int = None,
               where_key: str = None,
               name: str = None,
               data=None,
               latex: str = None,
               notes: str = None,
               unit_id: int = None,
               image: bytes = None,
               template_id: int = None,
               dimensions: int = None,
               modified_by: str = None,
               created_by: str = None,
               verbose: bool = None):
        """Insert New Record Into math_object"""

        if where_key is None:
            where_key = self.id_name

        if an_id is None:
            warn("No Record ID Specified", NoRecordIDError)
        else:
            if data is None:
                data = {}

            db_params = config()

            data.update(self._add_field('name', name))
            data.update(self._add_field('notes', notes))
            data.update(self._process_latex(latex, image, template_id,
                                            verbose))
            data.update(self._add_field('dimensions', dimensions))
            data.update(self._add_field('unit_id', unit_id))
            data.update(self._add_field('created_by', created_by))

            # If there is no data, then skip. Of course one could still change modified by:
            if len(data) > 0 or modified_by is not None:

                # Always require a modified_by: since one can change data without
                # specifying a modifier, this is necessary. We don't check it before
                # the previous if, because we don't want to set modified_by when no
                # data and no modified_by were given.
                if modified_by is None:
                    modified_by = db_params['user']

                data.update(modified_by=modified_by)

                fields = data.keys()

                sql = "UPDATE {table} SET {fields} WHERE {pkey} = {a_value} RETURNING *"

                query = SQL(sql).format(
                    table=Identifier(self.table),
                    fields=SQL(', ').join(
                        Composed([Identifier(k),
                                  SQL(' = '),
                                  Placeholder(k)]) for k in fields),
                    pkey=Identifier(where_key),
                    a_value=Placeholder('where_key'))
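                # each SET item pairs Identifier(k) with the named placeholder
                # %(k)s; the WHERE value travels in data under 'where_key'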

                data.update(where_key=an_id)

                conn = connect(**db_params)
                cur = conn.cursor(cursor_factory=NamedTupleCursor)

                if verbose:
                    print(query.as_string(conn))
                    print(cur.mogrify(query, data))

                try:
                    cur.execute(query, data)
                except OperationalError as error:
                    print(error)

                new_record = cur.fetchall()

                conn.commit()

                cur.close()
                conn.close()

                self.last_inserted = self.as_columns(new_record)

                self.records = self.all_records()

    def update(self, an_id: int = None, where_key: str = None, name: str = None, data=None, notes: str = None,
               modified_by: str = None, created_by: str = None, my_conn: Optional[dict] = None,
               t_log: Optional[TimeLogger] = None, verbose: bool = None):
        """Update an existing record in grouped_physics_object"""

        if my_conn is None:
            my_conn = self.my_conn
        else:
            self.my_conn = my_conn

        if verbose is True and t_log is None:
            t_log = TimeLogger()

        my_conn = my_connect(my_conn=my_conn, t_log=t_log, verbose=verbose)
        conn = my_conn['conn']
        db_params = my_conn['db_params']

        if where_key is None:
            where_key = self.id_name()

        if an_id is None:
            warn("No Record ID Specified", NoRecordIDError)
        else:
            if data is None:
                data = {}

            data.update(add_field('name', name))
            data.update(add_field('notes', notes))
            data.update(add_field('created_by', created_by))

            # If there is no data, then skip. Of course one could still change modified by:
            if len(data) > 0 or modified_by is not None:

                # Always require a modified_by: since one can change data without
                # specifying a modifier, this is necessary. We don't check it before
                # the previous if, because we don't want to set modified_by when no
                # data and no modified_by were given.
                if modified_by is None:
                    modified_by = db_params['user']

                data.update(modified_by=modified_by)

                fields = data.keys()

                sql = "UPDATE {table} SET {fields} WHERE {pkey} = {a_value}"

                if verbose:
                    print('Data:\n', data)
                    print('\nFields:\n', fields)

                query = SQL(sql).format(
                    table=Identifier(self.table_name),
                    fields=SQL(', ').join(
                        Composed([Identifier(k), SQL(' = '), Placeholder(k)]) for k in fields
                    ),
                    pkey=Identifier(where_key),
                    a_value=Placeholder('where_key')
                )

                data.update(where_key=an_id)

                cur = conn.cursor(cursor_factory=NamedTupleCursor)

                if verbose:
                    print(query.as_string(conn))
                    print(cur.mogrify(query, data))

                try:
                    cur.execute(query, data)
                except OperationalError as error:
                    print(error)

                conn.commit()

                cur.close()

                self.pull_data()