Example #1
    def _generate_model(self):
        """Return the database table model object if the table exists in the database."""
        if self._database.table_exists(self.table_name):
            with self._database as db:
                self.model = generate_models(
                    db, literal_column_names=True, table_names=[self.table_name]
                )[self.table_name]
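A minimal standalone sketch of the same pattern outside a class; the database file name app.db and the table name log are assumptions for illustration:

from peewee import SqliteDatabase
from playhouse.reflection import generate_models

db = SqliteDatabase("app.db")  # assumed database file
if db.table_exists("log"):     # assumed table name
    # Reflect only the one table we care about instead of the whole schema.
    Log = generate_models(db, literal_column_names=True, table_names=["log"])["log"]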
Example #2
from peewee import DatabaseError, SqliteDatabase
from playhouse.reflection import generate_models


def initiate_models(db_path):
    """Initiate models from the database file."""
    db = SqliteDatabase(db_path)

    try:
        models = generate_models(db)
    except (TypeError, DatabaseError):
        print('File "{}" does not seem to be a valid database file.'.format(
            db_path))
        print("Aborting snapshot.")
        return

    return models
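The mapping returned by generate_models is keyed by table name; a brief sketch of consuming the result of initiate_models, where snapshot.db and the users table are assumed examples:

models = initiate_models("snapshot.db")   # assumed database file
if models and "users" in models:          # "users" is an assumed table name
    User = models["users"]                # reflected peewee model class
    print(User._meta.sorted_field_names)  # columns discovered by reflection
    print(User.select().count())          # query through the reflected model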
Example #3
def _update_db_10(db):
    log.info("Migrating to DB Version 10...")

    models = generate_models(db)
    migrator: SqliteMigrator = SqliteMigrator(db)

    if "track" in models["offlinecache"]._meta.sorted_field_names:
        log.info("Drop in OfflineCache: track_id...")
        migrate(migrator.drop_column("offlinecache", "track_id"))

    db.stop()
    db.start()

    Settings.update(version=10).execute()
Example #4
    def partial_properties(cls):
        """
        This method finds all the properties that still need to be filled with
        geographical data. In our case it means that any geographical column is null.
        We use this method to find out which rows (i.e. properties) of our properties table
        still need API calls to be filled.

        Note we need to use Python introspection to get all the dynamically added fields.
        In fact, calling Property.bedrooms works but Property.gas_station doesn't.

        :return: All the properties that have null for some geographical column.
        """
        _Property = generate_models(db)['properties']
        any_null = reduce(operator.or_, [
            _Property._meta.fields[cat].is_null()
            for cat in Category.categories()
        ])
        return _Property.select().where(any_null)
Example #5
def file_to_db(input_,
               connection,
               table_name,
               schema=None,
               lowercase=False,
               create=False,
               drop=False,
               truncate=False,
               primary_key=None,
               load_data=True,
               use_copy=True,
               chunk_size=100000,
               show_progress=False):
    """Loads a table from a file to a database.

    Loads a file or a `~astropy.table.Table` object into a database. If
    ``create=True`` a new table will be created, with column types matching
    the table ones. All columns are initially defined as ``NULL``.

    By default, the data are loaded using the ``COPY`` method to optimise
    performance. This can be disabled if needed.

    Parameters
    ----------
    input_ : str or ~astropy.table.Table
        The path to a file that will be opened using
        `Table.read <astropy.table.Table.read>` or an astropy
        `~astropy.table.Table`.
    connection : .PeeweeDatabaseConnection
        The Peewee database connection to use (SQLAlchemy connections are
        not supported).
    table_name : str
        The name of the table into which the data will be loaded, or which
        will be created.
    schema : str
        The schema in which the table lives.
    lowercase : bool
        If `True`, all column names will be converted to lower case.
    create : bool
        Creates the table if it does not exist.
    drop : bool
        Drops the table before recreating it. Implies ``create=True``. Note
        that a ``CASCADE`` drop will be executed. Use with caution.
    truncate : bool
        Truncates the table before loading the data but maintains the existing
        columns.
    primary_key : str
        The name of the column to mark as primary key (ignored if the table
        is not being created).
    load_data : bool
        If `True`, loads the data from the input table into the database;
        otherwise only creates the table.
    use_copy : bool
        When `True` (recommended) uses the SQL ``COPY`` command to load the data
        from a CSV stream.
    chunk_size : int
        How many rows to load at once.
    show_progress : bool
        If `True`, shows a progress bar. Requires the ``progressbar2`` module
        to be installed.

    Returns
    -------
    model : ~peewee:Model
        The model for the table created.

    """

    import astropy.table

    # If we drop we need to re-create but there is no need to truncate.
    if drop:
        create = True
        truncate = False

    if isinstance(input_, str) and os.path.isfile(input_):
        table = astropy.table.Table.read(input_)
    else:
        assert isinstance(input_, astropy.table.Table)
        table = input_

    if drop:
        drop_table(table_name, connection, schema=schema)

    if table_exists(table_name, connection, schema=schema):
        Model = generate_models(connection,
                                schema=schema,
                                table_names=[table_name])[table_name]
    else:
        if not create:
            raise ValueError(f'table {table_name} does not exist. '
                             'Call the function with create=True '
                             'if you want to create it.')

        Model = create_model_from_table(table_name,
                                        table,
                                        schema=schema,
                                        lowercase=lowercase,
                                        primary_key=primary_key)
        Model._meta.database = connection

        Model.create_table()

    if truncate:
        Model.truncate_table()

    if load_data:
        if use_copy:
            copy_data(table,
                      connection,
                      table_name,
                      schema=schema,
                      chunk_size=chunk_size,
                      show_progress=show_progress)
        else:
            bulk_insert(table,
                        connection,
                        Model,
                        chunk_size=chunk_size,
                        show_progress=show_progress)

    return Model
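A hedged sketch of calling file_to_db; the FITS file name, table name, schema, and the connection object below are assumptions for illustration, not part of the example above:

# `connection` is assumed to be an existing .PeeweeDatabaseConnection.
Model = file_to_db("targets.fits",        # hypothetical input file
                   connection,
                   "targets",             # hypothetical table name
                   schema="catalogdb",    # hypothetical schema
                   create=True,
                   primary_key="id",
                   show_progress=True)
print(Model.select().count())             # rows loaded into the new table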
Example #6
                           port=5000,
                           user="******",
                           password="******")
db.connect()

# db.execute_sql("drop view vsources cascade")
db.execute_sql(
    "create or replace view vsources as select s.id as package_id, d.name as distro, s.name, s.suite, s.architecture from sources s join distributions d on s.distribution = d.id"
)

# db.execute_sql("drop view vresults")
db.execute_sql(
    "create or replace view vresults as select * from vsources v join results r using (package_id)"
)

models = generate_models(db, include_views=True)
globals().update(models)

# with db.atomic() as txn:
#    suites = txn.execute_sql("select distinct suite from vsources where distro = 'archlinux'").fetchall()
#    print(suites)

distros = ["coreboot", "opensuse", "openwrt", "debian", "archlinux"]

for distro in distros:
    print(f"rendering { distro }")
    context = {}
    context["distro"] = distro
    context["suites"] = {}

    query = db.execute_sql(
Example #7
            query = Stats_Raw.select().where((Stats_Raw.read_hour.between(
                read_hour - window,
                read_hour + window))).order_by(fn.Rand()).limit(limit)
        else:
            query = Stats_Raw.select().where(
                Stats_Raw.read_hour == read_hour).order_by(
                    fn.Rand()).limit(limit)
        results = self.transform_query_to_list(query)
        if len(results) > 0:
            uris = [o.uri for o in results]
            return uris


if __name__ == "__main__":

    models = generate_models(db)
    print('MODEL TAG')
    print_model(models['tag'])

    mydb = DatabaseHandler()

    print('ALL TAGS')
    tags = mydb.get_all_tags()
    pprint.pprint(tags)

    print('creating')
    mydb.create_tag('super_uid2', 'super_media')

    print('searching')
    for tag in Tag.select().where(Tag.uid.contains('super')):
        print(tag)
Example #8
def _update_db_9(db):
    log.info("Migrating to DB Version 9...")

    models = generate_models(db)

    migrator: SqliteMigrator = SqliteMigrator(db)

    db.create_tables([File, TrackToFile])
    db.stop()
    db.start()

    files: List[File] = []
    track_to_files: List[TrackToFile] = []
    file_id = 1

    if "file" in models["track"]._meta.sorted_field_names:
        log.info("Generating File and TrackToFile objects...")
        for track in models["track"].select():
            path = track.file

            file = next((f for f in files if f.path == path), None)

            if File.select().where(File.path == path).count() > 0:
                log.info("Path already existing in db: {}".format(path))
                file = File.select().where(File.path == path).get()
            elif not file:
                file = File(path=path, modified=track.modified, id=file_id)
                files.append(file)
                file_id += 1

            if TrackToFile.select().join(Track).where(
                    TrackToFile.track == track.id).count() > 0:
                log.info("TrackToFile already existing in db: {}".format(path))
                continue

            track_to_file = TrackToFile(track=track, file=file, start_at=0)
            track_to_files.append(track_to_file)

        log.info("Inserting File and TrackToFile objects...")
        File.bulk_create(files, batch_size=300)
        TrackToFile.bulk_create(track_to_files, batch_size=300)

    field = ForeignKeyField(File, null=True, field=File.id)

    if "cached_file" not in models["offlinecache"]._meta.sorted_field_names:
        log.info("Rename in OfflineCache: file to cached_file...")
        migrate(migrator.rename_column("offlinecache", "file",
                                       "cached_file"), )

    if "original_file" not in models["offlinecache"]._meta.sorted_field_names:
        log.info("Add in OfflineCache: original_file_id...")
        migrate(migrator.add_column("offlinecache", "original_file_id", field))

    db.stop()
    db.start()
    models = generate_models(db)

    if "file" in models["track"]._meta.sorted_field_names:
        log.info("Migrating OfflineCache...")
        for cache in models["offlinecache"].select():
            file_query = File.select().where(File.path == cache.track.file)
            if file_query.count() < 1:
                cache.delete_instance()
                continue

            file = file_query.get()
            cache.original_file = file
            cache.save(only=cache.dirty_fields)

    if "file" in models["track"]._meta.sorted_field_names:
        log.info("Drop in Track: file...")
        migrate(migrator.drop_column("track", "file"))

    if "modified" in models["track"]._meta.sorted_field_names:
        log.info("Drop in Track: modified...")
        migrate(migrator.drop_column("track", "modified"))

    if "track_id" in models["offlinecache"]._meta.sorted_field_names:
        log.info("Drop in OfflineCache: track_id...")
        migrate(migrator.drop_column("offlinecache", "track_id"))

    migrate(migrator.add_not_null("offlinecache", "original_file_id"))

    db.stop()
    db.start()

    log.info("Reset modified on all m4b files")
    File.update(modified=0).where(fn.Lower(
        File.path).endswith("m4b")).execute()

    db.stop()
    db.start()

    Settings.update(version=9).execute()