Example #1
def connect(url=None,
            schema=None,
            reflect_metadata=True,
            engine_kwargs=None,
            reflect_views=True,
            ensure_schema=True,
            row_type=row_type):
    """ Opens a new connection to a database.

    *url* can be any valid `SQLAlchemy engine URL`_. If *url* is not given,
    the *DATABASE_URL* environment variable is used instead. Returns an
    instance of :py:class:`Database <dataset.Database>`. Set *reflect_metadata*
    to False if you don't want the entire database schema to be pre-loaded;
    this significantly speeds up connecting to large databases with many
    tables. *reflect_views* can be set to False if you don't want views to be
    loaded. Additionally, *engine_kwargs* will be passed directly to
    SQLAlchemy; e.g. setting *engine_kwargs={'pool_recycle': 3600}* avoids a
    `DB connection timeout`_. Set *row_type* to an alternate dict-like class
    to change the type of container rows are stored in::

        db = dataset.connect('sqlite:///factbook.db')

    .. _SQLAlchemy Engine URL: http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine
    .. _DB connection timeout: http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle
    """
    if url is None:
        url = os.environ.get('DATABASE_URL', 'sqlite://')

    return Database(url,
                    schema=schema,
                    reflect_metadata=reflect_metadata,
                    engine_kwargs=engine_kwargs,
                    reflect_views=reflect_views,
                    ensure_schema=ensure_schema,
                    row_type=row_type)
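
For reference, a short usage sketch matching the signature above (the
OrderedDict row type and the pool_recycle value are illustrative choices,
not defaults):

import dataset
from collections import OrderedDict

# Connect without pre-loading the schema of a large database, recycle
# pooled connections hourly, and store rows as OrderedDict instances.
db = dataset.connect('sqlite:///factbook.db',
                     reflect_metadata=False,
                     engine_kwargs={'pool_recycle': 3600},
                     row_type=OrderedDict)
table = db['countries']  # the table is reflected (or created) on first access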
Example #2
def connect(
    url=None,
    schema=None,
    engine_kwargs=None,
    ensure_schema=True,
    row_type=row_type,
    sqlite_wal_mode=True,
    on_connect_statements=None,
):
    """Opens a new connection to a database.

    *url* can be any valid `SQLAlchemy engine URL`_. If *url* is not given,
    the *DATABASE_URL* environment variable is used instead. Returns an
    instance of :py:class:`Database <dataset.Database>`. Additionally,
    *engine_kwargs* will be passed directly to SQLAlchemy; e.g. setting
    *engine_kwargs={'pool_recycle': 3600}* avoids a `DB connection timeout`_.
    Set *row_type* to an alternate dict-like class to change the type of
    container rows are stored in::

        db = dataset.connect('sqlite:///factbook.db')

    One of the main features of `dataset` is to create tables and columns
    automatically as data is inserted. This behaviour can optionally be
    disabled via the `ensure_schema` argument. It can also be overridden in
    many of the data manipulation methods using the `ensure` flag.

    If you want to run custom SQLite PRAGMAs when a database connection is
    opened, you can pass them via *on_connect_statements* as a sequence of
    strings. You can view a full `list of PRAGMAs here`_.

    .. _SQLAlchemy Engine URL: http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine
    .. _DB connection timeout: http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle
    .. _list of PRAGMAs here: https://www.sqlite.org/pragma.html
    """
    if url is None:
        url = os.environ.get("DATABASE_URL", "sqlite://")

    return Database(
        url,
        schema=schema,
        engine_kwargs=engine_kwargs,
        ensure_schema=ensure_schema,
        row_type=row_type,
        sqlite_wal_mode=sqlite_wal_mode,
        on_connect_statements=on_connect_statements,
    )
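
A minimal sketch of this newer signature, assuming a local SQLite file; the
two PRAGMAs are illustrative examples, not required settings:

import dataset

# Keep WAL mode on (the default), disable automatic schema creation, and
# run two custom PRAGMAs on every new connection.
db = dataset.connect(
    'sqlite:///factbook.db',
    ensure_schema=False,
    sqlite_wal_mode=True,
    on_connect_statements=[
        'PRAGMA synchronous = NORMAL',
        'PRAGMA foreign_keys = ON',
    ],
)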
Example #3
    parser.add_argument('--cfg', type=str, default='config/default.yaml', help='config *.yaml path')
    opt = parser.parse_args()

    with open(opt.cfg, encoding='utf-8') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)  # config dict

    os.chdir(cfg['root_dir'])
    experiment_dir = os.path.join(cfg['root_dir'], cfg['experiment_name'])

    if not os.path.exists(experiment_dir):
        os.mkdir(experiment_dir)

    train_data_csv = os.path.join(experiment_dir, 'ir_train.csv')
    test_data_csv = os.path.join(experiment_dir, 'ir_test.csv')

    Database(data_path=cfg['ir_train_dir'], save_path=train_data_csv)
    Database(data_path=cfg['ir_test_dir'], save_path=test_data_csv)

    save_m = os.path.join(experiment_dir, cfg['model_name'] + '.h5')  # path for saving the trained model
    save_h = os.path.join(experiment_dir, cfg['model_name'] + '_train_history')  # path for saving the training history

    IMAGE_SIZE = cfg['image_size']    # image size used by the model
    BATCH = cfg['batch_size']
    EPOCH = cfg['epoch']
    CLASS_MODE = cfg['class_mode']
    COLOR_MODE = cfg['color_mode']
    LR = cfg['lr']
    # model
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg['gputouse'])  # select the GPU to use

    adam = Adam(lr=LR)
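
For context, the cfg dictionary loaded from config/default.yaml must provide
at least the keys referenced above. A hypothetical equivalent, written as the
dict yaml.load() would return (every value below is a placeholder, not taken
from the source project):

cfg = {
    'root_dir': '/path/to/experiments',   # placeholder paths throughout
    'experiment_name': 'ir_baseline',
    'ir_train_dir': '/path/to/ir_train',
    'ir_test_dir': '/path/to/ir_test',
    'model_name': 'my_model',
    'image_size': 224,
    'batch_size': 32,
    'epoch': 50,
    'class_mode': 'categorical',
    'color_mode': 'rgb',
    'lr': 1e-4,
    'gputouse': 0,
}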
Example #4
    d_type = cfg['d_type']    # distance metric type
    depth = cfg['depth']      # retrieval depth; None returns all results (P11 requires depth=None)

    feature_path = os.path.join(experiment_dir, 'features')
    if not os.path.exists(feature_path):
        os.mkdir(feature_path)

    Q_PATH = cfg['q_path']  # query dataset (images to search with)
    S_PATH = cfg['s_path']  # search dataset (images to search against)
    q_name = cfg['q_name']
    s_name = cfg['s_name']
    save_Q_csv = os.path.join(experiment_dir, 'ir_Q.csv')
    save_S_csv = os.path.join(experiment_dir, 'ir_S.csv')
    save_result_txt = os.path.join(experiment_dir, 'ir_query_result.txt')

    qdb = Database(data_path=Q_PATH, save_path=save_Q_csv)
    sdb = Database(data_path=S_PATH, save_path=save_S_csv)

    model = load_irmodels(cfg)
    feat_model = Model(inputs=model.input, outputs=model.get_layer(pick_layer).output)

    print(feat_model.summary())

    # feature extraction / loading
    qsample = feature_samples(model_name=model_name,
                              pick_layer=pick_layer,
                              model=feat_model,
                              db=qdb,
                              image_size=image_size,
                              color_mode=COLOR_MODE,
                              dataset=q_name,