def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('archived_services', sa.Column('status', sa.String(), nullable=True))
    op.add_column('services', sa.Column('status', sa.String(), nullable=True))

    op.create_check_constraint(
        "ck_services_status",
        "services",
        "status in ('disabled', 'enabled', 'published')"
    )

    op.create_check_constraint(
        "ck_archived_services_status",
        "archived_services",
        "status in ('disabled', 'enabled', 'published')"
    )

    services = table('services', column('status', String))

    archived_services = table('archived_services', column('status', String))

    op.execute(
        services.update()
        .values({'status': op.inline_literal('enabled')})
    )

    op.execute(
        archived_services.update()
        .values({'status': op.inline_literal('enabled')})
    )
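A matching downgrade() is not shown in the snippet; a minimal sketch, assuming the constraint names from the upgrade() above (the type_ argument tells Alembic which kind of constraint to drop):

def downgrade():
    op.drop_constraint("ck_archived_services_status", "archived_services", type_="check")
    op.drop_constraint("ck_services_status", "services", type_="check")
    op.drop_column('archived_services', 'status')
    op.drop_column('services', 'status')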
def upgrade():
    connection = op.get_bind()

    plan_table = table(
        'plan',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
    )

    project_plan_table = table(
        'project_plan',
        sa.Column('plan_id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
    )

    for project_plan in connection.execute(project_plan_table.select()):
        print("Migrating ProjectPlan plan_id=%s project_id=%s" % (
            project_plan.plan_id, project_plan.project_id))

        connection.execute(
            plan_table.update().where(
                plan_table.c.id == project_plan.plan_id,
            ).values({
                plan_table.c.project_id: project_plan.project_id,
            })
        )
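For large tables the same backfill can be expressed set-based rather than row-by-row; a sketch under the assumption that the two table() stubs above are in scope (1.x API, where .as_scalar() produces the correlated scalar subquery):

def upgrade():
    connection = op.get_bind()
    # Correlated subquery: for each plan row, pull project_id from the
    # matching project_plan row -- one UPDATE instead of N round trips.
    # Note: plans without a project_plan row would end up with NULL.
    subq = sa.select([project_plan_table.c.project_id]).where(
        project_plan_table.c.plan_id == plan_table.c.id
    ).as_scalar()
    connection.execute(plan_table.update().values(project_id=subq))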
def upgrade():
    ip_policy = table('quark_ip_policy',
                      column('id', sa.String(length=36)),
                      column('size', INET()))
    ip_policy_cidrs = table('quark_ip_policy_cidrs',
                            column('ip_policy_id', sa.String(length=36)),
                            column('cidr', sa.String(length=64)))
    connection = op.get_bind()

    # 1. Retrieve all ip_policy_cidr rows.
    results = connection.execute(
        select([ip_policy_cidrs.c.ip_policy_id, ip_policy_cidrs.c.cidr])
    ).fetchall()

    # 2. Determine IPSet for each IP Policy.
    ipp = dict()
    for ip_policy_id, cidr in results:
        if ip_policy_id not in ipp:
            ipp[ip_policy_id] = netaddr.IPSet()
        ipp[ip_policy_id].add(cidr)

    # 3. Populate size for each IP Policy.
    for ip_policy_id in ipp:
        connection.execute(ip_policy.update().values(
            size=ipp[ip_policy_id].size).where(
                ip_policy.c.id == ip_policy_id))
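The size written into each row comes from netaddr's IPSet; a quick standalone illustration:

import netaddr

s = netaddr.IPSet()
s.add('192.168.0.0/24')  # 256 addresses
s.add('10.0.0.0/30')     # 4 addresses
print(s.size)            # 260 -- total number of addresses in the set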
def upgrade():
    campaign_anon_view = table("campaign_anon_view", column("created_at", sa.DateTime), column("datetime", sa.DateTime))
    op.add_column(
        "campaign_anon_view", sa.Column("datetime", sa.DateTime(), nullable=False, server_default=sa.func.now())
    )
    op.execute(campaign_anon_view.update().values(datetime=campaign_anon_view.c.created_at))
    op.alter_column("campaign_anon_view", "datetime", server_default=None)
    op.create_index(op.f("ix_campaign_anon_view_datetime"), "campaign_anon_view", ["datetime"], unique=False)

    campaign_view = table("campaign_view", column("created_at", sa.DateTime), column("datetime", sa.DateTime))
    op.add_column("campaign_view", sa.Column("datetime", sa.DateTime(), nullable=False, server_default=sa.func.now()))
    op.execute(campaign_view.update().values(datetime=campaign_view.c.created_at))
    op.alter_column("campaign_view", "datetime", server_default=None)
    op.create_index(op.f("ix_campaign_view_datetime"), "campaign_view", ["datetime"], unique=False)

    job_impression = table("job_impression", column("created_at", sa.DateTime), column("datetime", sa.DateTime))
    op.add_column("job_impression", sa.Column("datetime", sa.DateTime(), nullable=False, server_default=sa.func.now()))
    op.execute(job_impression.update().values(datetime=job_impression.c.created_at))
    op.alter_column("job_impression", "datetime", server_default=None)
    op.create_index(op.f("ix_job_impression_datetime"), "job_impression", ["datetime"], unique=False)

    job_view_session = table("job_view_session", column("created_at", sa.DateTime), column("datetime", sa.DateTime))
    op.add_column(
        "job_view_session", sa.Column("datetime", sa.DateTime(), nullable=False, server_default=sa.func.now())
    )
    op.execute(job_view_session.update().values(datetime=job_view_session.c.created_at))
    op.alter_column("job_view_session", "datetime", server_default=None)
    op.create_index(op.f("ix_job_view_session_datetime"), "job_view_session", ["datetime"], unique=False)
Example #5
def initialize_a10_appliances_configured(conn, config):
    """Create a10_appliances_configured for devices in the config.
    Returns a mapping from device keys to a10_appliances_slb ids.
    """

    a10_appliances_configured = table(
        'a10_appliances_configured',
        column('id'),
        column('device_key'))
    select_appliances = a10_appliances_configured.select()
    appliances = conn.execute(select_appliances).fetchall()
    appliance_lookup = dict((a.device_key, a.id) for a in appliances)

    a10_appliances_slb = table(
        'a10_appliances_slb',
        column('id'),
        column('type'))
    for device_key in config.devices.keys():
        if device_key not in appliance_lookup:
            id = str(uuid.uuid4())
            insert_slb = a10_appliances_slb.insert().\
                values(id=id, type=a10_appliances_configured.name)
            conn.execute(insert_slb)
            insert_conf = a10_appliances_configured.insert().\
                values(id=id, device_key=device_key)
            conn.execute(insert_conf)
            appliance_lookup[device_key] = id

    return appliance_lookup
Example #6
def initialize_a10_slb_v1(conn, provider, a10):
    """Create a10_slb_v1 for existing vips"""

    a10_slb_v1 = table(
        'a10_slb_v1',
        column('id'),
        column('vip_id'))
    select_vips = text(
        "SELECT vips.id, pools.tenant_id "
        "FROM vips, pools, providerresourceassociations p "
        "WHERE vips.pool_id = pools.id "
        "AND pools.id = p.resource_id "
        "AND p.provider_name = :provider "
        "AND vips.id NOT IN (SELECT vip_id FROM a10_slb_v1)")
    select_vips = select_vips.bindparams(provider=provider)
    vips = list(map(dict, conn.execute(select_vips).fetchall()))

    tenant_ids = [v['tenant_id'] for v in vips]
    tenant_appliance_lookup = initialize_a10_tenant_appliance(conn, a10, tenant_ids)

    a10_slb = table(
        'a10_slb',
        column('id'),
        column('type'),
        column('a10_appliance_id'))
    for vip in vips:
        id = str(uuid.uuid4())
        appliance = tenant_appliance_lookup[vip['tenant_id']]
        insert_slb = a10_slb.insert().\
            values(id=id, type=a10_slb_v1.name, a10_appliance_id=appliance)
        conn.execute(insert_slb)
        insert_vip = a10_slb_v1.insert().\
            values(id=id, vip_id=vip['id'])
        conn.execute(insert_vip)
Example #7
def messages_in_narrow_backend(request, user_profile,
                               msg_ids = REQ(validator=check_list(check_int)),
                               narrow = REQ(converter=narrow_parameter)):
    # type: (HttpRequest, UserProfile, List[int], List[Dict[str, Any]]) -> HttpResponse

    # Note that this function will only work on messages the user
    # actually received

    # TODO: We assume that the narrow is a search.  For now this works because
    # the browser only ever calls this function for searches, since it can't
    # apply that narrow operator itself.

    query = select([column("message_id"), column("subject"), column("rendered_content")],
                   and_(column("user_profile_id") == literal(user_profile.id),
                        column("message_id").in_(msg_ids)),
                   join(table("zerver_usermessage"), table("zerver_message"),
                        literal_column("zerver_usermessage.message_id") ==
                        literal_column("zerver_message.id")))

    builder = NarrowBuilder(user_profile, column("message_id"))
    for term in narrow:
        query = builder.add_term(query, term)

    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())

    search_fields = dict()
    for row in query_result:
        (message_id, subject, rendered_content, content_matches, subject_matches) = row
        search_fields[message_id] = get_search_fields(rendered_content, subject,
                                                      content_matches, subject_matches)

    return json_success({"messages": search_fields})
def upgrade():
    conn = op.get_bind()
    op.add_column('external_identities', sa.Column('local_user_id',
                                                   sa.Integer()))

    external_identities_t = table('external_identities',
                                  sa.Column('local_user_name', sa.Unicode(50)),
                                  sa.Column('local_user_id', sa.Integer))
    users_t = table('users',
                    sa.Column('user_name', sa.Unicode(50)),
                    sa.Column('id', sa.Integer))

    stmt = external_identities_t.update().values(local_user_id=users_t.c.id). \
        where(users_t.c.user_name == external_identities_t.c.local_user_name)
    conn.execute(stmt)
    op.drop_constraint('pk_external_identities', 'external_identities',
                       type_='primary')
    op.drop_constraint('fk_external_identities_local_user_name_users',
                       'external_identities', type_='foreignkey')
    op.drop_column('external_identities', 'local_user_name')
    op.create_primary_key('pk_external_identities', 'external_identities',
                          cols=['external_id', 'local_user_id',
                                'provider_name'])
    op.create_foreign_key(None, 'external_identities', 'users',
                          remote_cols=['id'],
                          local_cols=['local_user_id'], onupdate='CASCADE',
                          ondelete='CASCADE')
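To see the SQL the correlated update above emits, the statement can be compiled against a specific dialect; a sketch (stmt is the update built in this migration; the commented output is my rough rendering, not from the source):

from sqlalchemy.dialects import postgresql

# On PostgreSQL the cross-table WHERE makes SQLAlchemy render UPDATE ... FROM:
print(stmt.compile(dialect=postgresql.dialect()))
# UPDATE external_identities SET local_user_id=users.id
# FROM users WHERE users.user_name = external_identities.local_user_name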
    def test_limit_offset_with_correlated_order_by(self):
        t1 = table('t1', column('x', Integer), column('y', Integer))
        t2 = table('t2', column('x', Integer), column('y', Integer))

        order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar()
        s = select([t1]).where(t1.c.x == 5).order_by(order_by) \
            .limit(10).offset(20)

        self.assert_compile(
            s,
            "SELECT anon_1.x, anon_1.y "
            "FROM (SELECT t1.x AS x, t1.y AS y, "
            "ROW_NUMBER() OVER (ORDER BY "
            "(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
            ") AS mssql_rn "
            "FROM t1 "
            "WHERE t1.x = :x_1) AS anon_1 "
            "WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
            checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
        )

        c = s.compile(dialect=mssql.MSDialect())
        eq_(len(c._result_columns), 2)
        assert t1.c.x in set(c._create_result_map()['x'][1])
        assert t1.c.y in set(c._create_result_map()['y'][1])
 def test_union(self):
     t1 = table(
         't1', column('col1'), column('col2'),
         column('col3'), column('col4'))
     t2 = table(
         't2', column('col1'), column('col2'),
         column('col3'), column('col4'))
     s1, s2 = select(
         [t1.c.col3.label('col3'), t1.c.col4.label('col4')],
         t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
         select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
                t2.c.col2.in_(['t2col2r2', 't2col2r3']))
     u = union(s1, s2, order_by=['col3', 'col4'])
     self.assert_compile(u,
                         'SELECT t1.col3 AS col3, t1.col4 AS col4 '
                         'FROM t1 WHERE t1.col2 IN (:col2_1, '
                         ':col2_2) UNION SELECT t2.col3 AS col3, '
                         't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
                         '(:col2_3, :col2_4) ORDER BY col3, col4')
     self.assert_compile(u.alias('bar').select(),
                         'SELECT bar.col3, bar.col4 FROM (SELECT '
                         't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
                         'WHERE t1.col2 IN (:col2_1, :col2_2) UNION '
                         'SELECT t2.col3 AS col3, t2.col4 AS col4 '
                         'FROM t2 WHERE t2.col2 IN (:col2_3, '
                         ':col2_4)) AS bar')
Example #11
 def test_delete_extra_froms(self):
     t1 = table("t1", column("c1"))
     t2 = table("t2", column("c1"))
     q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
     self.assert_compile(
         q, "DELETE FROM t1 FROM t1, t2 WHERE t1.c1 = t2.c1"
     )
Example #12
def upgrade():
  op.add_column('task_groups', sa.Column('sort_index',
                sa.String(length=250), nullable=False))
  op.add_column('cycle_task_groups', sa.Column('sort_index',
                sa.String(length=250), nullable=False))

  workflows_table = table(
      'workflows',
      column('id', sa.Integer)
  )

  task_groups_table = table(
      'task_groups',
      column('id', sa.Integer),
      column('sort_index', sa.String),
      column('workflow_id', sa.Integer),
  )

  cycles_table = table(
      'cycles',
      column('id', sa.Integer),
      column('sort_index', sa.String),
      column('workflow_id', sa.Integer),
  )

  cycle_task_groups_table = table(
      'cycle_task_groups',
      column('id', sa.Integer),
      column('sort_index', sa.String),
      column('cycle_id', sa.Integer),
  )

  _set_sort_index(workflows_table, 'workflow_id', task_groups_table)
  _set_sort_index(cycles_table, 'cycle_id', cycle_task_groups_table)
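_set_sort_index is defined elsewhere in this migration module. A plausible, purely hypothetical sketch of it, assuming sort_index is backfilled as a stringified per-parent counter:

def _set_sort_index(parent_table, foreign_key, child_table):
    # Hypothetical helper: number each parent's children sequentially.
    connection = op.get_bind()
    for parent in connection.execute(parent_table.select()):
        children = connection.execute(
            child_table.select().where(
                getattr(child_table.c, foreign_key) == parent.id))
        for index, child in enumerate(children):
            connection.execute(
                child_table.update()
                .where(child_table.c.id == child.id)
                .values(sort_index=str(index)))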
Example #13
 def test_delete_extra_froms(self):
     t1 = table('t1', column('c1'))
     t2 = table('t2', column('c1'))
     q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
     self.assert_compile(
         q, "DELETE FROM t1 USING t1, t2 WHERE t1.c1 = t2.c1"
     )
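The two test_delete_extra_froms variants above are the same statement compiled for different backends. A hedged standalone sketch for comparing the renderings directly:

from sqlalchemy import delete, table, column
from sqlalchemy.dialects import mssql, postgresql

t1 = table("t1", column("c1"))
t2 = table("t2", column("c1"))
q = delete(t1).where(t1.c.c1 == t2.c.c1)

# Expected strings match the assertions above (exact form can vary by
# SQLAlchemy version): MSSQL's "DELETE FROM t1 FROM ..." versus
# PostgreSQL's "DELETE FROM t1 USING ...".
print(q.compile(dialect=mssql.dialect()))
print(q.compile(dialect=postgresql.dialect()))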
def upgrade():
    op.add_column('attributes',
                  sa.Column('node', sa.Integer, default=0))
    op.add_column('attributes',
                  sa.Column('is_latest', sa.Boolean, default=True))

    n = table('nodes',
              column('node', sa.Integer),
              column('latest_version', sa.Integer))
    v = table('versions',
              column('node', sa.Integer),
              column('serial', sa.Integer))
    a = table('attributes',
              column('serial', sa.Integer),
              column('node', sa.Integer),
              column('is_latest', sa.Boolean))

    s = sa.select([v.c.node]).where(v.c.serial == a.c.serial)
    u = a.update().values({'node': s})
    op.execute(u)

    s = sa.select([v.c.serial == n.c.latest_version],
                  and_(a.c.node == n.c.node, a.c.serial == v.c.serial))
    u = a.update().values({'is_latest': s})
    op.execute(u)

    op.alter_column('attributes', 'node', nullable=False)
    op.alter_column('attributes', 'is_latest', nullable=False)

    op.create_index('idx_attributes_serial_node', 'attributes',
                    ['serial', 'node'])
Example #15
 def test_union(self):
     t1 = table("t1", column("col1"), column("col2"), column("col3"), column("col4"))
     t2 = table("t2", column("col1"), column("col2"), column("col3"), column("col4"))
     s1, s2 = (
         select([t1.c.col3.label("col3"), t1.c.col4.label("col4")], t1.c.col2.in_(["t1col2r1", "t1col2r2"])),
         select([t2.c.col3.label("col3"), t2.c.col4.label("col4")], t2.c.col2.in_(["t2col2r2", "t2col2r3"])),
     )
     u = union(s1, s2, order_by=["col3", "col4"])
     self.assert_compile(
         u,
         "SELECT t1.col3 AS col3, t1.col4 AS col4 "
         "FROM t1 WHERE t1.col2 IN (:col2_1, "
         ":col2_2) UNION SELECT t2.col3 AS col3, "
         "t2.col4 AS col4 FROM t2 WHERE t2.col2 IN "
         "(:col2_3, :col2_4) ORDER BY col3, col4",
     )
     self.assert_compile(
         u.alias("bar").select(),
         "SELECT bar.col3, bar.col4 FROM (SELECT "
         "t1.col3 AS col3, t1.col4 AS col4 FROM t1 "
         "WHERE t1.col2 IN (:col2_1, :col2_2) UNION "
         "SELECT t2.col3 AS col3, t2.col4 AS col4 "
         "FROM t2 WHERE t2.col2 IN (:col2_3, "
         ":col2_4)) AS bar",
     )
    def test_annotate_fromlist_preservation(self):
        """test the FROM list in select still works
        even when multiple annotate runs have created
        copies of the same selectable

        #2453, continued

        """
        table1 = table('table1', column('x'))
        table2 = table('table2', column('y'))
        a1 = table1.alias()
        s = select([a1.c.x]).select_from(
                a1.join(table2, a1.c.x==table2.c.y)
            )

        assert_s = select([select([s])])
        for fn in (
            sql_util._deep_deannotate,
            lambda s: sql_util._deep_annotate(s, {'foo':'bar'}),
            lambda s:visitors.cloned_traverse(s, {}, {}),
            lambda s:visitors.replacement_traverse(s, {}, lambda x:None)
        ):

            sel = fn(select([fn(select([fn(s)]))]))
            eq_(str(assert_s), str(sel))
    def test_annotate_unique_traversal(self):
        """test that items are copied only once during
        annotate, deannotate traversal

        #2453
        """
        table1 = table('table1', column('x'))
        table2 = table('table2', column('y'))
        a1 = table1.alias()
        s = select([a1.c.x]).select_from(
                a1.join(table2, a1.c.x==table2.c.y)
            )

        for sel in (
            sql_util._deep_deannotate(s),
            sql_util._deep_annotate(s, {'foo':'bar'}),
            visitors.cloned_traverse(s, {}, {}),
            visitors.replacement_traverse(s, {}, lambda x:None)
        ):
            # the columns clause isn't changed at all
            assert sel._raw_columns[0].table is a1
            # the from objects are internally consistent,
            # i.e. the Alias at position 0 is the same
            # Alias in the Join object in position 1
            assert sel._froms[0] is sel._froms[1].left
            eq_(str(s), str(sel))
Example #18
 def test_join_with_hint(self):
     t1 = table("t1", column("a", Integer), column("b", String), column("c", String))
     t2 = table("t2", column("a", Integer), column("b", Integer), column("c", Integer))
     join = t1.join(t2, t1.c.a == t2.c.a).select().with_hint(t1, "WITH (NOLOCK)")
     self.assert_compile(
         join, "SELECT t1.a, t1.b, t1.c, t2.a, t2.b, t2.c " "FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a"
     )
Example #19
    def test_inner_join(self):
        t1 = table("t1", column("x"))
        t2 = table("t2", column("y"))

        self.assert_compile(
            t1.join(t2, t1.c.x == t2.c.y), "t1 INNER JOIN t2 ON t1.x = t2.y"
        )
def upgrade():
    op.create_table('question_match',
        sa.Column('id', postgresql.UUID(), nullable=False),
        sa.Column('data', sa.Text(), nullable=True),
        sa.Column('score', sa.Float(), nullable=True),
        sa.ForeignKeyConstraint(['id'], ['question.id']),
        sa.PrimaryKeyConstraint('id')
    )

    question = sql.table('question',
        sql.column('id'),
        sql.column('match_data'),
        sql.column('match_score'))
    question_match = sql.table('question_match',
        sql.column('id'),
        sql.column('data'),
        sql.column('score'))

    op.execute(
        question_match.insert().from_select(
            ['id', 'data', 'score'],
            question.select().where(question.c.match_data != None)))

    op.drop_column('question', 'match_data')
    op.drop_column('question', 'match_score')
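A small aside on the filter: question.c.match_data != None compiles to IS NOT NULL because SQLAlchemy intercepts the comparison, but the explicit spelling avoids the E711 lint warning:

question.select().where(question.c.match_data.isnot(None))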
def upgrade():

    nodetype = table('NodeType',
                       column('id', Integer),
                       column('name', String(20)),
                       column('time', DateTime),
                       column('seq', Integer),
                       column('updated_seq', Integer),
                       column('period', Integer),
                       column('blink', Integer),  # should be TINYINT
                       column('configured', String(10)))


    op.bulk_insert(nodetype,
                   [{'id': 7, 'name': "Gas Node",
                     'time': "2011-07-10 00:00:00",
                     'seq': 1,
                     'updated_seq': 0, 'period': 307200, 'blink': 0,
                     'configured': '31,4'}
                    ])

    sensortype = table('SensorType',
                       column('id', Integer),
                       column('name', String(255)),
                       column('code', String(50)),
                       column('units', String(20)),
                       column('c0', Float),
                       column('c1', Float),
                       column('c2', Float),
                       column('c3', Float))
                       
    op.bulk_insert(sensortype,
                   [{'id': 43, 'name': "Gas Pulse Count",
                     'code': "imp",
                     'units': "imp",
                     'c0': 0., 'c1': 1., 'c2': 0., 'c3': 0.}])
    def test_alias(self):
        subq = t2.select().alias('subq')
        s = select([t1.c.col1, subq.c.col1], 
                    from_obj=[t1, subq, 
                        t1.join(subq, t1.c.col1==subq.c.col2)]
                    )
        orig = str(s)
        s2 = CloningVisitor().traverse(s)
        assert orig == str(s) == str(s2)

        s4 = CloningVisitor().traverse(s2)
        assert orig == str(s) == str(s2) == str(s4)

        s3 = sql_util.ClauseAdapter(table('foo')).traverse(s)
        assert orig == str(s) == str(s3)

        s4 = sql_util.ClauseAdapter(table('foo')).traverse(s3)
        assert orig == str(s) == str(s3) == str(s4)

        subq = subq.alias('subq')
        s = select([t1.c.col1, subq.c.col1], 
                    from_obj=[t1, subq, 
                        t1.join(subq, t1.c.col1==subq.c.col2)]
                    )
        s5 = CloningVisitor().traverse(s)
        assert orig == str(s) == str(s5)
Example #23
    def test_pg_example_one(self):
        products = table("products", column("id"), column("date"))
        products_log = table("products_log", column("id"), column("date"))

        moved_rows = (
            products.delete()
            .where(
                and_(products.c.date >= "dateone", products.c.date < "datetwo")
            )
            .returning(*products.c)
            .cte("moved_rows")
        )

        stmt = products_log.insert().from_select(
            products_log.c, moved_rows.select()
        )
        self.assert_compile(
            stmt,
            "WITH moved_rows AS "
            "(DELETE FROM products WHERE products.date >= :date_1 "
            "AND products.date < :date_2 "
            "RETURNING products.id, products.date) "
            "INSERT INTO products_log (id, date) "
            "SELECT moved_rows.id, moved_rows.date FROM moved_rows",
        )
def upgrade():
    campaign_anon_view = table('campaign_anon_view',
        column('created_at', sa.DateTime),
        column('datetime', sa.DateTime))
    op.add_column('campaign_anon_view', sa.Column('datetime', sa.DateTime(), nullable=True))
    op.execute(campaign_anon_view.update().values(datetime=campaign_anon_view.c.created_at))
    op.alter_column('campaign_anon_view', 'datetime', nullable=False)
    op.create_index(op.f('ix_campaign_anon_view_datetime'), 'campaign_anon_view', ['datetime'], unique=False)

    campaign_view = table('campaign_view',
        column('created_at', sa.DateTime),
        column('datetime', sa.DateTime))
    op.add_column('campaign_view', sa.Column('datetime', sa.DateTime(), nullable=True))
    op.execute(campaign_view.update().values(datetime=campaign_view.c.created_at))
    op.alter_column('campaign_view', 'datetime', nullable=False)
    op.create_index(op.f('ix_campaign_view_datetime'), 'campaign_view', ['datetime'], unique=False)

    job_view_session = table('job_view_session',
        column('created_at', sa.DateTime),
        column('datetime', sa.DateTime))
    op.add_column('job_view_session', sa.Column('datetime', sa.DateTime(), nullable=True))
    op.execute(job_view_session.update().values(datetime=job_view_session.c.created_at))
    op.alter_column('job_view_session', 'datetime', nullable=False)
    op.create_index(op.f('ix_job_view_session_datetime'), 'job_view_session', ['datetime'], unique=False)

    job_impression = table('job_impression',
        column('created_at', sa.DateTime),
        column('datetime', sa.DateTime))
    op.add_column('job_impression', sa.Column('datetime', sa.DateTime(), nullable=True))
    op.execute(job_impression.update().values(datetime=job_impression.c.created_at))
    op.alter_column('job_impression', 'datetime', nullable=False)
    op.create_index(op.f('ix_job_impression_datetime'), 'job_impression', ['datetime'], unique=False)
Example #25
def downgrade():
    op.add_column('queries', sa.Column('old_schedule', MutableDict.as_mutable(PseudoJSON), nullable=False, server_default=json.dumps({})))

    queries = table(
        'queries',
        sa.Column('schedule', MutableDict.as_mutable(PseudoJSON)),
        sa.Column('old_schedule', MutableDict.as_mutable(PseudoJSON)))

    op.execute(
        queries
            .update()
            .values({'old_schedule': queries.c.schedule}))

    op.drop_column('queries', 'schedule')
    op.add_column('queries', sa.Column('schedule', sa.String(length=10), nullable=True))

    queries = table(
        'queries',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('schedule', sa.String(length=10)),
        sa.Column('old_schedule', MutableDict.as_mutable(PseudoJSON)))

    conn = op.get_bind()
    for query in conn.execute(queries.select()):
        scheduleValue = query.old_schedule['interval']
        if scheduleValue <= 86400:
            scheduleValue = query.old_schedule['time']

        conn.execute(
            queries
                .update()
                .where(queries.c.id == query.id)
                .values(schedule=scheduleValue))

    op.drop_column('queries', 'old_schedule')
def _insert_operation_port_interface():
    tb = table(
        'operation_port_interface',
        column('id', Integer),
        column('color', String))
    columns = [c.name for c in tb.columns]
    data = [
        (20, '#ED254E'),
    ]
    rows = [dict(zip(columns, row)) for row in data]
    op.bulk_insert(tb, rows)

    tb = table(
        'operation_port_interface_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String))

    columns = [c.name for c in tb.columns]
    data = [
        (20, 'en', 'TransformationModel'),
        (20, 'pt', 'TransformationModel'),
    ]
    rows = [dict(zip(columns, row)) for row in data]
    op.bulk_insert(tb, rows)
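The dict(zip(...)) idiom used twice above turns positional tuples into the row dictionaries op.bulk_insert expects; in isolation:

columns = ['id', 'locale', 'name']
data = [(20, 'en', 'TransformationModel'),
        (20, 'pt', 'TransformationModel')]
rows = [dict(zip(columns, row)) for row in data]
# -> [{'id': 20, 'locale': 'en', 'name': 'TransformationModel'}, ...]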
Example #27
    def test_nonansi_nested_right_join(self):
        a = table("a", column("a"))
        b = table("b", column("b"))
        c = table("c", column("c"))

        j = a.join(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)

        self.assert_compile(
            select([j]),
            "SELECT a.a, b.b, c.c FROM a, b, c "
            "WHERE a.a = b.b AND b.b = c.c",
            dialect=oracle.OracleDialect(use_ansi=False),
        )

        j = a.outerjoin(b.join(c, b.c.b == c.c.c), a.c.a == b.c.b)

        self.assert_compile(
            select([j]),
            "SELECT a.a, b.b, c.c FROM a, b, c "
            "WHERE a.a = b.b(+) AND b.b = c.c",
            dialect=oracle.OracleDialect(use_ansi=False),
        )

        j = a.join(b.outerjoin(c, b.c.b == c.c.c), a.c.a == b.c.b)

        self.assert_compile(
            select([j]),
            "SELECT a.a, b.b, c.c FROM a, b, c "
            "WHERE a.a = b.b AND b.b = c.c(+)",
            dialect=oracle.OracleDialect(use_ansi=False),
        )
Example #28
 def test_alias_outer_join(self):
     address_types = table("address_types", column("id"), column("name"))
     addresses = table(
         "addresses",
         column("id"),
         column("user_id"),
         column("address_type_id"),
         column("email_address"),
     )
     at_alias = address_types.alias()
     s = (
         select([at_alias, addresses])
         .select_from(
             addresses.outerjoin(
                 at_alias, addresses.c.address_type_id == at_alias.c.id
             )
         )
         .where(addresses.c.user_id == 7)
         .order_by(addresses.c.id, address_types.c.id)
     )
     self.assert_compile(
         s,
         "SELECT address_types_1.id, "
         "address_types_1.name, addresses.id, "
         "addresses.user_id, addresses.address_type_"
         "id, addresses.email_address FROM "
         "addresses LEFT OUTER JOIN address_types "
         "address_types_1 ON addresses.address_type_"
         "id = address_types_1.id WHERE "
         "addresses.user_id = :user_id_1 ORDER BY "
         "addresses.id, address_types.id",
     )
Example #29
    def test_cte_refers_to_aliased_cte_twice(self):
        # test issue #4204
        a = table("a", column("id"))
        b = table("b", column("id"), column("fid"))
        c = table("c", column("id"), column("fid"))

        cte1 = select([a.c.id]).cte(name="cte1")

        aa = cte1.alias("aa")

        cte2 = (
            select([b.c.id])
            .select_from(b.join(aa, b.c.fid == aa.c.id))
            .cte(name="cte2")
        )

        cte3 = (
            select([c.c.id])
            .select_from(c.join(aa, c.c.fid == aa.c.id))
            .cte(name="cte3")
        )

        stmt = select([cte3.c.id, cte2.c.id]).select_from(
            cte2.join(cte3, cte2.c.id == cte3.c.id)
        )
        self.assert_compile(
            stmt,
            "WITH cte1 AS (SELECT a.id AS id FROM a), "
            "cte2 AS (SELECT b.id AS id FROM b "
            "JOIN cte1 AS aa ON b.fid = aa.id), "
            "cte3 AS (SELECT c.id AS id FROM c "
            "JOIN cte1 AS aa ON c.fid = aa.id) "
            "SELECT cte3.id, cte2.id FROM cte2 JOIN cte3 ON cte2.id = cte3.id",
        )
def upgrade():
    op.add_column('lu_population_number',
        sa.Column('name_ro', sa.UnicodeText, nullable=True))

    op.add_column('lu_population_units_restricted',
        sa.Column('name_ro', sa.UnicodeText, nullable=True))

    lu_pop_codes = table('lu_population_number',
        column('code', sa.String),
        column('name_ro', sa.UnicodeText))

    lu_pop_restrict_codes = table('lu_population_units_restricted',
        column('code', sa.String),
        column('name_ro', sa.UnicodeText))

    for code, name_ro in DATA:

        op.execute(
            lu_pop_codes.update()
                .where(lu_pop_codes.c.code == op.inline_literal(code))
                .values({'name_ro': op.inline_literal(name_ro)}))

        op.execute(
            lu_pop_restrict_codes.update()
                .where(lu_pop_restrict_codes.c.code == op.inline_literal(code))
                .values({'name_ro': op.inline_literal(name_ro)}))
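op.inline_literal is used here instead of plain Python values so the statements also render under Alembic's offline "--sql" mode, which emits a SQL script and cannot use bound parameters. A minimal illustration with hypothetical values:

op.execute(
    lu_pop_codes.update()
    .where(lu_pop_codes.c.code == op.inline_literal('1'))
    .values({'name_ro': op.inline_literal('Exemplu')}))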
Example #31
 def test_select_with_nolock(self):
     t = table("sometable", column("somecolumn"))
     self.assert_compile(
         t.select().with_hint(t, "WITH (NOLOCK)"),
         "SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)",
     )
Example #32
def upgrade(tables, tester, progress_reporter):
    op = ProgressWrapper(original_op, progress_reporter)
    op.create_table(
        "tagkind",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_tagkind")),
    )
    op.create_index("tagkind_name", "tagkind", ["name"], unique=True)

    op.create_table(
        "blobplacementlocation",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_blobplacementlocation")),
    )
    op.create_index("blobplacementlocation_name",
                    "blobplacementlocation", ["name"],
                    unique=True)

    op.create_table(
        "blob",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("digest", sa.String(length=255), nullable=False),
        sa.Column("media_type_id", sa.Integer(), nullable=False),
        sa.Column("size", sa.BigInteger(), nullable=False),
        sa.Column("uncompressed_size", sa.BigInteger(), nullable=True),
        sa.ForeignKeyConstraint(["media_type_id"], ["mediatype.id"],
                                name=op.f("fk_blob_media_type_id_mediatype")),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_blob")),
    )
    op.create_index("blob_digest", "blob", ["digest"], unique=True)
    op.create_index("blob_media_type_id",
                    "blob", ["media_type_id"],
                    unique=False)

    op.create_table(
        "blobplacementlocationpreference",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("user_id", sa.Integer(), nullable=False),
        sa.Column("location_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["location_id"],
            ["blobplacementlocation.id"],
            name=op.f("fk_blobplacementlocpref_locid_blobplacementlocation"),
        ),
        sa.ForeignKeyConstraint(
            ["user_id"], ["user.id"],
            name=op.f("fk_blobplacementlocationpreference_user_id_user")),
        sa.PrimaryKeyConstraint(
            "id", name=op.f("pk_blobplacementlocationpreference")),
    )
    op.create_index(
        "blobplacementlocationpreference_location_id",
        "blobplacementlocationpreference",
        ["location_id"],
        unique=False,
    )
    op.create_index(
        "blobplacementlocationpreference_user_id",
        "blobplacementlocationpreference",
        ["user_id"],
        unique=False,
    )

    op.create_table(
        "manifest",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("digest", sa.String(length=255), nullable=False),
        sa.Column("media_type_id", sa.Integer(), nullable=False),
        sa.Column("manifest_json", UTF8LongText, nullable=False),
        sa.ForeignKeyConstraint(
            ["media_type_id"], ["mediatype.id"],
            name=op.f("fk_manifest_media_type_id_mediatype")),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifest")),
    )
    op.create_index("manifest_digest", "manifest", ["digest"], unique=True)
    op.create_index("manifest_media_type_id",
                    "manifest", ["media_type_id"],
                    unique=False)

    op.create_table(
        "manifestlist",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("digest", sa.String(length=255), nullable=False),
        sa.Column("manifest_list_json", UTF8LongText, nullable=False),
        sa.Column("schema_version", UTF8CharField(length=255), nullable=False),
        sa.Column("media_type_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["media_type_id"],
            ["mediatype.id"],
            name=op.f("fk_manifestlist_media_type_id_mediatype"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifestlist")),
    )
    op.create_index("manifestlist_digest",
                    "manifestlist", ["digest"],
                    unique=True)
    op.create_index("manifestlist_media_type_id",
                    "manifestlist", ["media_type_id"],
                    unique=False)

    op.create_table(
        "bittorrentpieces",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("blob_id", sa.Integer(), nullable=False),
        sa.Column("pieces", UTF8LongText, nullable=False),
        sa.Column("piece_length", sa.BigInteger(), nullable=False),
        sa.ForeignKeyConstraint(["blob_id"], ["blob.id"],
                                name=op.f("fk_bittorrentpieces_blob_id_blob")),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_bittorrentpieces")),
    )
    op.create_index("bittorrentpieces_blob_id",
                    "bittorrentpieces", ["blob_id"],
                    unique=False)
    op.create_index(
        "bittorrentpieces_blob_id_piece_length",
        "bittorrentpieces",
        ["blob_id", "piece_length"],
        unique=True,
    )

    op.create_table(
        "blobplacement",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("blob_id", sa.Integer(), nullable=False),
        sa.Column("location_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["blob_id"], ["blob.id"],
                                name=op.f("fk_blobplacement_blob_id_blob")),
        sa.ForeignKeyConstraint(
            ["location_id"],
            ["blobplacementlocation.id"],
            name=op.f("fk_blobplacement_location_id_blobplacementlocation"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_blobplacement")),
    )
    op.create_index("blobplacement_blob_id",
                    "blobplacement", ["blob_id"],
                    unique=False)
    op.create_index(
        "blobplacement_blob_id_location_id",
        "blobplacement",
        ["blob_id", "location_id"],
        unique=True,
    )
    op.create_index("blobplacement_location_id",
                    "blobplacement", ["location_id"],
                    unique=False)

    op.create_table(
        "blobuploading",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("uuid", sa.String(length=255), nullable=False),
        sa.Column("created", sa.DateTime(), nullable=False),
        sa.Column("repository_id", sa.Integer(), nullable=False),
        sa.Column("location_id", sa.Integer(), nullable=False),
        sa.Column("byte_count", sa.BigInteger(), nullable=False),
        sa.Column("uncompressed_byte_count", sa.BigInteger(), nullable=True),
        sa.Column("chunk_count", sa.BigInteger(), nullable=False),
        sa.Column("storage_metadata", UTF8LongText, nullable=True),
        sa.Column("sha_state", UTF8LongText, nullable=True),
        sa.Column("piece_sha_state", UTF8LongText, nullable=True),
        sa.Column("piece_hashes", UTF8LongText, nullable=True),
        sa.ForeignKeyConstraint(
            ["location_id"],
            ["blobplacementlocation.id"],
            name=op.f("fk_blobuploading_location_id_blobplacementlocation"),
        ),
        sa.ForeignKeyConstraint(
            ["repository_id"],
            ["repository.id"],
            name=op.f("fk_blobuploading_repository_id_repository"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_blobuploading")),
    )
    op.create_index("blobuploading_created",
                    "blobuploading", ["created"],
                    unique=False)
    op.create_index("blobuploading_location_id",
                    "blobuploading", ["location_id"],
                    unique=False)
    op.create_index("blobuploading_repository_id",
                    "blobuploading", ["repository_id"],
                    unique=False)
    op.create_index("blobuploading_repository_id_uuid",
                    "blobuploading", ["repository_id", "uuid"],
                    unique=True)
    op.create_index("blobuploading_uuid",
                    "blobuploading", ["uuid"],
                    unique=True)

    op.create_table(
        "derivedimage",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("uuid", sa.String(length=255), nullable=False),
        sa.Column("source_manifest_id", sa.Integer(), nullable=False),
        sa.Column("derived_manifest_json", UTF8LongText, nullable=False),
        sa.Column("media_type_id", sa.Integer(), nullable=False),
        sa.Column("blob_id", sa.Integer(), nullable=False),
        sa.Column("uniqueness_hash", sa.String(length=255), nullable=False),
        sa.Column("signature_blob_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["blob_id"], ["blob.id"],
                                name=op.f("fk_derivedimage_blob_id_blob")),
        sa.ForeignKeyConstraint(
            ["media_type_id"],
            ["mediatype.id"],
            name=op.f("fk_derivedimage_media_type_id_mediatype"),
        ),
        sa.ForeignKeyConstraint(
            ["signature_blob_id"], ["blob.id"],
            name=op.f("fk_derivedimage_signature_blob_id_blob")),
        sa.ForeignKeyConstraint(
            ["source_manifest_id"],
            ["manifest.id"],
            name=op.f("fk_derivedimage_source_manifest_id_manifest"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_derivedimage")),
    )
    op.create_index("derivedimage_blob_id",
                    "derivedimage", ["blob_id"],
                    unique=False)
    op.create_index("derivedimage_media_type_id",
                    "derivedimage", ["media_type_id"],
                    unique=False)
    op.create_index("derivedimage_signature_blob_id",
                    "derivedimage", ["signature_blob_id"],
                    unique=False)
    op.create_index("derivedimage_source_manifest_id",
                    "derivedimage", ["source_manifest_id"],
                    unique=False)
    op.create_index(
        "derivedimage_source_manifest_id_blob_id",
        "derivedimage",
        ["source_manifest_id", "blob_id"],
        unique=True,
    )
    op.create_index(
        "derivedimage_source_manifest_id_media_type_id_uniqueness_hash",
        "derivedimage",
        ["source_manifest_id", "media_type_id", "uniqueness_hash"],
        unique=True,
    )
    op.create_index("derivedimage_uniqueness_hash",
                    "derivedimage", ["uniqueness_hash"],
                    unique=True)
    op.create_index("derivedimage_uuid", "derivedimage", ["uuid"], unique=True)

    op.create_table(
        "manifestblob",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("manifest_id", sa.Integer(), nullable=False),
        sa.Column("blob_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["blob_id"], ["blob.id"],
                                name=op.f("fk_manifestblob_blob_id_blob")),
        sa.ForeignKeyConstraint(
            ["manifest_id"], ["manifest.id"],
            name=op.f("fk_manifestblob_manifest_id_manifest")),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifestblob")),
    )
    op.create_index("manifestblob_blob_id",
                    "manifestblob", ["blob_id"],
                    unique=False)
    op.create_index("manifestblob_manifest_id",
                    "manifestblob", ["manifest_id"],
                    unique=False)
    op.create_index("manifestblob_manifest_id_blob_id",
                    "manifestblob", ["manifest_id", "blob_id"],
                    unique=True)

    op.create_table(
        "manifestlabel",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("repository_id", sa.Integer(), nullable=False),
        sa.Column("annotated_id", sa.Integer(), nullable=False),
        sa.Column("label_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["annotated_id"], ["manifest.id"],
            name=op.f("fk_manifestlabel_annotated_id_manifest")),
        sa.ForeignKeyConstraint(["label_id"], ["label.id"],
                                name=op.f("fk_manifestlabel_label_id_label")),
        sa.ForeignKeyConstraint(
            ["repository_id"],
            ["repository.id"],
            name=op.f("fk_manifestlabel_repository_id_repository"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifestlabel")),
    )
    op.create_index("manifestlabel_annotated_id",
                    "manifestlabel", ["annotated_id"],
                    unique=False)
    op.create_index("manifestlabel_label_id",
                    "manifestlabel", ["label_id"],
                    unique=False)
    op.create_index("manifestlabel_repository_id",
                    "manifestlabel", ["repository_id"],
                    unique=False)
    op.create_index(
        "manifestlabel_repository_id_annotated_id_label_id",
        "manifestlabel",
        ["repository_id", "annotated_id", "label_id"],
        unique=True,
    )

    op.create_table(
        "manifestlayer",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("blob_id", sa.Integer(), nullable=False),
        sa.Column("manifest_id", sa.Integer(), nullable=False),
        sa.Column("manifest_index", sa.BigInteger(), nullable=False),
        sa.Column("metadata_json", UTF8LongText, nullable=False),
        sa.ForeignKeyConstraint(["blob_id"], ["blob.id"],
                                name=op.f("fk_manifestlayer_blob_id_blob")),
        sa.ForeignKeyConstraint(
            ["manifest_id"], ["manifest.id"],
            name=op.f("fk_manifestlayer_manifest_id_manifest")),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifestlayer")),
    )
    op.create_index("manifestlayer_blob_id",
                    "manifestlayer", ["blob_id"],
                    unique=False)
    op.create_index("manifestlayer_manifest_id",
                    "manifestlayer", ["manifest_id"],
                    unique=False)
    op.create_index(
        "manifestlayer_manifest_id_manifest_index",
        "manifestlayer",
        ["manifest_id", "manifest_index"],
        unique=True,
    )
    op.create_index("manifestlayer_manifest_index",
                    "manifestlayer", ["manifest_index"],
                    unique=False)

    op.create_table(
        "manifestlistmanifest",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("manifest_list_id", sa.Integer(), nullable=False),
        sa.Column("manifest_id", sa.Integer(), nullable=False),
        sa.Column("operating_system", UTF8CharField(length=255),
                  nullable=True),
        sa.Column("architecture", UTF8CharField(length=255), nullable=True),
        sa.Column("platform_json", UTF8LongText, nullable=True),
        sa.Column("media_type_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["manifest_id"],
            ["manifest.id"],
            name=op.f("fk_manifestlistmanifest_manifest_id_manifest"),
        ),
        sa.ForeignKeyConstraint(
            ["manifest_list_id"],
            ["manifestlist.id"],
            name=op.f("fk_manifestlistmanifest_manifest_list_id_manifestlist"),
        ),
        sa.ForeignKeyConstraint(
            ["media_type_id"],
            ["mediatype.id"],
            name=op.f("fk_manifestlistmanifest_media_type_id_mediatype"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifestlistmanifest")),
    )
    op.create_index("manifestlistmanifest_manifest_id",
                    "manifestlistmanifest", ["manifest_id"],
                    unique=False)
    op.create_index(
        "manifestlistmanifest_manifest_list_id",
        "manifestlistmanifest",
        ["manifest_list_id"],
        unique=False,
    )
    op.create_index(
        "manifestlistmanifest_manifest_listid_os_arch_mtid",
        "manifestlistmanifest",
        [
            "manifest_list_id", "operating_system", "architecture",
            "media_type_id"
        ],
        unique=False,
    )
    op.create_index(
        "manifestlistmanifest_manifest_listid_mtid",
        "manifestlistmanifest",
        ["manifest_list_id", "media_type_id"],
        unique=False,
    )
    op.create_index(
        "manifestlistmanifest_media_type_id",
        "manifestlistmanifest",
        ["media_type_id"],
        unique=False,
    )

    op.create_table(
        "tag",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", UTF8CharField(length=190), nullable=False),
        sa.Column("repository_id", sa.Integer(), nullable=False),
        sa.Column("manifest_list_id", sa.Integer(), nullable=True),
        sa.Column("lifetime_start", sa.BigInteger(), nullable=False),
        sa.Column("lifetime_end", sa.BigInteger(), nullable=True),
        sa.Column("hidden", sa.Boolean(), nullable=False),
        sa.Column("reverted", sa.Boolean(), nullable=False),
        sa.Column("protected", sa.Boolean(), nullable=False),
        sa.Column("tag_kind_id", sa.Integer(), nullable=False),
        sa.Column("linked_tag_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["linked_tag_id"], ["tag.id"],
                                name=op.f("fk_tag_linked_tag_id_tag")),
        sa.ForeignKeyConstraint(
            ["manifest_list_id"],
            ["manifestlist.id"],
            name=op.f("fk_tag_manifest_list_id_manifestlist"),
        ),
        sa.ForeignKeyConstraint(["repository_id"], ["repository.id"],
                                name=op.f("fk_tag_repository_id_repository")),
        sa.ForeignKeyConstraint(["tag_kind_id"], ["tagkind.id"],
                                name=op.f("fk_tag_tag_kind_id_tagkind")),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_tag")),
    )
    op.create_index("tag_lifetime_end", "tag", ["lifetime_end"], unique=False)
    op.create_index("tag_linked_tag_id",
                    "tag", ["linked_tag_id"],
                    unique=False)
    op.create_index("tag_manifest_list_id",
                    "tag", ["manifest_list_id"],
                    unique=False)
    op.create_index("tag_repository_id",
                    "tag", ["repository_id"],
                    unique=False)
    op.create_index("tag_repository_id_name_hidden",
                    "tag", ["repository_id", "name", "hidden"],
                    unique=False)
    op.create_index(
        "tag_repository_id_name_lifetime_end",
        "tag",
        ["repository_id", "name", "lifetime_end"],
        unique=True,
    )
    op.create_index("tag_repository_id_name",
                    "tag", ["repository_id", "name"],
                    unique=False)
    op.create_index("tag_tag_kind_id", "tag", ["tag_kind_id"], unique=False)

    op.create_table(
        "manifestlayerdockerv1",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("manifest_layer_id", sa.Integer(), nullable=False),
        sa.Column("image_id", UTF8CharField(length=255), nullable=False),
        sa.Column("checksum", UTF8CharField(length=255), nullable=False),
        sa.Column("compat_json", UTF8LongText, nullable=False),
        sa.ForeignKeyConstraint(
            ["manifest_layer_id"],
            ["manifestlayer.id"],
            name=op.f(
                "fk_manifestlayerdockerv1_manifest_layer_id_manifestlayer"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifestlayerdockerv1")),
    )
    op.create_index("manifestlayerdockerv1_image_id",
                    "manifestlayerdockerv1", ["image_id"],
                    unique=False)
    op.create_index(
        "manifestlayerdockerv1_manifest_layer_id",
        "manifestlayerdockerv1",
        ["manifest_layer_id"],
        unique=False,
    )

    op.create_table(
        "manifestlayerscan",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("layer_id", sa.Integer(), nullable=False),
        sa.Column("scannable", sa.Boolean(), nullable=False),
        sa.Column("scanned_by", UTF8CharField(length=255), nullable=False),
        sa.ForeignKeyConstraint(
            ["layer_id"],
            ["manifestlayer.id"],
            name=op.f("fk_manifestlayerscan_layer_id_manifestlayer"),
        ),
        sa.PrimaryKeyConstraint("id", name=op.f("pk_manifestlayerscan")),
    )
    op.create_index("manifestlayerscan_layer_id",
                    "manifestlayerscan", ["layer_id"],
                    unique=True)

    blobplacementlocation_table = table(
        "blobplacementlocation",
        column("id", sa.Integer()),
        column("name", sa.String()),
    )

    op.bulk_insert(
        blobplacementlocation_table,
        [
            {"name": "local_eu"},
            {"name": "local_us"},
        ],
    )

    op.bulk_insert(
        tables.mediatype,
        [
            {"name": "application/vnd.cnr.blob.v0.tar+gzip"},
            {"name": "application/vnd.cnr.package-manifest.helm.v0.json"},
            {"name": "application/vnd.cnr.package-manifest.kpm.v0.json"},
            {"name": "application/vnd.cnr.package-manifest.docker-compose.v0.json"},
            {"name": "application/vnd.cnr.package.kpm.v0.tar+gzip"},
            {"name": "application/vnd.cnr.package.helm.v0.tar+gzip"},
            {"name": "application/vnd.cnr.package.docker-compose.v0.tar+gzip"},
            {"name": "application/vnd.cnr.manifests.v0.json"},
            {"name": "application/vnd.cnr.manifest.list.v0.json"},
        ],
    )

    tagkind_table = table(
        "tagkind",
        column("id", sa.Integer()),
        column("name", sa.String()),
    )

    op.bulk_insert(
        tagkind_table,
        [
            {"id": 1, "name": "tag"},
            {"id": 2, "name": "release"},
            {"id": 3, "name": "channel"},
        ],
    )
Example #33
 def get_sqla_table(self):
     tbl = table(self.table_name)
     if self.schema:
         tbl.schema = self.schema
     return tbl
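Assigning .schema after construction, as above, works on any SQLAlchemy version; newer releases (1.3.18+, if memory serves) also accept it as a keyword. A sketch with hypothetical names:

from sqlalchemy import table

tbl = table("my_table", schema="analytics")  # equivalent to assigning tbl.schema afterwards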
Example #34
 def test_select(self):
     t = table("sometable", column("somecolumn"))
     self.assert_compile(
         t.select(), "SELECT sometable.somecolumn FROM sometable"
     )
Example #35
class MySQLForUpdateCompileTest(fixtures.TestBase, AssertsCompiledSQL):
    __dialect__ = mysql.dialect()

    table1 = table("mytable", column("myid"), column("name"),
                   column("description"))
    table2 = table("table2", column("mytable_id"))
    join = table2.join(table1, table2.c.mytable_id == table1.c.myid)
    for_update_of_dialect = mysql.dialect()
    for_update_of_dialect.server_version_info = (8, 0, 0)
    for_update_of_dialect.supports_for_update_of = True

    def test_for_update_basic(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s FOR UPDATE",
        )

    def test_for_update_read(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                read=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s LOCK IN SHARE MODE",
        )

    def test_for_update_skip_locked(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "FOR UPDATE SKIP LOCKED",
        )

    def test_for_update_read_and_skip_locked(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                read=True, skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "LOCK IN SHARE MODE SKIP LOCKED",
        )

    def test_for_update_nowait(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "FOR UPDATE NOWAIT",
        )

    def test_for_update_read_and_nowait(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                read=True, nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "LOCK IN SHARE MODE NOWAIT",
        )

    def test_for_update_of_nowait(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                of=self.table1, nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "FOR UPDATE OF mytable NOWAIT",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_basic(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                of=self.table1),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "FOR UPDATE OF mytable",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_skip_locked(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                of=self.table1, skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "FOR UPDATE OF mytable SKIP LOCKED",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_join_one(self):
        self.assert_compile(
            self.join.select(self.table2.c.mytable_id == 7).with_for_update(
                of=[self.join]),
            "SELECT table2.mytable_id, "
            "mytable.myid, mytable.name, mytable.description "
            "FROM table2 "
            "INNER JOIN mytable ON table2.mytable_id = mytable.myid "
            "WHERE table2.mytable_id = %s "
            "FOR UPDATE OF mytable, table2",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_column_list_aliased(self):
        ta = self.table1.alias()
        self.assert_compile(
            ta.select(ta.c.myid == 7).with_for_update(
                of=[ta.c.myid, ta.c.name]),
            "SELECT mytable_1.myid, mytable_1.name, mytable_1.description "
            "FROM mytable AS mytable_1 "
            "WHERE mytable_1.myid = %s FOR UPDATE OF mytable_1",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_join_aliased(self):
        ta = self.table1.alias()
        alias_join = self.table2.join(ta,
                                      self.table2.c.mytable_id == ta.c.myid)
        self.assert_compile(
            alias_join.select(self.table2.c.mytable_id == 7).with_for_update(
                of=[alias_join]),
            "SELECT table2.mytable_id, "
            "mytable_1.myid, mytable_1.name, mytable_1.description "
            "FROM table2 "
            "INNER JOIN mytable AS mytable_1 "
            "ON table2.mytable_id = mytable_1.myid "
            "WHERE table2.mytable_id = %s "
            "FOR UPDATE OF mytable_1, table2",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_read_nowait(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                read=True, of=self.table1, nowait=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "LOCK IN SHARE MODE OF mytable NOWAIT",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_read_skip_locked(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                read=True, of=self.table1, skip_locked=True),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "LOCK IN SHARE MODE OF mytable SKIP LOCKED",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_read_nowait_column_list(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                read=True,
                of=[self.table1.c.myid, self.table1.c.name],
                nowait=True,
            ),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "LOCK IN SHARE MODE OF mytable NOWAIT",
            dialect=self.for_update_of_dialect,
        )

    def test_for_update_of_read(self):
        self.assert_compile(
            self.table1.select(self.table1.c.myid == 7).with_for_update(
                read=True, of=self.table1),
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable WHERE mytable.myid = %s "
            "LOCK IN SHARE MODE OF mytable",
            dialect=self.for_update_of_dialect,
        )
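
The same compiles can be reproduced outside the test harness; a minimal sketch using the public API:

from sqlalchemy.dialects import mysql
from sqlalchemy.sql import column, table

mytable = table('mytable', column('myid'))
stmt = mytable.select(mytable.c.myid == 7).with_for_update(skip_locked=True)
print(stmt.compile(dialect=mysql.dialect()))
# SELECT mytable.myid FROM mytable WHERE mytable.myid = %s FOR UPDATE SKIP LOCKED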
Example #36
    def query(self,
              groupby,
              metrics,
              granularity,
              from_dttm,
              to_dttm,
              custom_query,
              limit_spec=None,
              filter=None,
              is_timeseries=True,
              timeseries_limit=15,
              row_limit=None,
              inner_from_dttm=None,
              inner_to_dttm=None,
              extras=None,
              columns=None):

        qry_start_dttm = datetime.now()

        if not custom_query:
            # For backward compatibility
            if granularity not in self.dttm_cols:
                granularity = self.main_dttm_col
            cols = {col.column_name: col for col in self.columns}
            if not self.main_dttm_col:
                raise Exception(
                    "Datetime column not provided as part table configuration")
            dttm_expr = cols[granularity].expression

            if dttm_expr:
                timestamp = ColumnClause(dttm_expr,
                                         is_literal=True).label('timestamp')
            else:
                timestamp = literal_column(granularity).label('timestamp')

            metrics_exprs = [
                literal_column(m.expression).label(m.metric_name)
                for m in self.metrics if m.metric_name in metrics
            ]

            if metrics:
                main_metric_expr = literal_column([
                    m.expression for m in self.metrics
                    if m.metric_name == metrics[0]
                ][0])
            else:
                main_metric_expr = literal_column("COUNT(*)")

            groupby_exprs = []
            select_exprs = []

            if groupby:
                inner_select_exprs = []
                inner_groupby_exprs = []
                for s in groupby:
                    col = cols[s]
                    expr = col.expression
                    if expr:
                        outer = ColumnClause(expr, is_literal=True).label(s)
                        inner = ColumnClause(expr,
                                             is_literal=True).label('__' + s)
                    else:
                        outer = column(s).label(s)
                        inner = column(s).label('__' + s)

                    groupby_exprs.append(outer)
                    select_exprs.append(outer)
                    inner_groupby_exprs.append(inner)
                    inner_select_exprs.append(inner)
            elif columns:
                for s in columns:
                    select_exprs.append(s)
                metrics_exprs = []

            if is_timeseries:
                select_exprs += [timestamp]
                groupby_exprs += [timestamp]

            select_exprs += metrics_exprs
            qry = select(select_exprs)
            from_clause = table(self.table_name)
            if not columns:
                qry = qry.group_by(*groupby_exprs)

            time_filter = [
                timestamp >= from_dttm.isoformat(),
                timestamp <= to_dttm.isoformat(),
            ]
            inner_time_filter = copy(time_filter)
            if inner_from_dttm:
                inner_time_filter[0] = timestamp >= inner_from_dttm.isoformat()
            if inner_to_dttm:
                inner_time_filter[1] = timestamp <= inner_to_dttm.isoformat()
            where_clause_and = []
            having_clause_and = []
            for col, op, eq in (filter or []):  # tolerate the filter=None default
                col_obj = cols[col]
                if op in ('in', 'not in'):
                    values = eq.split(",")
                    if col_obj.expression:
                        cond = ColumnClause(col_obj.expression,
                                            is_literal=True).in_(values)
                    else:
                        cond = column(col).in_(values)
                    if op == 'not in':
                        cond = ~cond
                    where_clause_and.append(cond)
            if extras and 'where' in extras:
                where_clause_and += [text(extras['where'])]
            if extras and 'having' in extras:
                having_clause_and += [text(extras['having'])]
            qry = qry.where(and_(*(time_filter + where_clause_and)))
            qry = qry.having(and_(*having_clause_and))
            if groupby:
                qry = qry.order_by(desc(main_metric_expr))
            qry = qry.limit(row_limit)

            if timeseries_limit and groupby:
                subq = select(inner_select_exprs)
                subq = subq.select_from(table(self.table_name))
                subq = subq.where(and_(*(where_clause_and +
                                         inner_time_filter)))
                subq = subq.group_by(*inner_groupby_exprs)
                subq = subq.order_by(desc(main_metric_expr))
                subq = subq.limit(timeseries_limit)
                on_clause = []
                for i, gb in enumerate(groupby):
                    on_clause.append(groupby_exprs[i] == column("__" + gb))

                from_clause = from_clause.join(subq.alias(), and_(*on_clause))

            qry = qry.select_from(from_clause)

            engine = self.database.get_sqla_engine()
            sql = str(
                qry.compile(engine, compile_kwargs={"literal_binds": True}))
            df = read_sql_query(sql=sql, con=engine)
            sql = textwrap.dedent(sql)

        else:
            """
            Legacy way of querying by building a SQL string without
            using the sqlalchemy expression API (new approach which supports
            all dialects)
            """
            engine = self.database.get_sqla_engine()
            sql = custom_query.format(**locals())
            df = read_sql_query(sql=sql, con=engine)
            sql = textwrap.dedent(sql)

        return QueryResult(df=df,
                           duration=datetime.now() - qry_start_dttm,
                           query=sql)
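
The timeseries_limit branch above joins the outer query back to a ranked inner subquery on the '__'-prefixed groupby labels; the shape of that pattern in isolation, with an assumed toy schema:

from sqlalchemy.sql import column, desc, select, table

t = table('metrics', column('country'), column('value'))
# Inner query: top 5 groups, labeled with the '__' prefix used for the join
inner = (
    select([column('country').label('__country')])
    .select_from(table('metrics'))
    .group_by(column('country'))
    .order_by(desc(column('value')))
    .limit(5)
)
# Outer query restricted to those groups via a join on the labeled column
outer = select([t.c.country, t.c.value]).select_from(
    t.join(inner.alias(), t.c.country == column('__country'))
)
print(outer)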
Example #37
def _insert_operation_form_field():
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('form_id', Integer),
        column('enable_conditions', String),
    )

    columns = ('id', 'name', 'type', 'required', 'order', 'default',
               'suggested_widget', 'values_url', 'values', 'scope', 'form_id',
               'enable_conditions')

    data = [
        # Flatten - data_format
        (4240, 'subsample', 'DECIMAL', 0, 7, 1.0, 'decimal', None, None,
         'EXECUTION', 4003, None),
        (4241, 'criterion', 'TEXT', 0, 8, 'friedman_mse', 'dropdown', None,
         json.dumps([
             {
                 'key': 'friedman_mse',
                 'value': 'friedman_mse'
             },
             {
                 'key': 'mse',
                 'value': 'mse'
             },
             {
                 'key': 'mae',
                 'value': 'mae'
             },
         ]), 'EXECUTION', 4003, None),
        (4242, 'min_weight_fraction_leaf', 'DECIMAL', 0, 9, 0, 'decimal', None,
         None, 'EXECUTION', 4003, None),
        (4243, 'max_depth', 'INTEGER', 0, 10, 3, 'integer', None, None,
         'EXECUTION', 4003, None),
        (4244, 'min_impurity_decrease', 'DECIMAL', 0, 11, 0, 'decimal', None,
         None, 'EXECUTION', 4003, None),
        (4245, 'init', 'TEXT', 0, 12, None, 'text', None, None, 'EXECUTION',
         4003, None),
        (4246, 'max_features', 'TEXT', 0, 13, None, 'dropdown', None,
         json.dumps([
             {
                 'key': 'auto',
                 'value': 'auto'
             },
             {
                 'key': 'sqrt',
                 'value': 'sqrt'
             },
             {
                 'key': 'log2',
                 'value': 'log2'
             },
         ]), 'EXECUTION', 4003, None),
        (4247, 'verbose', 'INTEGER', 0, 14, 0, 'integer', None, None,
         'EXECUTION', 4003, None),
        (4248, 'max_leaf_nodes', 'INTEGER', 0, 15, None, 'integer', None, None,
         'EXECUTION', 4003, None),
        (4249, 'warm_start', 'INTEGER', 0, 16, 0, 'integer', None, None,
         'EXECUTION', 4003, None),
        (4250, 'presort', 'TEXT', 0, 17, None, 'auto', None, None, 'EXECUTION',
         4003, None),
        (4251, 'validation_fraction', 'DECIMAL', 0, 18, 0.1, 'decimal', None,
         None, 'EXECUTION', 4003, None),
        (4252, 'n_iter_no_change', 'INTEGER', 0, 19, None, 'integer', None,
         None, 'EXECUTION', 4003, None),
        (4253, 'tol', 'DECIMAL', 0, 20, 1e-4, 'decimal', None, None,
         'EXECUTION', 4003, None),
    ]
    rows = [dict(zip(columns, row)) for row in data]
    op.bulk_insert(tb, rows)
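
The columns/zip idiom above turns each positional tuple into a keyword row for bulk_insert; in isolation:

columns = ('id', 'name')
data = [(1, 'alpha'), (2, 'beta')]
rows = [dict(zip(columns, row)) for row in data]
# rows == [{'id': 1, 'name': 'alpha'}, {'id': 2, 'name': 'beta'}]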
Example #38
def _insert_operation_form_field_translation():
    tb = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String),
    )

    columns = ('id', 'locale', 'label', 'help')
    data = [
        # Flatten - data_format
        (4240, 'en', 'Subsample',
         'The fraction of samples to be used for fitting the individual base learners.'
         ),
        (4240, 'pt', 'Subamostra',
         'A fração de amostras a serem usadas para ajustar os aprendizes da base'
         ' individual.'),
        (4241, 'en', 'Criterion',
         'The function to measure the quality of a split.'),
        (4241, 'pt', 'Critério',
         'A função para medir a qualidade de uma divisão.'),
        (4242, 'en', 'Minimum weighted fraction leaf',
         'The minimum weighted fraction of the sum total of weights (of '
         'all the input samples) required to be at a leaf node.'),
        (4242, 'pt', 'Folha de fração ponderada mínima',
         'A fração ponderada mínima da soma total de pesos (de todas as'
         ' amostras de entrada) necessária para estar em um nó folha.'),
        (4243, 'en', 'Maximum depth',
         'Maximum depth of the individual regression estimators.The maximum depth limits'
         ' the number of nodes in the tree.'),
        (4243, 'pt', 'Profundidade máxima',
         'Profundidade máxima dos estimadores de regressão individuais. A'
         ' profundidade máxima limita o número de nós na árvore.'),
        (4244, 'en', 'Minimum impurity decrease',
         'A node will be split if this split induces a decrease of the'
         ' impurity greater than or equal to this value.'),
        (4244, 'pt', 'Diminuição mínima da impureza',
         'Um nó será dividido se essa divisão induzir uma diminuição da'
         ' impureza maior ou igual a esse valor.'),
        (4245, 'en', 'Init',
         'An estimator object that is used to compute the initial predictions. If ‘zero’, the'
         ' initial raw predictions are set to zero. By default, a DummyEstimator predicting the'
         ' classes priors is used.'),
        (4245, 'pt', 'Previsões iniciais',
         'Um objeto estimador usado para calcular as previsões iniciais. Se "zero",'
         ' as previsões brutas iniciais são definidas como zero. Por padrão, um'
         ' DummyEstimator que prevê as classes anteriores é usado.'),
        (4246, 'en', 'Maximum features',
         'The number of features to consider when looking for the best split.'
         ),
        (4246, 'pt', 'Recursos máximos',
         'O número de recursos a serem considerados ao procurar a melhor divisão.'
         ),
        (4247, 'en', 'Verbose',
         'Enable verbose output. If 1 then it prints progress and performance once in a while'
         ' (the more trees the lower the frequency). If greater than 1 then it prints progress'
         ' and performance for every tree.'),
        (4247, 'pt', 'Saída detalhada',
         'Ativar saída detalhada. Se 1, ele imprime progresso e desempenho de vez em'
         ' quando (quanto mais árvores, menor a frequência). Se maior que 1, imprime o'
         ' progresso e o desempenho de todas as árvores.'),
        (4248, 'en', 'Maximum leaf nodes',
         'Grow trees with max_leaf_nodes in best-first fashion.'),
        (4248, 'pt', 'Número máximo de nós foliares',
         'Cultive árvores com max_leaf_nodes da melhor maneira possível.'),
        (4249, 'en', 'Warm start',
         'When set to True, reuse the solution of the previous call to fit and add more'
         ' estimators to the ensemble, otherwise, just erase the previous solution.'
         ),
        (4249, 'pt', 'Começo pré-definido',
         'Quando definido como True, reutilize a solução da chamada anterior do fit'
         ' e adicione mais estimadores ao conjunto, caso contrário, apenas apague a'
         ' solução anterior.'),
        (4250, 'en', 'Presort',
         'Whether to presort the data to speed up the finding of best splits in fitting.'
         ),
        (4250, 'pt', 'Pré-classificação',
         'Se os dados devem ser pré-classificados para acelerar a localização das'
         ' melhores divisões no ajuste.'),
        (4251, 'en', 'Validation fraction',
         'The proportion of training data to set aside as validation set for early'
         ' stopping.'),
        (4251, 'pt', 'Fração de validação',
         'A proporção de dados de treinamento a serem retirados como validação'
         ' definida para parada antecipada.'),
        (4252, 'en', 'Number of iterations with no change',
         'Used to decide if early stopping will be used to'
         ' terminate training when validation score is not'
         ' improving.'),
        (4252, 'pt', 'Número de iterações sem alteração',
         'Usado para decidir se a parada precoce será usada para'
         ' encerrar o treinamento quando a pontuação da validação não'
         ' estiver melhorando.'),
        (4253, 'en', 'Tolerance', 'Tolerance for the early stopping.'),
        (4253, 'pt', 'Tolerância', 'Tolerância à parada antecipada.'),
    ]
    rows = [dict(zip(columns, row)) for row in data]
    op.bulk_insert(tb, rows)
"""use automixmon features instead of automixmon

Revision ID: 500ca962c3a8
Revises: 2ea8d6cb26f6

"""

# revision identifiers, used by Alembic.
revision = '500ca962c3a8'
down_revision = '2ea8d6cb26f6'

from alembic import op
from sqlalchemy import sql, and_

features_table = sql.table('features', sql.column('category'),
                           sql.column('var_name'))


def upgrade():
    op.execute(features_table.update().where(
        and_(features_table.c.category == 'featuremap',
             features_table.c.var_name == 'automon')).values(
                 var_name='automixmon'))


def downgrade():
    op.execute(features_table.update().where(
        and_(features_table.c.category == 'featuremap',
             features_table.c.var_name == 'automixmon')).values(
                 var_name='automon'))
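
Because the statement is built from a lightweight sql.table() object, it can be inspected without a database connection; printing the upgrade statement yields roughly:

print(features_table.update().where(
    and_(features_table.c.category == 'featuremap',
         features_table.c.var_name == 'automon')).values(var_name='automixmon'))
# UPDATE features SET var_name=:var_name
# WHERE features.category = :category_1 AND features.var_name = :var_name_1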
Create Date: 2014-05-19 17:59:34.819427

"""

# revision identifiers, used by Alembic.
revision = '1d1573d5812f'
down_revision = '5560713e91b2'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column, select, and_

contexts_table = table(
    'contexts',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
    column('related_object_id', sa.Integer),
    column('related_object_type', sa.String),
)

programs_table = table(
    'programs',
    column('id', sa.Integer),
    column('context_id', sa.Integer),
)


def upgrade():
    connection = op.get_bind()

    programs = connection.execute(
        select([programs_table.c.id, programs_table.c.context_id]))
def upgrade():
    documents = table('documents', column('version_id', String),
                      column('type', String), column('content_type', String),
                      column('content', String))

    html_content = """
          <section>
        <p>The parties to these “BC Registry Terms” (the “Agreement”) are Her Majesty the Queen in Right of the Province of British Columbia, as represented by the Minister of Citizens’ Services (the “Province”) and the Subscriber (as defined below).</p>
      </section>
      <section>
        <header>1. <u>DEFINITIONS</u></header>
        <ul>
          <li><span>a.</span><strong>"Access"</strong> means the non-exclusive right to electronically access and use the Service;</li>
          <li><span>b.</span><strong>"Additional Terms"</strong> means, as applicable to the Subscriber’s use of the Service, any of the BC Online Terms and Conditions, the API Agreement, the BC Services Card Terms, the BCeID Terms, the PAD Agreement, or any combination of the foregoing;</li>
		  <li><span>c.</span><strong>“Authenticate” or “Authentication”</strong> means the process of verifying a Subscriber or Team Member’s identity for the purpose of obtaining Access, and may include the use of a mobile Services Card, BCeID Information or a notarized affidavit, as applicable;</li>
          <li><span>d.</span><strong>"Basic Account Subscriber"</strong> means a Subscriber with Access for up to ten Transactions per month paying Fees for Transactions using a credit card or online banking;</li>
          <li><span>e.</span><strong>"BCeID Information"</strong> means a BCeID account user ID or password, which authenticates the identity of the Subscriber or a Team Member, as the case may be, to the Service if the Subscriber or a Team Member uses a BCeID for this purpose;</li>
          <li><span>f.</span><strong>"Commencement Date"</strong> means the date on which the Subscriber accepts the terms of this Agreement as part of the application process for Access;</li>
          <li><span>g.</span><strong>"Content"</strong> means the Service’s Data Bases, and all associated information and documentation, including any print copy or electronic display of any information retrieved from the Data Base and associated with the Service;</li>
          <li><span>h.</span><strong>"Data Base"</strong> means any data base or information stored in electronic format for which Access is made available through the Service;</li>
          <li><span>i.</span><strong>"Deposit Account"</strong> has the meaning given to it in the BC Online Terms and Conditions;</li>
          <li><span>j.</span><strong>"Entity"</strong> means any legal entity (including a registered society, business, or co-operative) for which certain Subscribers and Team Members may have Access through the Service;</li>
          <li><span>k.</span><strong>"Fees"</strong> means all fees and charges for the Service, as described on the Website[LVAA1][MOU2], and includes without limitation, any expenses or charges incurred for Transactions, including any applicable Service Fee described in section 8.7 of this Agreement;</li>
          <li><span>l.</span><strong>"Incorporation Number"</strong> means the unique numerical identifier for a Subscriber’s Entity, and when entered in conjunction with the Passcode, permits a Team Member to perform transactions with regard to that Entity;</li>
          <li><span>m.</span><strong>“PAD Agreement”</strong> means the agreement referenced in section 8.4;</li>
          <li><span>n.</span><strong>"Passcode"</strong> means the unique identifier issued by the Province to a Subscriber with regard to existing Entities on the Service, which enables a Team Member to have Access with regard to those Entities;</li>
          <li><span>o.</span><strong>"Premium Account Subscriber"</strong> means a Subscriber with Access to unlimited Transactions that has either a Deposit Account with the Province and is charged Fees in accordance with the BC Online Terms and Conditions or has entered into a PAD Agreement and is charged Fees in accordance with that agreement;</li>
          <li><span>p.</span><strong>"Service"</strong> means all products and services available through BC Registries that may be utilized by Subscriber or any of its Team Members and includes Access, Transactions and the API;</li>
          <li><span>q.</span><strong>"Services Card"</strong> means the Subscriber’s BC Services Card, which authenticates the identity of the Subscriber, or a Team Member, as the case may be, to the Service if the Subscriber or a Team Member uses a BC Services Card for this purpose;</li>
          <li><span>r.</span><strong>"Subscriber"</strong> means a person that accesses the Service and that has accepted the terms of this Agreement, and includes Premium Account Subscribers and Basic Account Subscribers;</li>
          <li><span>s.</span><strong>"Team Member"</strong> means an individual that is granted Access on the individual’s behalf, if the individual is also the Subscriber, or on behalf of the Subscriber, if the individual is an employee or is otherwise authorized to act on behalf of the Subscriber, as applicable;</li>
          <li><span>t.</span><strong>"Transaction"</strong> means any action performed by the Subscriber or any of its Team Members with regard to the Service to display, print, transfer, or obtain a copy of information contained on the Service,  where permitted by the Province, to add to or delete information from the Service; or any other action necessary to make use of the Service; and</li>
          <li><span>u.</span><strong>"Website"</strong> means the BC Registry Website at https://www.bcregistry.ca/business/auth/home/decide-business and includes all web pages and associated materials, with the exception of the Content.</li>
        </ul>
      </section>
      <section>
        <header>2. <u>ACCEPTANCE OF AGREEMENT</u></header>
        <ul>
            <li><span>2.1</span>The Subscriber acknowledges that a duly authorized representative of the Subscriber has accepted the terms of this Agreement on behalf of the Subscriber and its Team Members.</li>
            <li><span>2.2</span>The Subscriber acknowledges and agrees that:</li>
            <li>
            <ul>
                <li><span>(a)</span>by creating a profile and/or by clicking the button acknowledging acceptance of this Agreement, each Team Member using the Services on behalf of the Subscriber also accepts, and will be conclusively deemed to have accepted, the terms of this Agreement as they pertain to the Team Member’s use of the Services;</li>
                <li><span>(b)</span>the Additional Terms are incorporated herein by reference and also govern and apply to the Subscriber and to each Team Member’s use of the Service; and</li>
                <li><span>(c)</span>the Subscriber will be solely responsible for its Team Members’ use of the Services, including without limitation any Fees incurred by its Team Members in connection with such Services.</li>
            </ul>
            </li>
            <li><span>2.3</span>The Province reserves the right to make changes to the terms of this Agreement at any time without direct notice to either the Subscriber or its Team Members, as applicable.  The Subscriber acknowledges and agrees that it is the sole responsibility of the Subscriber to review, and, as applicable, to ensure that its Team Members review, the terms of this Agreement on a regular basis.</li>
            <li><span>2.4</span>Following the date of any such changes, the Subscriber will be conclusively deemed to have accepted any such changes on its own behalf and on behalf of its Team Members, as applicable.  The Subscriber acknowledges and agrees that each of its Team Members must also accept any such changes as they pertain to the Team Member’s use of the Services.</li>
        </ul>
      </section>
      <section>
        <header>3. <u>AUTHENTICATION</u></header>
        <ul>
		  <li><span>3.1</span>Subscribers acknowledge that regardless of the method of Authentication, any information provided as part of the authentication process will be used by the Province in connection with the Services and the Subscriber is responsible for ensuring that such information is up to date and accurate.  Subscribers are responsible for ensuring that its Team Members are aware of and will comply with this provision.</li>
          <li><span>3.2</span>If a Subscriber or a Team Member has used the BC Services Card to authenticate in setting up an account to use the Service, the terms found at <a href="https://www2.gov.bc.ca/gov/content/governments/government-id/bc-services-card/log-in-with-card/terms-of-use" target="_blank">https://www2.gov.bc.ca/gov/content/governments/government-id/bc-services-card/log-in-with-card/terms-of-use</a> (the “BC Services Card Terms”) continue to apply in respect of use of the BC Services Card.</li>
          <li><span>3.3</span>If the Subscriber or a Team Member has used a BCeID to authenticate in setting up an account to use the Service, the BCeID terms found at <a href="https://www.bceid.ca/aboutbceid/agreements.aspx" target="_blank">https://www.bceid.ca/aboutbceid/agreements.aspx</a> (the “BCeID Terms”) continue to apply in respect of the type of BCEID used.</li>
        </ul>
        </section>
        <section>
        <header>4. <u>PROPRIETARY RIGHTS</u></header>
        <ul>
          <li><span>4.1</span>The Website and the Content is owned by the Province and/or its licensors and is protected by copyright, trademark and other laws. Except as expressly permitted in this Agreement, the Subscriber may not use, reproduce, modify or distribute, or allow any other person to use, reproduce, modify or distribute, any part of the Website in any form whatsoever without the prior written consent of the Province.</li>
        </ul>
        </section>
        <section>
        <header>5. <u>SERVICES</u></header>
        <ul>
          <li><span>5.1</span>The Province will provide the Subscriber and its Team Members with Access on the terms and conditions set out in this Agreement.</li>
          <li><span>5.2</span>Subject to section 5.3, Access will be available during the hours published on the Website, as may be determined by the Province in its sole discretion from time to time.</li>
          <li><span>5.3</span>The Province reserves the right to limit or withdraw Access at any time in order to perform maintenance of the Service or in the event that the integrity or security of the Service is compromised.</li>
          <li><span>5.4</span>The Province further reserves the right to discontinue the Service at any time.</li>
          <li><span>5.5</span>The Province will provide helpdesk support to assist Team Members with Access during the hours published on the Website, as may be determined by the Province in its sole discretion from time to time.</li>
          <li><span>5.6</span>The Subscriber acknowledges and agrees that, for the purpose of Access:</li>
          <li>
          <ul>
              <li><span>(a)</span>it is the Subscriber’s sole responsibility, at the Subscriber’s own expense, to provide, operate and maintain computer hardware and communications software or web browser software that is compatible with the Services; and</li>
              <li><span>(b)</span>any failure to do so may impact the Subscriber’s and/or Team Member’s ability to access the Service.</li>
          </ul>
          </li>
        </ul>
      </section>
      <section>
        <header>6. <u>API</u></header>
        <ul>
            <li><span>6.1</span>Premium Account Subscribers who wish to access the Content through an API will be required to enter into a separate API Agreement with the Province (the <strong>“API Agreement”</strong>), including the requirement to contact the Province for an API key and access to either a test environment or the API interface</li>
        </ul>
      </section>
      <section>
        <header>7. <u>SUBSCRIBER OBLIGATIONS</u></header>
        <ul>
          <li><span>7.1</span>The Subscriber will comply, and will ensure that all of its Team Members are aware of and will comply, with:</li>
          <li>
            <ul>
                <li><span>(a)</span>the terms of this Agreement, including the requirements regarding the integrity and/or security of the Service set out in this Article 7; and
                <li><span>(b)</span>all applicable laws,</li>
            </ul>
          </li>
		  <li>in connection with the Subscriber’s and/or Team Members’ use of the Services.</li>
          <li><span>7.2</span>The Subscriber will ensure that each Team Member:</li>
          <li>
            <ul>
                <li><span>(a)</span>is duly authorized by the Subscriber to perform any Transaction and utilize the Service on behalf of the Subscriber;</li>
                <li><span>(b)</span>maintains in confidence Services Card Numbers, BCeID Information, Incorporation Numbers and Passcodes;</li>
                <li><span>(c)</span>is competent to perform a Transaction and utilize the Service;</li>
                <li><span>(d)</span>has been adequately trained and instructed to perform a Transaction and utilize the Service; and</li>
                <li><span>(e)</span>does not use the Service for any inappropriate or unlawful purpose.</li>
            </ul>
          </li>
          <li><span>7.3</span>The Subscriber will not, and will ensure that its Team Members do not, take any action that would compromise the integrity and/or security of the Service or any Content.</li>
          <li><span>7.4</span>Without limiting the general nature of the foregoing section, the Subscriber will not, and will ensure that its Team Members do not:</li>
          <li>
            <ul>
              <li><span>(a)</span>use the Service or any Content for activities or for a purpose different from those for which Access was granted including without limitation any unlawful activities or purpose;</li>
              <li><span>(b)</span>attempt to circumvent or subvert any security measures;</li>
              <li><span>(c)</span>take any action or use any program that impedes, restricts, limits or otherwise jeopardizes the operation and/or availability of the Service or any Content;</li>
              <li><span>(d)</span>take any action that might reasonably be construed as likely to adversely affect any other Subscriber, or Team Member;</li>
              <li><span>(e)</span>alter or delete any information in any Data Base unless explicitly authorized to do so by the Province;</li>
              <li><span>(f)</span>alter in any way whatsoever a printout or display of any information retrieved from any Data Base unless explicitly authorized to do so by the Province; or</li>
              <li><span>(g)</span>use, reproduce or distribute any altered information, including any printout or display of altered information, or represent any altered information as having been retrieved from any Data Base unless explicitly authorized to do so by the Province.</li>
            </ul>
          </li>
          <li><span>7.5</span> The Subscriber will adhere, and will ensure that each of its Team Members adhere, to any applicable security policies, standards or procedures in respect of a particular Data Base that may be provided to the Subscriber and/or its Team Members by the Province from time to time.</li>
        </ul>
      </section>
    <section>
      <header>8. <u>FEES</u></header>
      <ul>
        <li><span>8.1</span>The Subscriber will pay to the Province all applicable Fees for the Services.</li>
        <li><span>8.2</span>Subject to section 8.5, all Fees are due and payable when a Transaction is processed.</li>
        <li><span>8.3</span>If a Premium Account Subscriber opts to pay Fees through a Deposit Account, the Fees payable for Transactions will be charged to the applicable Deposit Account and in accordance with the BC Online Terms and Conditions found at <a href="https://www.bconline.gov.bc.ca/terms_conditions.html" target="_blank">(https://www.bconline.gov.bc.ca/terms_conditions.html)</a> (the <strong>“BC Online Terms”</strong>).</li>
        <li><span>8.4</span>If a Premium Account Subscriber opts to pay Fees through pre-authorized debit, the Fees payable for Transactions will be paid according to the PAD Agreement found at <a href="./PAD-terms-and-conditions" target="_blank">(https://www.bcregistry.ca/business/auth/PAD-terms-and-conditions)</a> (the Business Pre-Authorized Debit Terms and Conditions Agreement).</li>
        <li><span>8.5</span>Fees payable for Transactions processed by Basic Account Subscribers will be payable by credit card or online banking before the Transaction is processed.</li>
        <li><span>8.6</span>Unless otherwise specified in this Agreement, all references to money in respect of the Services are to Canadian dollars and all Fees will be processed in Canadian dollars.</li>
        <li><span>8.7</span>The Province may charge the Subscriber a service fee of thirty dollars ($30.00) if any method of payment of any Fees is rejected by the Subscriber’s financial institution for any failed payment, and may suspend Access until such service fee and all other Fees owing have been paid by the Subscriber.</li>
        <li><span>8.8</span>By law, some Transactions cannot be reversed and no refund or credit for these types of transactions will be issued.  For all other Transactions or Services, any refund or credit is at the sole discretion of the Province.</li>
        <li><span>8.9</span>The Province, by electronic or other means, will provide to the Premium Subscriber, at regular intervals to be determined by the Subscriber from options provided to the Subscriber by the Province, a statement that contains: (a) an itemized list of Transactions and (b) the total Fees for those Transactions.</li>
        <li><span>8.10</span>If a Subscriber has a BC Online account and wishes to receive a statement containing consolidated accounting of Transactions made in BC Online and the new BC Registry System, the Subscriber must link both accounts.</li>
        <li><span>8.11</span>Unless otherwise specified in any Additional Terms, if a Subscriber does not notify the Province in writing of any errors in or objections to any Fees identified in the Statement within ninety (90) days of the date of the applicable invoice, the Fees set out in the invoice will be conclusively deemed to have been accepted as correct by the Subscriber and no claim for adjustment or set-off will be accepted.</li>
      </ul>
    </section>
    <section>
      <header>9. <u>RELATIONSHIP</u></header>
      <ul>
        <li><span>9.1</span>This Agreement will not in any way make the Subscriber or any Team Member an employee, agent or independent contractor of the Province and the Subscriber will not, and will ensure that its Team Members do not, in any way indicate or hold out to any person that the Subscriber or any Team Member is an employee, agent or independent contractor of the Province.</li>
      </ul>
    </section>
    <section>
      <header>10. <u>SUSPENSION OF SERVICE</u></header>
      <ul>
        <li><span>10.1</span>The Province may, in its sole discretion, immediately suspend Access upon notice to the Subscriber in accordance with section 13 if:</li>
        <li>
          <ul>
            <li><span>(a)</span>the Subscriber or any of its Team Members has, in the reasonable opinion of the Province, in any way jeopardized the integrity or security of the Service;</li>
            <li><span>(b)</span>the Subscriber fails to pay Fees in accordance with section 8.2 or 8.5, as applicable; or</li>
            <li><span>(c)</span>the Subscriber or any of its Team Members has violated any other provision of this Agreement.</li>
          </ul>
        </li>
      </ul>
    </section>
    <section>
      <header>11. <u>TERMINATION</u></header>
      <ul>
        <li><span>11.1</span>The term of this Agreement will be from the Commencement Date and will continue until terminated in accordance with the provisions of this Agreement.</li>
        <li><span>11.2</span>The Province may immediately terminate this Agreement upon written notice to the Subscriber if the Subscriber’s Access has been suspended pursuant to Article 10.1.</li>
        <li><span>11.3</span>This Agreement may be terminated by either party for any reason upon providing sixty (60) days written notice to the other party.</li>
        <li><span>11.4</span>Upon termination:</li>
        <li>
          <ul>
            <li><span>(a)</span>the Subscriber will immediately cease, and will ensure that all of its Team Members immediately cease, all use on the Subscriber’s behalf of the Service and all Passcodes; and</li>
            <li><span>(b)</span>Premium Account Subscribers will pay to the Province all unpaid Fees incurred by the Subscriber up to the date of termination.</li>
          </ul>
        </li>
        <li><span>11.5</span>In the event that a Subscriber’s Agreement is terminated, the Province reserves the right to refuse future Access to that Subscriber or to downgrade a Premium Account Subscriber to a Basic Account Subscriber, in which case the Subscriber acknowledges and agrees that it is only entitled to Access up to ten Transactions per month.</li>
      </ul>
    </section>
    <section>
      <header>12. <u>WARRANTY DISCLAIMER, LIMITATION OF LIABILITY AND INDEMNITY</u></header>
      <ul>
        <li><span>12.1</span>THE SUBSCRIBER ACKNOWLEDGES AND CONFIRMS THAT THE SUBSCRIBER UNDERSTANDS THAT THIS ARTICLE 12 REQUIRES THE SUBSCRIBER TO ASSUME THE FULL RISK IN RESPECT OF ANY USE OF THE SERVICES BY THE SUBSCRIBER AND/OR ITS TEAM MEMBERS.</li>
        <li><span>12.2</span>Except as expressly set out in this Agreement, and in addition to the Province’s general <u>Warranty Disclaimer and Limitation of Liabilities</u>, the Province assumes no responsibility or liability to any person using the Service or any Content.  In particular, without limiting the general nature of the foregoing:</li>
        <li>
        <ul>
          <li><span>(a)</span>in no event will the Province, its respective servants, agents, contractors or employees be liable for any direct, indirect, special or consequential damages or other loss, claim or injury, whether foreseeable or unforeseeable (including without limitation claims for damages for personal injury, lost profits, lost savings or business opportunities) arising out of or in any way connected with the use of, or inability to use the Service or any Content;</li>
          <li><span>(b)</span>the entire risk as to the quality and performance of the Service or any Content is assumed by the Subscriber;</li>
          <li><span>(c)</span>the Service and all Content are provided “as is”, and the Province disclaims all representations, warranties, conditions, obligations and liabilities of any kind, whether express or implied, in relation to the Service or any Content, including without limitation implied warranties with respect to merchantability, fitness for a particular purpose, error-free or uninterrupted use and non-infringement; and</li>
          <li><span>(d)</span>in no event will the Province, its respective servants, agents, contractors or employees be liable for any loss or damage in connection with the Service or any Content, including without limitation any loss or damage caused by any alteration of the format or content of a print copy or electronic display of any information retrieved from the Service, the quality of any print display, the information contained in any screen dump, any system failure, hardware malfunction, manipulation of data, inadequate or faulty Transaction and/or Service, or delay or failure to provide Access to any Team Member or any person using a Team Member's Incorporation Numbers or Passcodes or using any information provided by a Subscriber or any Team Member from the Service.</li>
        </ul>
        <li><span>12.3</span>The Subscriber must indemnify and save harmless the Province and its respective servants, agents, contractor and employees from any losses, claims, damages, actions, causes of action, costs and expenses that the Province or any of its respective servants, agents, contractors or employees may sustain, incur, suffer or be put to at any time, either before or after this Agreement ends, including any claim of infringement of third-party intellectual property rights, where the same or any of them are based upon, arise out of or occur, directly or indirectly, by reason of any act or omission by the Subscriber, a Team Member or by any of the Subscriber’s other agents, employees, officers or directors in connection with this Agreement.</li>
      </ul>
    </section>
    <section>
      <header>13. <u>NOTICES</u></header>
      <ul>
        <li><span>13.1</span>Any written notice either party may be required or may desire to give to the other under this Agreement will be conclusively deemed validly given to or received by the addressee, if delivered personally or by recognized courier service, on the date of such personal delivery, if mailed by prepaid registered mail, on the third business day after the mailing of the same in British Columbia or on the seventh business day if mailed elsewhere, and if delivered by email, on the date received by the recipient:
        <li>
          <ul>
            <li><span>(a)</span>If to the Subscriber, to the address or email address indicated on the Subscriber’s application for the Service, or such other address or email address of which the Subscriber has notified the Province in writing; and</li>
          </ul>
        </li>
        <li>
          <ul>
            <p><span>(b)</span>If to the Province:<br></p>
            <p>
              Delivery by mail:
            </p>
            <p>
              BC Online Partnership Office<br>
              Ministry of Citizens’ Services<br>
              PO Box 9412 Stn Prov Govt<br>
              Victoria, BC V8W 9V1
            </p>
            <p>Delivery by courier or in person:</p>
            <p>
              BC Online Partnership Office<br>
              Ministry of Citizens’ Services<br>
              E161 – 4000 Seymour Place<br>
              Victoria, BC V8X 4S8
            </p>
            <p>Delivery by email:</p>
            [email protected]
          </ul>
        </li>
        <li><span>13.2</span>The Subscriber will provide the Province with timely written notice of any change of contact information provided by the Subscriber during the application process for Access, and after the provision of such notice, the updated contact information will be conclusively deemed to be the current contact information for the Subscriber, including the Subscriber’s address or email address for the purposes of this Article 13.</li>
        <li><span>13.3</span>The Province may, from time to time, advise the Subscriber by notice in writing of any change of address of the Province and from and after the giving of such notice the address specified in the notice will, for the purposes of this Article 13, be conclusively deemed to be the address or email address of the Province.</li>
        <li><span>13.4</span>In the event of a disruption of postal services, all mailed notices will be deemed validly given and received when actually received by the addressee.</li>
      </ul>
    </section>
    <section>
      <header>14. <u>GENERAL</u></header>
      <ul>
        <li><span>14.1</span>In this Agreement,</li>
        <li>
        <ul>
          <li><span>(a)</span>unless the context otherwise requires, references to section or Articles by number are to sections or Articles of the body of the Agreement;</li>
          <li><span>(b)</span>unless otherwise specified, a reference to a statute by name means the statute of British Columbia by that name, as amended or replaced from time to time;</li>
          <li><span>(c)</span>"person" includes an individual, partnership, corporation or legal entity of any nature; and</li>
          <li><span>(d)</span>unless the context otherwise requires, words expressed in the singular includes the plural and vice versa.</li>
        </ul>
        </li>
        <li><span>14.2</span>The Schedules to this Agreement are a part of this Agreement.</li>
        <li><span>14.3</span>In the event of any conflict or inconsistency between a term in the body of this Agreement and a term in a Schedule, the term in the Schedule will prevail.</li>
        <li><span>14.4</span>The Subscriber will not, without the prior written consent of the Province, assign, either directly or indirectly, this Agreement or any right of the Subscriber under this Agreement.</li>
        <li><span>14.5</span>This Agreement will be for the benefit of and be binding upon the successors and permitted assigns of each of the parties.</li>
        <li><span>14.6</span>This Agreement (including any terms incorporated by reference herein) is the entire agreement between the Subscriber and the Province with respect to the subject matter of this Agreement, and supersedes and replaces any prior oral and/or written agreements.</li>
        <li><span>14.7</span>The headings in this Agreement are inserted for convenience only, and will not be used in interpreting or construing any provision of this Agreement.</li>
        <li><span>14.8</span>All provisions in this Agreement in favour of either party and all rights and remedies of either party, either at law or in equity, will survive the expiration or sooner termination of this Agreement.</li>
        <li><span>14.9</span>If any provision of this Agreement is invalid, illegal or unenforceable, that provision will be severed from this Agreement and all other provisions will remain in full force and effect.</li>
        <li><span>14.10</span>This Agreement will be governed by and construed in accordance with the laws of British Columbia and the laws of Canada applicable therein.  By using the Service, the Subscriber consents to the exclusive jurisdiction and venue of the courts of the province of British Columbia for the hearing of any dispute arising from or related to this Agreement and/or the Subscriber’s use of the Service.</li>
      </ul>
    </section>
      """

    op.bulk_insert(documents, [{
        'version_id': '5',
        'type': 'termsofuse',
        'content': html_content,
        'content_type': 'text/html'
    }])
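
No downgrade is shown for this revision; a hypothetical sketch that removes the seeded row (the table object is redefined locally, since the original is scoped to upgrade()):

from alembic import op
from sqlalchemy import String
from sqlalchemy.sql import and_, column, table

def downgrade():
    documents = table('documents',
                      column('version_id', String),
                      column('type', String))
    # Delete only the terms-of-use row seeded as version 5
    op.execute(
        documents.delete().where(
            and_(documents.c.version_id == '5',
                 documents.c.type == 'termsofuse')))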
Example #42
from copy import deepcopy

from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.sql import column
from sqlalchemy.sql import functions
from sqlalchemy.sql import table
from sqlalchemy.sql.compiler import BIND_TEMPLATES
from sqlalchemy.sql.functions import FunctionElement
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.assertions import expect_warnings
from sqlalchemy.testing.engines import all_dialects


table1 = table(
    "mytable",
    column("myid", Integer),
    column("name", String),
    column("description", String),
)


class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
    __dialect__ = "default"

    def setup(self):
        self._registry = deepcopy(functions._registry)

    def teardown(self):
        functions._registry = self._registry

    def test_compile(self):
        for dialect in all_dialects(exclude=("sybase",)):
Example #43
Create Date: 2013-11-22 00:10:39.635553

"""

# revision identifiers, used by Alembic.
revision = '24c56d737c18'
down_revision = '18bf74925b9'

import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json

roles_table = table('roles', column('id', sa.Integer),
                    column('name', sa.String),
                    column('permissions_json', sa.String))


def upgrade():
    folder_set = set(["Program", "Audit", "Request"])

    connection = op.get_bind()
    roles = connection.execute(
        select([roles_table.c.id, roles_table.c.permissions_json]))

    for role_id, permissions_json in roles:
        permissions = json.loads(permissions_json)
        if permissions is None:
            continue
Example #44
def test_insert(self):
    t = table("sometable", column("somecolumn"))
    self.assert_compile(
        t.insert(),
        "INSERT INTO sometable (somecolumn) VALUES (:somecolumn)",
    )
def upgrade():
    op.add_column(u'report_partition_info',
                  sa.Column(u'partition_column', sa.TEXT()))

    # Drop FK constraints on parent tables
    op.drop_constraint(
        'signature_summary_architecture_product_version_id_fkey',
        'signature_summary_architecture')
    op.drop_constraint('signature_summary_architecture_signature_id_fkey',
                       'signature_summary_architecture')
    op.drop_constraint(
        'signature_summary_flash_version_product_version_id_fkey',
        'signature_summary_flash_version')
    op.drop_constraint('signature_summary_flash_version_signature_id_fkey',
                       'signature_summary_flash_version')
    op.drop_constraint('signature_summary_installations_signature_id_fkey',
                       'signature_summary_installations')
    op.drop_constraint('signature_summary_os_product_version_id_fkey',
                       'signature_summary_os')
    op.drop_constraint('signature_summary_os_signature_id_fkey',
                       'signature_summary_os')
    op.drop_constraint(
        'signature_summary_process_type_product_version_id_fkey',
        'signature_summary_process_type')
    op.drop_constraint('signature_summary_process_type_signature_id_fkey',
                       'signature_summary_process_type')
    op.drop_constraint('signature_summary_products_product_version_id_fkey',
                       'signature_summary_products')
    op.drop_constraint('signature_summary_products_signature_id_fkey',
                       'signature_summary_products')
    op.drop_constraint('signature_summary_uptime_product_version_id_fkey',
                       'signature_summary_uptime')
    op.drop_constraint('signature_summary_uptime_signature_id_fkey',
                       'signature_summary_uptime')

    app_path = os.getcwd()
    procs = [
        'weekly_report_partitions.sql', 'backfill_weekly_report_partitions.sql'
    ]
    for myfile in [
            app_path + '/socorro/external/postgresql/raw_sql/procs/' + line
            for line in procs
    ]:
        with open(myfile, 'r') as f:
            proc = f.read()
        op.execute(proc)
    # Now set partition_column for all existing report_partition_info rows
    op.execute("""
        UPDATE report_partition_info
        SET partition_column = 'date_processed'
    """)

    report_partition_info = table(
        u'report_partition_info',
        column(u'build_order', sa.INTEGER()),
        column(u'fkeys', postgresql.ARRAY(sa.TEXT())),
        column(u'indexes', postgresql.ARRAY(sa.TEXT())),
        column(u'keys', postgresql.ARRAY(sa.TEXT())),
        column(u'table_name', CITEXT()),
        column(u'partition_column', sa.TEXT()),
    )
    op.bulk_insert(report_partition_info, [
        {
            'table_name': u'signature_summary_installations',
            'build_order': 5,
            'fkeys': ["(signature_id) REFERENCES signatures(signature_id)"],
            'partition_column': 'report_date',
            'keys': ["signature_id,product_name,version_string,report_date"],
            'indexes': ["report_date"],
        },
        {
            'table_name': u'signature_summary_architecture',
            'build_order': 6,
            'fkeys': [
                "(signature_id) REFERENCES signatures(signature_id)",
                "(product_version_id) REFERENCES product_versions(product_version_id)",
            ],
            'partition_column': 'report_date',
            'keys': ["signature_id, architecture, product_version_id, report_date"],
            'indexes': ["report_date"],
        },
        {
            'table_name': u'signature_summary_flash_version',
            'build_order': 7,
            'fkeys': [
                "(signature_id) REFERENCES signatures(signature_id)",
                "(product_version_id) REFERENCES product_versions(product_version_id)",
            ],
            'partition_column': 'report_date',
            'keys': ["signature_id, flash_version, product_version_id, report_date"],
            'indexes': ["report_date"],
        },
        {
            'table_name': u'signature_summary_os',
            'build_order': 8,
            'fkeys': [
                "(signature_id) REFERENCES signatures(signature_id)",
                "(product_version_id) REFERENCES product_versions(product_version_id)",
            ],
            'partition_column': 'report_date',
            'keys': ["signature_id, os_version_string, product_version_id, report_date"],
            'indexes': ["report_date"],
        },
        {
            'table_name': u'signature_summary_process_type',
            'build_order': 9,
            'fkeys': [
                "(signature_id) REFERENCES signatures(signature_id)",
                "(product_version_id) REFERENCES product_versions(product_version_id)",
            ],
            'partition_column': 'report_date',
            'keys': ["signature_id, process_type, product_version_id, report_date"],
            'indexes': ["report_date"],
        },
        {
            'table_name': u'signature_summary_products',
            'build_order': 10,
            'fkeys': [
                "(signature_id) REFERENCES signatures(signature_id)",
                "(product_version_id) REFERENCES product_versions(product_version_id)",
            ],
            'partition_column': 'report_date',
            'keys': ["signature_id, product_version_id, report_date"],
            'indexes': ["report_date"],
        },
        {
            'table_name': u'signature_summary_uptime',
            'build_order': 11,
            'fkeys': [
                "(signature_id) REFERENCES signatures(signature_id)",
                "(product_version_id) REFERENCES product_versions(product_version_id)",
            ],
            'partition_column': 'report_date',
            'keys': ["signature_id, uptime_string, product_version_id, report_date"],
            'indexes': ["report_date"],
        },
    ])

    op.alter_column(u'report_partition_info',
                    u'partition_column',
                    existing_type=sa.TEXT(),
                    nullable=False)

    op.execute("""
        SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_architecture')
    """)
    op.execute("""
        SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_flash_version')
    """)
    op.execute("""
        SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_installations')
    """)
    op.execute("""
        SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_os')
    """)
    op.execute("""
        SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_process_type')
    """)
    op.execute("""
        SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_products')
    """)
    op.execute("""
        SELECT backfill_weekly_report_partitions('2013-06-03', '2013-08-12', 'signature_summary_uptime')
    """)
Example #46
down_revision = '33e0cdb6971d'

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql

TYPE_FORWARD = 'forward'

DESTINATION_FEATURES_ID = 8

PARKING_TYPE = 'parkext'

phonefunckey_table = sql.table('phonefunckey', sql.column('iduserfeatures'),
                               sql.column('fknum'), sql.column('exten'),
                               sql.column('typeextenumbers'),
                               sql.column('typevalextenumbers'),
                               sql.column('typeextenumbersright'),
                               sql.column('typevalextenumbersright'),
                               sql.column('label'), sql.column('supervision'),
                               sql.column('progfunckey'))

func_key_table = sql.table('func_key', sql.column('id'), sql.column('type_id'),
                           sql.column('destination_type_id'))

dest_features_table = sql.table('func_key_dest_features',
                                sql.column('func_key_id'),
                                sql.column('destination_type_id'),
                                sql.column('features_id'))

func_key_type_table = sql.table('func_key_type', sql.column('id'),
                                sql.column('name'))
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
#     if is_sqlite:
#         with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
#             batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
#     else:
#         op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
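# For the reverse direction the same branch applies; a minimal sketch,
# assuming the same is_sqlite / sqlite_reflect_kwargs helpers from this
# migration environment:
#
# def downgrade():
#     if is_sqlite:
#         with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
#             batch_op.drop_column('column_name')
#     else:
#         op.drop_column('table_name', 'column_name')
#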
# ===========================================================================

dept_membership_request_table = table(
    'dept_membership_request',
    sa.Column('id', residue.UUID()),
    sa.Column('attendee_id', residue.UUID()),
    sa.Column('department_id', residue.UUID()),
)


def upgrade():
    if is_sqlite:
        with op.batch_alter_table(
                'job', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
            batch_op.add_column(
                sa.Column('visibility',
                          sa.Integer(),
                          server_default='0',
                          nullable=False))
            batch_op.create_index('ix_job_department_id', ['department_id'],
                                  unique=False)
Example #48
Revises: 10ac78260434
Create Date: 2016-04-07 17:36:05.351561

"""

# revision identifiers, used by Alembic.
revision = 'adb90a264e3'
down_revision = '10ac78260434'

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
import sqlalchemy_utils

order = table('customer_order',
              column('id', sqlalchemy_utils.types.uuid.UUIDType()),
              column('status', sa.Integer))

line_item = table(
    'line_item',
    column('customer_order_id', sqlalchemy_utils.types.uuid.UUIDType()),
    column('status', sa.Integer))


def upgrade():
    purchase_order_query = sa.select([order.c.id]).where(order.c.status == 0)
    op.execute(line_item.update().where(
        line_item.c.customer_order_id.in_(purchase_order_query)).values(
            {'status': 2}))
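
    # Because in_() accepts the whole SELECT, the update above runs as a
    # single correlated UPDATE. To preview the statement outside of a
    # migration (a sketch; the dialect choice is an assumption made for
    # illustration):
    #
    #     from sqlalchemy.dialects import postgresql
    #     stmt = line_item.update().where(
    #         line_item.c.customer_order_id.in_(purchase_order_query)
    #     ).values({'status': 2})
    #     print(stmt.compile(dialect=postgresql.dialect(),
    #                        compile_kwargs={'literal_binds': True}))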

def test_cast(self, type_, expected):
    t = sql.table("t", sql.column("col"))
    self.assert_compile(cast(t.c.col, type_), expected)

def setup_test(self):
    self.table = table(
        "mytable", column("myid", Integer), column("name", String)
    )

def test_match(self):
    matchtable = table("matchtable", column("title", String))
    self.assert_compile(
        matchtable.c.title.match("somstr"),
        "MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)",
    )
Example #52
"""

# revision identifiers, used by Alembic.
revision = 'd2e48801c8ef'
down_revision = 'e169a4a81d88'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql

from ironic_inspector import introspection_state as istate

Node = sql.table('nodes',
                 sql.column('error', sa.String),
                 sql.column('state', sa.Enum(*istate.States.all())))


def upgrade():
    state_enum = sa.Enum(*istate.States.all(), name='node_state')
    state_enum.create(op.get_bind())

    op.add_column('nodes', sa.Column('version_id', sa.String(36),
                                     server_default=''))
    op.add_column('nodes', sa.Column('state', state_enum,
                                     nullable=False,
                                     default=istate.States.finished,
                                     server_default=istate.States.finished))
    # correct the state: finished -> error if Node.error is not null
    stmt = Node.update().where(Node.c.error != sql.null()).values(
        {'state': op.inline_literal(istate.States.error)})
    op.execute(stmt)
"""

# revision identifiers, used by Alembic.
revision = '610'
down_revision = '600'

import re

from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql

briefs = table(
    'briefs',
    column('id', sa.Integer),
    column('data', postgresql.JSON),
)


def upgrade():
    conn = op.get_bind()
    for brief in conn.execute(briefs.select()):
        # Skip briefs with missing or integer 'numberOfSuppliers'
        if brief.data.get('numberOfSuppliers') is None or isinstance(
                brief.data['numberOfSuppliers'], int):
            continue

        # Get the last number in the 'numberOfSuppliers' string
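        # (the lookahead keeps only the final run of digits: for instance,
        # the illustrative input 'between 3 and 5 suppliers' yields '5')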
        number_of_suppliers = re.search(r'(\d+)(?!.*\d)',
                                        brief.data['numberOfSuppliers'])
        if number_of_suppliers:
            # Replace the string value with the parsed integer and save
            brief.data['numberOfSuppliers'] = int(number_of_suppliers.group(1))
            conn.execute(
                briefs.update().where(briefs.c.id == brief.id).values(
                    data=brief.data))
Example #54
def _insert_operation_translation():
    tb = table(
        'operation_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String),
    )
    columns = [c.name for c in tb.columns]
    data = [
        (4021, 'en', 'Logistic regression Classifier',
         'Performs logistic regression.'),
        (4021, 'pt', 'Classificador Regressão Logistica',
         'Classificador por Regressão Logistica.'),
        (4022, 'en', 'Random forest classifier', 'Random forest classifier.'),
        (4022, 'pt', 'Classificador random forest',
         'Classificador random forest.'),
        (4023, 'en', 'GBT Classifier',
         'Gradient-Boosted Trees (GBTs) learning algorithm for classification. It supports binary labels, as well as both continuous and categorical features.'
         ),
        (4023, 'pt', 'Classificador GBT',
         'Algoritmo de aprendizado para classificação Gradient-Boosted Trees (GBTs). Suporta rótulos binários e features contínuas e categóricas.'
         ),
        (4024, 'en', 'Decision tree classifier',
         'Decision tree learning algorithm for classification. It supports both binary and multiclass labels, as well as both continuous and categorical features.'
         ),
        (4024, 'pt', 'Classif. Árv. Decisão',
         'Classificador baseado em árvores de decisão. Suporta tanto rótulos binários quanto multiclasses e features contínuas e categóricas.'
         ),
        (4025, 'en', 'Perceptron Classifier',
         'Classifier trainer based on the Multilayer Perceptron.'),
        (4025, 'pt', 'Classificador Perceptron',
         'Classificador baseado no Perceptron de Multicamadas.'),
        (4026, 'en', 'Gradient Boosting Regressor',
         'Gradient Boosting for regression'),
        (4026, 'pt', 'Regressor Gradient Boosting',
         'Regressão por Gradient Boosting'),
        (4027, 'en', 'Linear Regression',
         'Linear regression with combined L1 and L2 priors as regularizer (ElasticNet).'
         ),
        (4027, 'pt', 'Regressão Linear',
         'Regressão linear com combinações de regularizadores L1 e L2 (ElasticNet).'
         ),
        (4028, 'en', 'Random Forest Regressor', 'A random forest regressor.'),
        (4028, 'pt', 'Regressão por Random Forest',
         'Um regressor por random forest.'),
        (4029, 'en', 'SGD Regressor',
         'Linear model fitted by minimizing a regularized empirical loss with Stochastic Gradient Descent.'
         ),
        (4029, 'pt', 'Regressor SGD',
         'Modelo linear ajustado por minimização com o gradiente descendente estocástico.'
         ),
        (4030, 'en', 'Huber Regressor',
         'Linear regression model that is robust to outliers.'),
        (4030, 'pt', 'Regressor Huber',
         'Modelo de regressão linear que é robusto para outliers.'),
        (4031, 'en', 'SVM Classification', 'Uses a SVM Classifier.'),
        (4031, 'pt', 'Classificador SVM', 'Usa um classificador SVM.'),
        (4032, 'en', 'Naive-Bayes Classifier',
         'Uses a Naive-Bayes Classifier.'),
        (4032, 'pt', 'Classificador Naive-Bayes',
         'Usa um classificador Naive-Bayes.'),
        (4033, 'en', 'K-Means Clustering',
         'Uses K-Means algorithm for clustering.'),
        (4033, 'pt', 'Agrupamento K-Means',
         'Usa o algoritmo K-Means para agrupamento.'),
        (4034, 'en', 'Multi-layer Perceptron classifier',
         'Multi-layer Perceptron classifier.'),
        (4034, 'pt', 'Classificador Perceptron multicamadas',
         'Classificador Perceptron multicamadas.'),
        (4035, 'en', 'Multi-layer Perceptron Regressor',
         'Multi-layer Perceptron Regressor.'),
        (4035, 'pt', 'Regressor Perceptron multicamadas',
         'Regressor Perceptron multicamadas.'),
        (4036, 'en', 'KNN Classification', 'Uses a KNN Classifier'),
        (4036, 'pt', 'Classificador KNN', 'Usa um classificador KNN'),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
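

def _delete_operation_translation():
    # Matching cleanup for a downgrade; a sketch assuming the id range
    # inserted above (4021-4036), not part of the original migration.
    tb = table('operation_translation', column('id', Integer))
    op.execute(tb.delete().where(tb.c.id.between(4021, 4036)))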
Example #55
Create Date: 2019-04-12 00:29:08.021141

"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql import column, table

from CTFd.models import db

# revision identifiers, used by Alembic.
revision = "b5551cd26764"
down_revision = "4e4d5a9ea000"
branch_labels = None
depends_on = None

teams_table = table("teams", column("id", db.Integer), column("captain_id", db.Integer))

users_table = table("users", column("id", db.Integer), column("team_id", db.Integer))


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("teams", sa.Column("captain_id", sa.Integer(), nullable=True))

    bind = op.get_bind()
    url = str(bind.engine.url)
    if not url.startswith("sqlite"):
        op.create_foreign_key(
            "team_captain_id", "teams", "users", ["captain_id"], ["id"]
        )
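
    # SQLite cannot add a constraint via ALTER TABLE, hence the skip above.
    # Under batch mode the same foreign key could be expressed as follows
    # (a sketch, not what this migration ships):
    #
    #     with op.batch_alter_table('teams') as batch_op:
    #         batch_op.create_foreign_key(
    #             'team_captain_id', 'users', ['captain_id'], ['id'])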
def upgrade():
    # These users made duplicate accounts

    # 2|jgreitman
    # delete 148 keep 614

    # 2|gratusin
    #  delete 69 keep 117

    # 2|obnomad
    # delete 92 keep 377

    # 2|smartydh9
    # delete 756 keep 791

    # 2|trentbrown
    # delete 89 keep 88

    # Create ad-hoc table constructs to use for the delete statements.
    user_table = table('user', column('id', Integer))

    user_item_table = table('user_item', column('user_id', Integer))

    # Delete the duplicate account (and its items) from each pair above,
    # keeping the other id.
    for duplicate_id in (148, 69, 92, 756, 89):
        op.execute(
            user_table.delete().where(user_table.c.id == duplicate_id)
        )
        op.execute(
            user_item_table.delete().where(
                user_item_table.c.user_id == duplicate_id)
        )
Example #57
"""Test the TextClause and related constructs."""

from sqlalchemy.testing import fixtures, AssertsCompiledSQL, eq_, \
    assert_raises_message, expect_warnings, assert_warnings
from sqlalchemy import text, select, Integer, String, Float, \
    bindparam, and_, func, literal_column, exc, MetaData, Table, Column, \
    asc, desc, union
from sqlalchemy.types import NullType
from sqlalchemy.sql import table, column, util as sql_util
from sqlalchemy import util

table1 = table(
    'mytable',
    column('myid', Integer),
    column('name', String),
    column('description', String),
)

table2 = table(
    'myothertable',
    column('otherid', Integer),
    column('othername', String),
)


class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
    __dialect__ = 'default'

    def test_basic(self):
        self.assert_compile(text("select * from foo where lala = bar"),
                            "select * from foo where lala = bar")
Example #58
def test_select_composition_seven(self):
    self.assert_compile(
        select([literal_column('col1'),
                literal_column('col2')],
               from_obj=table('tablename')).alias('myalias'),
        "SELECT col1, col2 FROM tablename")
def test_select_from_select(db):
    from sqlalchemy import sql
    s1 = sql.select([sql.column('number'), sql.column('name')]
                    ).select_from(sql.table('test'))
    out = read_sql_table(s1, db, npartitions=2, index_col='number')
    assert_eq(out, df[['name']])
Example #60
def _insert_operation_operation_form():
    tb = table('operation_operation_form', column('operation_id', Integer),
               column('operation_form_id', Integer))

    columns = [c.name for c in tb.columns]
    data = [

        # Classifier
        [4021, 39],
        [4021, 40],
        [4021, 41],
        [4021, 110],
        [4021, 4021],
        [4021, 4001],
        [4022, 39],
        [4022, 40],
        [4022, 41],
        [4022, 110],
        [4022, 4021],
        [4022, 4002],
        [4023, 39],
        [4023, 40],
        [4023, 41],
        [4023, 110],
        [4023, 4021],
        [4023, 4003],
        [4024, 39],
        [4024, 40],
        [4024, 41],
        [4024, 110],
        [4024, 4021],
        [4024, 4004],
        [4025, 39],
        [4025, 40],
        [4025, 41],
        [4025, 110],
        [4025, 4021],
        [4025, 4005],
        [4031, 39],
        [4031, 40],
        [4031, 41],
        [4031, 110],
        [4031, 4021],
        [4031, 4011],
        [4032, 39],
        [4032, 40],
        [4032, 41],
        [4032, 110],
        [4032, 4021],
        [4032, 4012],
        [4034, 39],
        [4034, 40],
        [4034, 41],
        [4034, 110],
        [4034, 4021],
        [4034, 4019],
        [4036, 39],
        [4036, 40],
        [4036, 41],
        [4036, 110],
        [4036, 3005],
        [4036, 4021],

        # Regressor
        [4026, 41],
        [4026, 110],
        [4026, 4022],
        [4026, 4006],
        [4027, 41],
        [4027, 110],
        [4027, 4022],
        [4027, 4007],
        [4028, 41],
        [4028, 110],
        [4028, 4022],
        [4028, 4008],
        [4029, 41],
        [4029, 110],
        [4029, 4022],
        [4029, 4009],
        [4030, 41],
        [4030, 110],
        [4030, 4022],
        [4030, 4010],
        [4035, 41],
        [4035, 110],
        [4035, 4022],
        [4035, 4020],

        # Clustering
        [4033, 41],
        [4033, 110],
        [4033, 4023],
        [4033, 4013],
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
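
# The zip idiom above turns each positional row into the keyword mapping
# that bulk_insert expects; for the first row, for example:
#
#     dict(zip(['operation_id', 'operation_form_id'], [4021, 39]))
#     # -> {'operation_id': 4021, 'operation_form_id': 39}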