Example #1
def get_candidate_task_ids(project_id, user_id=None, user_ip=None):
    """Get all available tasks for a given project and user."""
    rows = None
    print "get_candidate_task_ids is called"
    if user_id:
        query = text(
            """
                     SELECT id FROM task WHERE NOT EXISTS
                     (SELECT task_id FROM task_run WHERE
                     project_id=:project_id AND user_id=:user_id
                        AND task_id=task.id)
                     AND project_id=:project_id AND state !='completed'
                     ORDER BY priority_0 DESC, id ASC LIMIT 10"""
        )
        rows = session.execute(query, dict(project_id=project_id, user_id=user_id))
    else:
        if not user_ip:
            user_ip = "127.0.0.1"
        query = text(
            """
                     SELECT id FROM task WHERE NOT EXISTS
                     (SELECT task_id FROM task_run WHERE
                     project_id=:project_id AND user_ip=:user_ip
                        AND task_id=task.id)
                     AND project_id=:project_id AND state !='completed'
                     ORDER BY priority_0 DESC, id ASC LIMIT 10"""
        )
        rows = session.execute(query, dict(project_id=project_id, user_ip=user_ip))

    return [t.id for t in rows]
Example #2
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('roles_authorities',
    sa.Column('authority_id', sa.Integer(), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['authority_id'], ['authorities.id'], ),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], )
    )
    op.create_index('roles_authorities_ix', 'roles_authorities', ['authority_id', 'role_id'], unique=True)
    op.create_table('roles_certificates',
    sa.Column('certificate_id', sa.Integer(), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], )
    )
    op.create_index('roles_certificates_ix', 'roles_certificates', ['certificate_id', 'role_id'], unique=True)
    op.create_index('certificate_associations_ix', 'certificate_associations', ['domain_id', 'certificate_id'], unique=True)
    op.create_index('certificate_destination_associations_ix', 'certificate_destination_associations', ['destination_id', 'certificate_id'], unique=True)
    op.create_index('certificate_notification_associations_ix', 'certificate_notification_associations', ['notification_id', 'certificate_id'], unique=True)
    op.create_index('certificate_replacement_associations_ix', 'certificate_replacement_associations', ['certificate_id', 'certificate_id'], unique=True)
    op.create_index('certificate_source_associations_ix', 'certificate_source_associations', ['source_id', 'certificate_id'], unique=True)
    op.create_index('roles_users_ix', 'roles_users', ['user_id', 'role_id'], unique=True)

    ### end Alembic commands ###

    # migrate existing authority_id relationship to many_to_many
    conn = op.get_bind()
    for id, authority_id in conn.execute(text('select id, authority_id from roles where authority_id is not null')):
        stmt = text('insert into roles_authorities (role_id, authority_id) values (:role_id, :authority_id)')
        stmt = stmt.bindparams(role_id=id, authority_id=authority_id)
        op.execute(stmt)
Example #3
def _get_table_infomation(userid):
    data = db.session.execute(
        db.session.query()
        .from_statement(text(""" select orgprefix from admin where id = :userid """))
        .params(userid=userid)
    )
    ooo = []
    for d in data:
        print d
        orgs = d.orgprefix.split(",")
        for o in orgs:
            ooo.append("t.table_name like '" + o + "\_%'")

    sqlstr = """
			SELECT t.table_name ,  concat(ROUND(DATA_LENGTH/1024/1024,2),'MB') AS data_length
			FROM INFORMATION_SCHEMA.TABLES  t
			WHERE t.TABLE_SCHEMA='test' 
			and 
			""" + " or ".join(
        ooo
    )
    print sqlstr
    query = db.session.query().from_statement(text(sqlstr)).params(userid=userid)

    data = query_to_list_json(query)
    return data
Example #4
def upgrade():
    op.drop_constraint("queues_pkey", "queues")
    op.execute(CreateSequence(Sequence("queues_id_seq")))
    op.add_column("queues", Column("id", Integer, server_default=text("nextval('queues_id_seq'::regclass)")))
    op.create_primary_key("queues_pkey", "queues", ["id"])

    Email.__table__.create(bind=engine)
    op.create_table(
        "queue_notifications",
        Column("queue_id", Integer, ForeignKey("queues.id")),
        Column("email_id", Integer, ForeignKey("emails.id")),
    )
    op.add_column("users", Column("email_id", Integer, ForeignKey("emails.id")))

    conn = op.get_bind()
    s = select([text("users.email")]).select_from(text("users"))
    users = conn.execute(s)
    table_user = User.__table__
    for row in users:
        ins = Email.__table__.insert().values(address=row["email"])
        result_insert_email = conn.execute(ins)
        upd = (
            table_user.update()
            .values(email_id=result_insert_email.inserted_primary_key[0])
            .where(text("users.email = :email"))
        )
        conn.execute(upd, email=row["email"])

    op.drop_column("users", "email")
Example #5
def get_candidate_tasks(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):
    """Gets all available tasks for a given project and user"""
    rows = None
    if user_id and not user_ip:
        query = text('''
                     SELECT id FROM task WHERE NOT EXISTS
                     (SELECT task_id FROM task_run WHERE
                     app_id=:app_id AND user_id=:user_id AND task_id=task.id)
                     AND app_id=:app_id AND state !='completed'
                     ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = session.execute(query, dict(app_id=app_id, user_id=user_id))
    else:
        if not user_ip:
            user_ip = '127.0.0.1'
        query = text('''
                     SELECT id FROM task WHERE NOT EXISTS
                     (SELECT task_id FROM task_run WHERE
                     app_id=:app_id AND user_ip=:user_ip AND task_id=task.id)
                     AND app_id=:app_id AND state !='completed'
                     ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = session.execute(query, dict(app_id=app_id, user_ip=user_ip))

    tasks = []
    for t in rows:
        tasks.append(session.query(Task).get(t.id))
    return tasks
Example #6
def update_total_view_count(user_ids): 
    try:   
        result = db.session.execute(text("""SELECT answer_author, sum(posts.view_count)
                                        AS post_views 
                                        FROM posts 
                                        WHERE posts.answer_author in :user_ids"""
                                        ),
                                    params={'user_ids':user_ids}
                                    )
        result = dict(list(result))
        
        for user_id, post_view_count in result.items():
            if not post_view_count:
                post_view_count = 0
            
            db.session.execute(text("""UPDATE users 
                                        SET total_view_count=view_count+:post_view_count
                                        WHERE id=:user_id"""
                                ), params={'user_id':user_id, 'post_view_count':int(post_view_count)})

        db.session.commit()
        db.session.remove()

    except:
        db.session.rollback()
        db.session.remove()
Example #7
def update_view_count_to_db(url):
    try:
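        # note: this early return short-circuits the function; the view-count updates below never run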
        return
        original_url = url
        count = redis_views.get(url)
        
        url = url.replace('http://d35wlof4jnjr70.cloudfront.net/', 'https://s3.amazonaws.com/franklymestorage/')
        types = ['_ultralow', '_low', '_medium', '_opt', '_promo']
        for suffix in types:
            url = url.replace(suffix, '')
        
        if count:
            db.session.execute(text("""UPDATE users 
                                        SET view_count=view_count+:count, total_view_count=total_view_count+:count
                                        WHERE profile_video=:url
                                    """),
                                params = {"url":url, "count":int(count)}
                            )
            db.session.execute(text("""UPDATE posts 
                                        SET view_count=view_count+:count 
                                        WHERE media_url=:url
                                    """),
                                params = {"url":url, "count":int(count)}
                            )
            db.session.commit()

        redis_views.delete(original_url)
    except:
        db.session.rollback()
Example #8
    def _migrate(self, label, query_count, query, model, get_entity):
        self.start(label)

        total_count = self.connection_source.execute(
            text(query_count)).fetchone()[0]

        print('Total: {0} rows'.format(total_count))

        query = text(query)
        batch = SimpleBatch(
            self.session_target, self.batch_size, model)
        with transaction.manager, batch:
            count = 0
            for entity_in in self.connection_source.execute(query):
                count += 1
                # ignore associations for these documents because the
                # documents are broken (no latest version)
                if entity_in.main_id not in [224228, 436917] and \
                        entity_in.linked_id not in [224228, 436917]:
                    batch.add(get_entity(entity_in))
                self.progress(count, total_count)

            # the transaction will not be committed automatically when doing
            # a bulk insertion. `mark_changed` forces a commit.
            zope.sqlalchemy.mark_changed(self.session_target)
        self.stop()
Example #9
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('product', sa.Column('create_date', sa.DateTime(), nullable=True))

    results = op.get_bind().execute(text("""
    select prd.id, min(po.order_date) from purchase_order po, product prd, purchase_order_line pol
    where pol.product_id = prd.id and po.id = pol.purchase_order_id
    group by prd.id
    """)).fetchall()
    for r in results:
        sup_id = r[0]
        po_date = r[1]
        sql = "update product set create_date = '{0}' where id={1}".format(po_date, sup_id)
        op.get_bind().execute(text(sql))

    results = op.get_bind().execute(text("""
    select p.id, min(so.order_date) from sales_order so, sales_order_line sol,
    product p where so.id = sol.sales_order_id and
    sol.product_id = p.id group by p.id;
    """)).fetchall()
    for r in results:
        sup_id = r[0]
        so_date = r[1]
        sql = "update product set create_date = '{0}' where id={1} and create_date is null".format(so_date, sup_id)
        op.get_bind().execute(text(sql))

    op.get_bind().execute(text("update product set create_date = '{0}' where create_date is null".format(datetime.now())))
    op.alter_column('product', 'create_date', existing_type=sa.DateTime(), nullable=False)
Example #10
def upgrade():
    conn = op.get_bind()
    conn.execute(text("ALTER TABLE contact" " DROP FOREIGN KEY contact_ibfk_1"))

    conn.execute(text("ALTER TABLE phonenumber" " DROP FOREIGN KEY phonenumber_ibfk_1"))

    conn.execute(text("ALTER TABLE messagecontactassociation" " DROP FOREIGN KEY messagecontactassociation_ibfk_1"))
Example #11
def downgrade():
    conn = op.get_bind()
    conn.execute(
        text(
            "ALTER TABLE contact"
            " ADD CONSTRAINT contact_ibfk_1 FOREIGN KEY"
            " (namespace_id) REFERENCES namespace(id)"
        )
    )

    conn.execute(
        text(
            "ALTER TABLE phonenumber"
            " ADD CONSTRAINT phonenumber_ibfk_1 FOREIGN KEY"
            " (contact_id) REFERENCES contact(id)"
        )
    )

    conn.execute(
        text(
            "ALTER TABLE messagecontactassociation"
            " ADD CONSTRAINT messagecontactassociation_ibfk_1"
            " FOREIGN KEY (contact_id) REFERENCES contact(id)"
        )
    )
Example #12
def submit_recording(connection, data):
    data_json = json.dumps(data, sort_keys=True, separators=(',', ':'))
    data_sha256 = sha256(data_json.encode("utf-8")).hexdigest()

    meta = {"artist": data["artist"], "title": data["title"]}
    meta_json = json.dumps(meta, sort_keys=True, separators=(',', ':'))
    meta_sha256 = sha256(meta_json.encode("utf-8")).hexdigest()

    artist = get_artist_credit(connection, data["artist"])
    if not artist:
        artist = add_artist_credit(connection, data["artist"])
    if "release" in data:
        release = get_release(connection, data["release"])
        if not release:
            release = add_release(connection, data["release"])
    else:
        release = None
    query = text("""INSERT INTO recording_json (data, data_sha256, meta_sha256)
                   VALUES (:data, :data_sha256, :meta_sha256)
                RETURNING id""")
    result = connection.execute(query, {"data": data_json,
                                           "data_sha256": data_sha256,
                                           "meta_sha256": meta_sha256})
    id = result.fetchone()["id"]
    gid = str(uuid.uuid4())
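    # the recording row's "data" column stores the id of the recording_json row inserted above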
    query = text("""INSERT INTO recording (gid, data, artist, release, submitted)
                    VALUES (:gid, :data, :artist, :release, now())""")
    connection.execute(query, {"gid": gid,
                                  "data": id,
                                  "artist": artist,
                                  "release": release})

    return gid
Example #13
    def test_filtered_counting( self ):
        user2 = self.user_manager.create( **user2_data )
        history = self.history_manager.create( name='history', user=user2 )
        contents = []
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 3 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[:3] ) )
        contents.extend([ self.add_hda_to_history( history, name=( 'hda-' + str( x ) ) ) for x in xrange( 4, 6 ) ])
        contents.append( self.add_list_collection_to_history( history, contents[4:6] ) )

        self.log( "should show correct count with filters" )
        self.hda_manager.delete( contents[1] )
        self.hda_manager.delete( contents[4] )
        contents[6].deleted = True
        self.app.model.context.flush()

        contents[2].visible = False
        contents[5].visible = False
        contents[6].visible = False
        self.app.model.context.flush()

        HDA = self.hda_manager.model_class
        self.assertEqual( self.contents_manager.contents_count( history, filters=[ HDA.deleted == true() ] ), 3 )
        filters = [ text( 'visible = 0' ) ]
        self.assertEqual( self.contents_manager.contents_count( history, filters=filters ), 3 )

        filters = [ text( 'deleted = 1' ), text( 'visible = 0' ) ]
        self.assertEqual( self.contents_manager.contents_count( history, filters=filters ), 1 )
Example #14
def upgrade_ubuntu_cobbler_profile_6_0_to_6_1(connection):
    select_query = text("SELECT id, generated FROM attributes")
    update_query = text(
        "UPDATE attributes SET generated = :generated WHERE id = :attr_id")
    for attr_id, generated in connection.execute(select_query):
        attrs = jsonutils.loads(generated)
        if attrs['cobbler']['profile'] == 'ubuntu_1204_x86_64':
            attrs['cobbler']['profile'] = 'ubuntu_1404_x86_64'
            connection.execute(
                update_query,
                generated=jsonutils.dumps(attrs),
                attr_id=attr_id)

    select_query = text("SELECT id, attributes_metadata FROM releases")
    update_query = text(
        "UPDATE releases SET attributes_metadata = :attrs_meta"
        " WHERE id = :release_id")
    for release_id, attributes_metadata in connection.execute(select_query):
        attrs = jsonutils.loads(attributes_metadata)
        if attrs['generated']['cobbler']['profile']['generator_arg'] == \
                'ubuntu_1204_x86_64':
            attrs['generated']['cobbler']['profile']['generator_arg'] = \
                'ubuntu_1404_x86_64'
            connection.execute(
                update_query,
                attrs_meta=jsonutils.dumps(attrs),
                release_id=release_id)
Example #15
 def get_table_names(self, connection, schema=None, **kw):
     if schema is None:
         schema = self.default_schema_name
     if schema is None:
         result = connection.execute(
             text("SELECT TABLE_NAME FROM DB..TABLES"))
         return [r[0] for r in result]
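     # schema may be passed as "catalog.schema"; add a trailing dot so the split below always yields two parts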
     if '.' not in schema:
         schema += '.'
     catalog, schema = schema.split('.', 1)
     if catalog:
         if schema:
             result = connection.execute(
                 text("SELECT TABLE_NAME FROM DB..TABLES WHERE"
                      "TABLE_CATALOG=:catalog AND TABLE_SCHEMA = :schema"),
                 catalog=catalog, schema=schema)
         else:
             result = connection.execute(
                 text("SELECT TABLE_NAME FROM DB..TABLES WHERE"
                      "TABLE_CATALOG=:catalog"), catalog=catalog)
     else:
         result = connection.execute(
             text("SELECT TABLE_NAME FROM DB..TABLES WHERE"
                  "TABLE_SCHEMA=:schema"), schema=schema)
     return [r[0] for r in result]
Example #16
def get_candidate_task_ids(project_id, user_id=None, user_ip=None,
                        n_answers=30, offset=0):
    """Get all available tasks for a given project and user."""
    rows = None
    if user_id and not user_ip:
        query = text('''
                     SELECT id FROM task WHERE NOT EXISTS
                     (SELECT task_id FROM task_run WHERE
                     project_id=:project_id AND user_id=:user_id
                        AND task_id=task.id)
                     AND project_id=:project_id AND state !='completed'
                     ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = session.execute(query, dict(project_id=project_id,
                                           user_id=user_id))
    else:
        if not user_ip:
            user_ip = '127.0.0.1'
        query = text('''
                     SELECT id FROM task WHERE NOT EXISTS
                     (SELECT task_id FROM task_run WHERE
                     project_id=:project_id AND user_ip=:user_ip
                        AND task_id=task.id)
                     AND project_id=:project_id AND state !='completed'
                     ORDER BY priority_0 DESC, id ASC LIMIT 10''')
        rows = session.execute(query, dict(project_id=project_id,
                                           user_ip=user_ip))

    return [t.id for t in rows]
Example #17
def upgrade_6_0_to_6_1_plugins_cluster_attrs_use_ids_mapping(connection):
    """Convert plugin version mapping to plugin ids

    In Fuel 6.0 we had plugin version in cluster attributes
    to identify which plugin should be enabled or disabled.
    In 6.1 release we have plugins updates feature, it means
    that a single plugin can be updated/overwritten with newer
    version. For example 1.0.0 can be replaced with 1.0.1.
    As result we cannot rely on versions anymore, here we
    convert version mapping to plugin ids.

    See blueprint:
    https://blueprints.launchpad.net/fuel/+spec/plugins-security-fixes-delivery
    """
    select_attrs = text("""SELECT id, editable FROM attributes""")

    select_plugins = text(
        """SELECT id FROM plugins
        WHERE name = :plugin_name AND
        version = :plugin_version""")

    update_attrs = text(
        """UPDATE attributes
        SET editable = :editable
        WHERE id = :id""")

    attrs_list = connection.execute(select_attrs)
    for raw_attrs in attrs_list:
        attr_id = raw_attrs[0]
        attrs = jsonutils.loads(raw_attrs[1])

        for key, attr in six.iteritems(attrs):
            metadata = attr.get('metadata', {})
            plugin_version = metadata.get('plugin_version')
            if not plugin_version:
                continue

            plugin_name = key

            # If there is no plugin with such version
            # and name, it means that something was wrong
            # and somebody deleted the plugin from database
            # we must not fail migration in this case
            plugin_id = None

            plugins = list(connection.execute(
                select_plugins,
                plugin_name=plugin_name,
                plugin_version=plugin_version))

            if plugins:
                plugin_id = plugins[0][0]

            del attr['metadata']['plugin_version']
            attr['metadata']['plugin_id'] = plugin_id

        connection.execute(
            update_attrs,
            editable=jsonutils.dumps(attrs),
            id=attr_id)
Example #18
    def test_timestamp(self):
        """Exercise funky TIMESTAMP default syntax."""

        columns = [
            ([TIMESTAMP],
             'TIMESTAMP NULL'),
            ([mysql.MSTimeStamp],
             'TIMESTAMP NULL'),
            ([mysql.MSTimeStamp,
              DefaultClause(sql.text('CURRENT_TIMESTAMP'))],
             "TIMESTAMP DEFAULT CURRENT_TIMESTAMP"),
            ([mysql.MSTimeStamp,
              DefaultClause(sql.text("'1999-09-09 09:09:09'"))],
             "TIMESTAMP DEFAULT '1999-09-09 09:09:09'"),
            ([mysql.MSTimeStamp,
              DefaultClause(sql.text("'1999-09-09 09:09:09' "
                                      "ON UPDATE CURRENT_TIMESTAMP"))],
             "TIMESTAMP DEFAULT '1999-09-09 09:09:09' "
             "ON UPDATE CURRENT_TIMESTAMP"),
            ([mysql.MSTimeStamp,
              DefaultClause(sql.text("CURRENT_TIMESTAMP "
                                      "ON UPDATE CURRENT_TIMESTAMP"))],
             "TIMESTAMP DEFAULT CURRENT_TIMESTAMP "
             "ON UPDATE CURRENT_TIMESTAMP"),
            ]
        for spec, expected in columns:
            c = Column('t', *spec)
            Table('t', MetaData(), c)
            self.assert_compile(
                schema.CreateColumn(c),
                "t %s" % expected
            )
Example #19
def loadtoS3andRS():

    #Get list of file names. This will be used to iterate the COPY command
    filenames = next(os.walk('/dev/rfs_folder'))[2]

    get_asin_files = []

    for x in filenames:
        get_asin_files.append(x)


    get_asin_files.sort()

    #Connect to Redshift
    engine_string = "postgresql+psycopg2://{user}:{password}@{endpoint}:{port}/{dbname}".format (user="******",password="******",endpoint="xxx.redshift.amazonaws.com",port=8192,dbname="xxx")


    engine = create_engine(engine_string)
    connection = engine.connect()


    #Truncate TABLE SCHEMA.TABLE_NAME
    connection.execute(text("TRUNCATE TABLE SCHEMA.TABLE_NAME").execution_options(autocommit=True))

    #For loop statement to iterate the list of filenames for COPY
    for name in get_asin_files:

        #The first file will include a header. Need to implement ignoreheader in sql statement
        if name == get_asin_files[0]:
            connection.execute(text("COPY SCHEMA.TABLE_NAME(column names) FROM 's3://xxx/redshift_test/"+name+"' access_key_id 'xxx' secret_access_key 'xxx' ignoreheader 1 delimiter '\t';").execution_options(autocommit=True))
        else:
            connection.execute(text("COPY SCHEMA.TABLE_NAME(column names) FROM 's3://xxx/redshift_test/"+name+"' access_key_id 'xxx' secret_access_key 'xxx' delimiter '\t';").execution_options(autocommit=True))


    connection.close()
Example #20
def main(url = Config.get("URL", "ships"), debug=False):
    req = urllib2.Request(url)
    req.add_header('User-Agent', useragent)
    stats = urllib2.urlopen(req).read()
    session.execute(Ship.__table__.delete())
    if Config.get("DB", "dbms") == "mysql":
        session.execute(text("ALTER TABLE ships AUTO_INCREMENT=1;", bindparams=[false]))
    else:
        session.execute(text("SELECT setval('ships_id_seq', 1, :false);", bindparams=[false]))
    
    for line in sre.findall(stats):
        ship = Ship()
        line = list(line)
        for index, key in enumerate(keys):
            if line[index] in mapping:
                line[index] = mapping[line[index]]
            elif line[index].isdigit():
                line[index] = int(line[index])
            if line[index] not in ('-', '',):
                setattr(ship,key,line[index])
        ship.total_cost = ship.metal + ship.crystal + ship.eonium
        if debug: print "%12s%12s%12s%12s" % (ship.name, ship.class_, ship.race, ship.type,)
        
        session.add(ship)
    
    session.commit()
    session.close()
Example #21
def post_notifications(post_id):

    '''
    Sends out both push and email notifications.
    Called after the low-quality version of the video is ready. Since this is a
    very high-priority notification it is sent to all users who upvoted, asked
    the question, or follow the user.'''
    result = db.session.execute(text('''Select
                                        aa.first_name, q.body,
                                         n.id, n.link, n.type
                                         from posts p
                                         left join questions q on q.id = p.question
                                         left join users aa on aa.id = p.answer_author
                                         left join notifications n on n.object_id = :post_id
                                         where p.id = :post_id
                                         and n.type in ('post-add-self_user','post-add-following_user')
                                         group by n.type
                                         limit 2 ;
                                         '''), params={'post_id': post_id})

    try:
        for row in result:

            answer_author_name = row[0]
            question_body = row[1]
            notification_id = row[2]
            link = row[3]
            notification_type = row[4]


            #Get a set of users who haven't been sent this gcm notification yet
            #This includes the question author
            results = db.session.execute(text('''Select
                                             un.user_id, u.first_name, u.email
                                             from user_notifications un
                                             left join user_push_notifications upn
                                             on upn.notification_id  = :notification_id and upn.user_id = un.user_id
                                             left join users u on u.id = un.user_id
                                             where
                                             u.monkness = -1 and
                                             un.notification_id = :notification_id
                                             and upn.user_id is null'''),
                                             params={'notification_id': notification_id})

            print 'Notification id: ', notification_id
            print 'Notification type:', notification_type

            for user in results:

                push.send(notification_id=notification_id, user_id=user[0])
                if notification_type == 'post-add-self_user':
                    print user.email
                    make_email.question_answered(receiver_email=user[2], receiver_name=user[1],
                                               celebrity_name=answer_author_name,
                                               user_id=row[0],
                                               question=question_body, web_link=link,
                                               post_id=post_id)
                    break
    except ObjectNotFoundException:
        pass
Example #22
    def has_sequence(self, connection, sequence_name, schema=None):
        if schema is None:
            cursor = connection.execute(
                sql.text(
                    "SELECT relname FROM pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where relkind='S' and "
                    "n.nspname=current_schema() "
                    "and lower(relname)=:name",
                    bindparams=[
                        sql.bindparam('name', unicode(sequence_name.lower()),
                        type_=sqltypes.Unicode)
                    ] 
                )
            )
        else:
            cursor = connection.execute(
                sql.text(
                "SELECT relname FROM pg_class c join pg_namespace n on "
                "n.oid=c.relnamespace where relkind='S' and "
                "n.nspname=:schema and lower(relname)=:name",
                bindparams=[
                    sql.bindparam('name', unicode(sequence_name.lower()),
                     type_=sqltypes.Unicode),
                    sql.bindparam('schema', 
                                unicode(schema), type_=sqltypes.Unicode)
                ]
            )
            )

        return bool(cursor.first())
Example #23
    def test_mysql_innodb(self):
        """Test that table creation on mysql only builds InnoDB tables."""
        with mock.patch('manila.db.sqlalchemy.api.get_engine',
                        return_value=self.engine):
            self._walk_versions(snake_walk=False, downgrade=False)

        # sanity check
        sanity_check = """SELECT count(*)
                          FROM information_schema.tables
                          WHERE table_schema = :database;"""
        total = self.engine.execute(
            text(sanity_check),
            database=self.engine.url.database)

        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")

        noninnodb_query = """
            SELECT count(*)
            FROM information_schema.TABLES
            WHERE table_schema = :database
                AND engine != 'InnoDB'
                AND table_name != 'alembic_version';"""

        count = self.engine.execute(
            text(noninnodb_query),
            database=self.engine.url.database
        ).scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
Example #24
def createServiceRequest():
	if request.method == 'POST':
		sql = text('''INSERT INTO service_request(client_username, address, title, description, tag)
						VALUES(:user, :address, :title, :description, :tag);''')
		db.engine.execute(sql, user=session["user"], address=request.form.get('address'), title=request.form.get('title'),\
							description=request.form.get('description'), tag=request.form.get('tag'))
		sql2 = text('''SELECT service_id FROM service_request WHERE client_username=:user AND address=:address AND
						title=:title AND description=:description;''')
		result = db.engine.execute(sql2, user=session["user"], address=request.form.get('address'), title=request.form.get('title'),\
							description=request.form.get('description'))
		idnum = result.fetchone()
		days = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
		for day in days:
			times = request.form.getlist(day)
			for time in times:
				sql = text('''INSERT INTO service_schedule(service_id,day,hour) VALUES(:id,:day,:hour);''')
				db.engine.execute(sql, id=idnum[0], day=day, hour=time)
		flash('Service request created.')
		return redirect('/requests/'+str(idnum[0])+'/workers')
	if not session.get('user'):
		flash('Please login before creating requests.')
		return redirect('/')
	elif session['type'] == 'worker':
		flash('You must be a client to create service requests.')
		return redirect('/')
	else:
		sql = text('''SELECT tag FROM job_tag''')
		results = db.engine.execute(sql)
		tags = [res[0] for res in results]
		return render_template('createRequest.jade', tags=tags)
Example #25
def getAltitudesChilds(connection, cd_ref):
    # build the query's SELECT list from the keys of the altitude table
    sql = """
        SELECT label_altitude
        FROM atlas.bib_altitudes
        ORDER BY altitude_min
    """
    qalt = connection.execute(text(sql))
    alt = [k[0] for k in qalt]

    sumSelect = ', '.join(
        "SUM({}) AS {}".format(k, k) for k in alt
    )

    sql = """
        SELECT {sumSelect}
        FROM atlas.vm_altitudes alt
        WHERE
            alt.cd_ref IN (
                SELECT * FROM atlas.find_all_taxons_childs(:thiscdref)
            ) OR alt.cd_ref = :thiscdref
    """.format(sumSelect=sumSelect)
    mesAltitudes = connection.execute(text(sql), thiscdref=cd_ref)

    altiList = list()
    for a in mesAltitudes:
        for k in alt:
            temp = {"altitude": k.replace('_', '-')[1:], "value": getattr(a, k)}
            altiList.append(temp)

    return altiList
Example #26
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        # index_category=1 -> primary key
        PK_SQL = text("""
          SELECT t.table_name AS table_name, i.index_id as index_id,
                 i.index_name AS name
          FROM sys.sysidx i join sys.systab t on i.table_id=t.table_id
          WHERE t.table_id = :table_id and i.index_category = 1
        """)

        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()

        if not pks:
            return {"constrained_columns": [],
                    "name": None}

        PKCOL_SQL = text("""
             select tc.column_name as col
             FROM sys.sysidxcol ic
             join sys.systabcol tc on (ic.table_id=tc.table_id and ic.column_id=tc.column_id)
             WHERE ic.index_id = :index_id and ic.table_id = :table_id
            """)
        pk_cols = connection.execute(PKCOL_SQL, index_id=pks["index_id"],
                                     table_id=table_id )
        column_names = [pkc["col"] for pkc in pk_cols]
        return {"constrained_columns": column_names,
                "name": pks["name"]}
Example #27
    def get_unique_constraints(self, connection, table_name, schema=None, **kw):
        # Same as get_indexes except only for "unique"=2
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        # unique=2 -> unique constraint
        INDEX_SQL = text("""
          SELECT i.index_id as index_id, i.index_name AS name
          FROM sys.sysidx i join sys.systab t on i.table_id=t.table_id
          WHERE t.table_id = :table_id and i.index_category = 3 and i."unique"=2
        """)

        results = connection.execute(INDEX_SQL, table_id=table_id)
        indexes = []
        for r in results:
            INDEXCOL_SQL = text("""
             select tc.column_name as col
             FROM sys.sysidxcol ic
             join sys.systabcol tc on (ic.table_id=tc.table_id and ic.column_id=tc.column_id)
             WHERE ic.index_id = :index_id and ic.table_id = :table_id
             ORDER BY ic.sequence ASC
            """)
            idx_cols = connection.execute(INDEXCOL_SQL, index_id=r["index_id"],
                                          table_id=table_id)
            column_names = [ic["col"] for ic in idx_cols]
            index_info = {"name": r["name"],
                          "column_names": column_names}
            indexes.append(index_info)

        return indexes       
Example #28
 def has_table(self, connection, table_name, schema=None):
     # seems like case gets folded in pg_class...
     if schema is None:
         cursor = connection.execute(
             sql.text(
             "select relname from pg_class c join pg_namespace n on "
             "n.oid=c.relnamespace where n.nspname=current_schema() and "
             "lower(relname)=:name",
             bindparams=[
                     sql.bindparam('name', unicode(table_name.lower()),
                     type_=sqltypes.Unicode)]
             )
         )
     else:
         cursor = connection.execute(
             sql.text(
             "select relname from pg_class c join pg_namespace n on "
             "n.oid=c.relnamespace where n.nspname=:schema and "
             "lower(relname)=:name",
                 bindparams=[
                     sql.bindparam('name', 
                     unicode(table_name.lower()), type_=sqltypes.Unicode),
                     sql.bindparam('schema', 
                     unicode(schema), type_=sqltypes.Unicode)] 
             )
         )
     return bool(cursor.first())
Example #29
    def get_indexes(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        # index_category=3 -> not primary key, not foreign key, not text index
        # unique=1 -> unique index, 2 -> unique constraint, 5->unique index with
        # nulls not distinct
        INDEX_SQL = text("""
          SELECT i.index_id as index_id, i.index_name AS name,
                 if i."unique" in (1,2,5) then 1 else 0 endif AS "unique"
          FROM sys.sysidx i join sys.systab t on i.table_id=t.table_id
          WHERE t.table_id = :table_id and i.index_category = 3
        """)

        results = connection.execute(INDEX_SQL, table_id=table_id)
        indexes = []
        for r in results:
            INDEXCOL_SQL = text("""
             select tc.column_name as col
             FROM sys.sysidxcol ic
             join sys.systabcol tc on (ic.table_id=tc.table_id and ic.column_id=tc.column_id)
             WHERE ic.index_id = :index_id and ic.table_id = :table_id
             ORDER BY ic.sequence ASC
            """)
            idx_cols = connection.execute(INDEXCOL_SQL, index_id=r["index_id"],
                                          table_id=table_id)
            column_names = [ic["col"] for ic in idx_cols]
            index_info = {"name": r["name"],
                          "unique": bool(r["unique"]),
                          "column_names": column_names}
            indexes.append(index_info)

        return indexes
Example #30
def sources2removals(source_list, suite_id, session):
    """Compute removals items given a list of names of source packages

    @type source_list: list
    @param source_list: A list of names of source packages

    @type suite_id: int
    @param suite_id: The id of the suite from which these sources should be removed

    @type session: SQLA Session
    @param session: The database session in use

    @rtype: list
    @return: A list of items to be removed to remove all sources and their binaries from the given suite
    """
    to_remove = []
    params = {"suite_id": suite_id, "sources": tuple(source_list)}
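    # note: the tuple bound to :sources is expanded by the driver into a parenthesized list for the IN clause (psycopg2 handles this)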
    q = session.execute(sql.text("""
                    SELECT s.source, s.version, 'source', s.id
                    FROM source s
                         JOIN src_associations sa ON sa.source = s.id
                    WHERE sa.suite = :suite_id AND s.source IN :sources"""), params)
    to_remove.extend(q)
    q = session.execute(sql.text("""
                    SELECT b.package, b.version, a.arch_string, b.id
                    FROM binaries b
                         JOIN bin_associations ba ON b.id = ba.bin
                         JOIN architecture a ON b.architecture = a.id
                         JOIN source s ON b.source = s.id
                    WHERE ba.suite = :suite_id AND s.source IN :sources"""), params)
    to_remove.extend(q)
    return to_remove
Example #31
 def _get_default_schema_name(self, connection):
     return connection.scalar(
         text("SELECT user_name() as user_name").columns(username=Unicode)
     )
Example #32
def getSetByID(a):
    i = text("SELECT * " "FROM komplety " "WHERE idkompletu LIKE :y")
    result = connection.execute(i, {"y": a}).fetchall()
    return result
Example #33
def getAssortmentByName(a):
    i = text("SELECT * " "FROM asortyment " "WHERE Nazwa LIKE :y")
    result = connection.execute(i, {"y": a}).fetchall()
    return result
Example #34
def getAssortmentFromID(a):
    i = text("SELECT * " "FROM asortyment " "WHERE idasortymentu LIKE :y")
    result = connection.execute(i, {"y": a}).fetchall()
    return result
Example #35
def presentSM():
    a = text("SELECT * FROM BO")
    data = connection.execute(a).fetchall()

    a = text("SELECT UPDATE_TIME "
             "FROM   information_schema.tables "
             "WHERE  TABLE_SCHEMA = 'db' "
             "AND TABLE_NAME = 'BO'; ")

    # lastSMupdate = connection.execute(a).fetchall()
    lastSMupdate = [a[0] for a in connection.execute(a)]

    a = text("SELECT UPDATE_TIME "
             "FROM   information_schema.tables "
             "WHERE  TABLE_SCHEMA = 'db' "
             "AND TABLE_NAME = 'PZdocs'; ")

    lastPZupdate = [a[0] for a in connection.execute(a)]

    a = text("SELECT UPDATE_TIME "
             "FROM   information_schema.tables "
             "WHERE  TABLE_SCHEMA = 'db' "
             "AND TABLE_NAME = 'RWdocs'; ")

    lastRWupdate = [a[0] for a in connection.execute(a)]

    a = text("SELECT UPDATE_TIME "
             "FROM   information_schema.tables "
             "WHERE  TABLE_SCHEMA = 'db' "
             "AND TABLE_NAME = 'RAdocs'; ")

    lastRAupdate = [a[0] for a in connection.execute(a)]

    a = text("SELECT UPDATE_TIME "
             "FROM   information_schema.tables "
             "WHERE  TABLE_SCHEMA = 'db' "
             "AND TABLE_NAME = 'ZDdocs'; ")

    lastZDupdate = [a[0] for a in connection.execute(a)]

    a = text("SELECT UPDATE_TIME "
             "FROM   information_schema.tables "
             "WHERE  TABLE_SCHEMA = 'db' "
             "AND TABLE_NAME = 'ZWdocs'; ")

    lastZWupdate = [a[0] for a in connection.execute(a)]

    if not lastSMupdate:  # compare each table's last update time to determine the type of the most recent document
        return render_template(
            'stockDocuments.html',
            data=data,
            lastSMupdate="Brak modyfikacji bilansu otwarcia")
    else:
        if lastPZupdate:
            if lastPZupdate[0] == lastSMupdate[0]:
                a = text("SELECT Dokument FROM PZdocs "
                         "ORDER BY Data DESC, Dokument DESC;")
                lastDoc = connection.execute(a).fetchall()
                lastPZ = [a[0] for a in lastDoc]
                return render_template('stockDocuments.html',
                                       data=data,
                                       lastSMupdate=lastPZ[0])
        if lastRWupdate:
            if lastRWupdate[0] == lastSMupdate[0]:
                return render_template(
                    'stockDocuments.html',
                    data=data,
                    lastSMupdate="Ostatni przesłany dokument: RW")
        if lastZWupdate:
            if lastZWupdate[0] == lastSMupdate[0]:
                return render_template(
                    'stockDocuments.html',
                    data=data,
                    lastSMupdate="Ostatni przesłany dokument: ZW")
        if lastZDupdate:
            if lastZDupdate[0] == lastSMupdate[0]:
                return render_template(
                    'stockDocuments.html',
                    data=data,
                    lastSMupdate="Ostatni przesłany dokument: ZD")
        if lastRAupdate:
            if lastRAupdate[0] == lastSMupdate[0]:
                return render_template(
                    'stockDocuments.html',
                    data=data,
                    lastSMupdate="Ostatni przesłany dokument: Raport sprzedaży"
                )
        else:
            return render_template(
                'stockDocuments.html',
                data=data,
                lastSMupdate="Ostatni przesłany dokument: Nieznany")
Example #36
def index():
    a = text("CREATE TABLE IF NOT EXISTS db.PZdocs "
             "(ID BIGINT NOT NULL, "
             "Nazwa TEXT NOT NULL, "
             "Dokument TEXT NULL, "
             "Jednostka TEXT NULL, "
             "Ilosc DOUBLE NULL, "
             "Cena DOUBLE NULL, "
             "Data TEXT NULL)"
             "ENGINE=InnoDB "
             "DEFAULT CHARSET=utf8mb4 "
             "COLLATE=utf8mb4_general_ci; ")

    connection.execute(a)

    a = text("CREATE TABLE IF NOT EXISTS db.RAdocs "
             "(ID BIGINT NOT NULL, "
             "Nazwa TEXT NOT NULL, "
             "Dokument TEXT NULL, "
             "Ilosc TEXT NULL, "
             "Netto DOUBLE NULL, "
             "VAT DOUBLE NULL, "
             "Brutto TEXT NULL, "
             "Stawka TEXT NULL, "
             "od TEXT NULL, "
             "do TEXT NULL)"
             "ENGINE=InnoDB "
             "DEFAULT CHARSET=utf8mb4 "
             "COLLATE=utf8mb4_general_ci; ")

    connection.execute(a)

    a = text("CREATE TABLE IF NOT EXISTS db.RWdocs "
             "(ID BIGINT NOT NULL, "
             "Nazwa TEXT NOT NULL, "
             "Dokument TEXT NULL, "
             "Jednostka TEXT NULL, "
             "Ilosc DOUBLE NULL, "
             "Data TEXT NULL)"
             "ENGINE=InnoDB "
             "DEFAULT CHARSET=utf8mb4 "
             "COLLATE=utf8mb4_general_ci; ")

    connection.execute(a)

    a = text("CREATE TABLE IF NOT EXISTS db.ZWdocs "
             "(ID BIGINT NOT NULL, "
             "Nazwa TEXT NOT NULL, "
             "Dokument TEXT NULL, "
             "Jednostka TEXT NULL, "
             "Ilosc DOUBLE NULL, "
             "Data TEXT NULL)"
             "ENGINE=InnoDB "
             "DEFAULT CHARSET=utf8mb4 "
             "COLLATE=utf8mb4_general_ci; ")

    connection.execute(a)

    a = text("CREATE TABLE IF NOT EXISTS db.ZDdocs "
             "(ID BIGINT NOT NULL, "
             "Nazwa TEXT NOT NULL, "
             "Dokument TEXT NULL, "
             "Jednostka TEXT NULL, "
             "Ilosc DOUBLE NULL, "
             "Data TEXT NULL)"
             "ENGINE=InnoDB "
             "DEFAULT CHARSET=utf8mb4 "
             "COLLATE=utf8mb4_general_ci; ")

    connection.execute(a)

    # Render the upload HTML template 'templates/index.html'
    return render_template('index.html')
Example #37
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):

        table_id = self.get_table_id(
            connection, table_name, schema, info_cache=kw.get("info_cache")
        )

        table_cache = {}
        column_cache = {}
        foreign_keys = []

        table_cache[table_id] = {"name": table_name, "schema": schema}

        COLUMN_SQL = text(
            """
          SELECT c.colid AS id, c.name AS name
          FROM syscolumns c
          WHERE c.id = :table_id
        """
        )

        results = connection.execute(COLUMN_SQL, table_id=table_id)
        columns = {}
        for col in results:
            columns[col["id"]] = col["name"]
        column_cache[table_id] = columns

        REFCONSTRAINT_SQL = text(
            """
          SELECT o.name AS name, r.reftabid AS reftable_id,
            r.keycnt AS 'count',
            r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
            r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
            r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
            r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
            r.fokey16 AS fokey16,
            r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
            r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
            r.refkey10 AS refkey10, r.refkey11 AS refkey11,
            r.refkey12 AS refkey12, r.refkey13 AS refkey13,
            r.refkey14 AS refkey14, r.refkey15 AS refkey15,
            r.refkey16 AS refkey16
          FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
          WHERE r.tableid = :table_id
        """
        )
        referential_constraints = connection.execute(
            REFCONSTRAINT_SQL, table_id=table_id
        ).fetchall()

        REFTABLE_SQL = text(
            """
          SELECT o.name AS name, u.name AS 'schema'
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE o.id = :table_id
        """
        )

        for r in referential_constraints:
            reftable_id = r["reftable_id"]

            if reftable_id not in table_cache:
                c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
                reftable = c.fetchone()
                c.close()
                table_info = {"name": reftable["name"], "schema": None}
                if (
                    schema is not None
                    or reftable["schema"] != self.default_schema_name
                ):
                    table_info["schema"] = reftable["schema"]

                table_cache[reftable_id] = table_info
                results = connection.execute(COLUMN_SQL, table_id=reftable_id)
                reftable_columns = {}
                for col in results:
                    reftable_columns[col["id"]] = col["name"]
                column_cache[reftable_id] = reftable_columns

            reftable = table_cache[reftable_id]
            reftable_columns = column_cache[reftable_id]

            constrained_columns = []
            referred_columns = []
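            # sysreferences stores up to 16 fokeyN/refkeyN column ids per constraint; only the first r["count"] pairs are populated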
            for i in range(1, r["count"] + 1):
                constrained_columns.append(columns[r["fokey%i" % i]])
                referred_columns.append(reftable_columns[r["refkey%i" % i]])

            fk_info = {
                "constrained_columns": constrained_columns,
                "referred_schema": reftable["schema"],
                "referred_table": reftable["name"],
                "referred_columns": referred_columns,
                "name": r["name"],
            }

            foreign_keys.append(fk_info)

        return foreign_keys
Example #38
 def wrap_sql_limit(self, sql, limit=1000):
     qry = (select('*').select_from(
         TextAsFrom(text(sql), ['*']).alias('inner_qry')).limit(limit))
     return self.compile_sqla_query(qry)
Example #39
class Match(db.Model):
    __tablename__ = 'match'

    id = db.Column(db.Integer, primary_key=True)
    event_id = db.Column(
        db.Integer,
        db.ForeignKey('event.id', ondelete="CASCADE", onupdate="CASCADE"))

    brief = db.Column(db.String(200), nullable=False, server_default='')
    forecast = db.Column(db.String(200), nullable=False, server_default='')
    round = db.Column(db.Integer, nullable=False, server_default=text('0'))
    visible = db.Column(TINYINT,
                        nullable=False,
                        server_default=text(MatchVisibleStatus.SHOWN))
    status = db.Column(TINYINT,
                       nullable=False,
                       server_default=text(MatchStatus.NOTSTARTED))
    start_tm = db.Column(db.TIMESTAMP,
                         nullable=False,
                         server_default=text('0'),
                         index=True)
    finish_tm = db.Column(db.TIMESTAMP,
                          nullable=False,
                          server_default=text('0'))
    viewers = db.Column(db.Integer, nullable=False, server_default=text('0'))
    team1_id = db.Column(db.Integer, nullable=False)
    team1_score = db.Column(db.Integer,
                            nullable=False,
                            server_default=text('0'))
    team1_likes = db.Column(db.Integer,
                            nullable=False,
                            server_default=text('0'))
    team2_id = db.Column(db.Integer, nullable=False)
    team2_score = db.Column(db.Integer,
                            nullable=False,
                            server_default=text('0'))
    team2_likes = db.Column(db.Integer,
                            nullable=False,
                            server_default=text('0'))

    match_news = db.relationship('MatchNews', backref='match', lazy='dynamic')
    match_video = db.relationship('MatchVideo',
                                  backref='match',
                                  lazy='dynamic')
    match_live = db.relationship('MatchLive', backref='match', lazy='dynamic')

    created_at = db.Column(db.TIMESTAMP,
                           server_default=db.func.current_timestamp(),
                           nullable=False)
    updated_at = db.Column(
        db.TIMESTAMP,
        server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'),
        nullable=False)

    def to_dict(self):
        return dict(id=self.id,
                    event_id=self.event_id,
                    brief=self.brief,
                    forecast=self.forecast,
                    round=self.round,
                    visible=self.visible,
                    status=self.status,
                    start_tm=to_timestamp(self.start_tm),
                    finish_tm=to_timestamp(self.finish_tm),
                    viewers=self.viewers,
                    team1_id=self.team1_id,
                    team1_score=self.team1_score,
                    team1_likes=self.team1_likes,
                    team2_id=self.team2_id,
                    team2_score=self.team2_score,
                    team2_likes=self.team2_likes)
Example #40
    def reflecttable(self, connection, table, include_columns):
        # This is defined in the function, as it relies on win32com constants,
        # that aren't imported until dbapi method is called
        if not hasattr(self, 'ischema_names'):
            self.ischema_names = {
                const.dbByte:       AcBinary,
                const.dbInteger:    AcInteger,
                const.dbLong:       AcInteger,
                const.dbSingle:     AcFloat,
                const.dbDouble:     AcFloat,
                const.dbDate:       AcDateTime,
                const.dbLongBinary: AcBinary,
                const.dbMemo:       AcText,
                const.dbBoolean:    AcBoolean,
                const.dbText:       AcUnicode, # All Access strings are unicode
                const.dbCurrency:   AcNumeric,
            }

        # A fresh DAO connection is opened for each reflection
        # This is necessary, so we get the latest updates
        dtbs = daoEngine.OpenDatabase(connection.engine.url.database)

        try:
            for tbl in dtbs.TableDefs:
                if tbl.Name.lower() == table.name.lower():
                    break
            else:
                raise exc.NoSuchTableError(table.name)

            for col in tbl.Fields:
                coltype = self.ischema_names[col.Type]
                if col.Type == const.dbText:
                    coltype = coltype(col.Size)

                colargs = \
                {
                    'nullable': not(col.Required or col.Attributes & const.dbAutoIncrField),
                }
                default = col.DefaultValue

                if col.Attributes & const.dbAutoIncrField:
                    colargs['default'] = schema.Sequence(col.Name + '_seq')
                elif default:
                    if col.Type == const.dbBoolean:
                        default = default == 'Yes' and '1' or '0'
                    colargs['server_default'] = schema.DefaultClause(sql.text(default))

                table.append_column(schema.Column(col.Name, coltype, **colargs))

                # TBD: check constraints

            # Find primary key columns first
            for idx in tbl.Indexes:
                if idx.Primary:
                    for col in idx.Fields:
                        thecol = table.c[col.Name]
                        table.primary_key.add(thecol)
                        if isinstance(thecol.type, AcInteger) and \
                                not (thecol.default and isinstance(thecol.default.arg, schema.Sequence)):
                            thecol.autoincrement = False

            # Then add other indexes
            for idx in tbl.Indexes:
                if not idx.Primary:
                    if len(idx.Fields) == 1:
                        col = table.c[idx.Fields[0].Name]
                        if not col.primary_key:
                            col.index = True
                            col.unique = idx.Unique
                    else:
                        pass # TBD: multi-column indexes


            for fk in dtbs.Relations:
                if fk.ForeignTable != table.name:
                    continue
                scols = [c.ForeignName for c in fk.Fields]
                rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields]
                table.append_constraint(schema.ForeignKeyConstraint(scols, rcols, link_to_name=True))

        finally:
            dtbs.Close()
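A minimal usage sketch, not part of the dialect code above: reflecttable() is normally driven by table autoloading, so reflecting an Access table would look roughly like this (the engine URL is a hypothetical placeholder).

from sqlalchemy import MetaData, Table, create_engine

engine = create_engine("access+pyodbc://...")  # hypothetical Access DSN/URL
metadata = MetaData()
# Autoloading triggers the dialect's reflecttable(), which fills in columns,
# the primary key, secondary indexes and foreign keys as shown above.
orders = Table("orders", metadata, autoload_with=engine)  # autoload=True on older SQLAlchemy
print([c.name for c in orders.columns])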
Example #41
0
def write_stations_to_csv(session,
                          path,
                          today,
                          start_time=None,
                          end_time=None):
    linesep = "\r\n"

    where = "lat IS NOT NULL AND lon IS NOT NULL"
    if start_time is not None and end_time is not None:
        where = where + ' AND modified >= "%s" AND modified < "%s"'
        fmt = "%Y-%m-%d %H:%M:%S"
        where = where % (start_time.strftime(fmt), end_time.strftime(fmt))
    else:
        # limit to cells modified in the last 12 months
        one_year = today - timedelta(days=365)
        where = where + ' AND modified >= "%s"' % one_year.strftime("%Y-%m-%d")

    header_row = ",".join(_FIELD_NAMES) + linesep

    tables = [shard.__tablename__ for shard in CellShard.shards().values()]
    stmt = """SELECT
    `cellid`,
    CONCAT_WS(",",
        CASE radio
            WHEN 0 THEN "GSM"
            WHEN 2 THEN "UMTS"
            WHEN 3 THEN "LTE"
            ELSE ""
        END,
        `mcc`,
        `mnc`,
        `lac`,
        `cid`,
        COALESCE(`psc`, ""),
        ROUND(`lon`, 7),
        ROUND(`lat`, 7),
        COALESCE(`radius`, "0"),
        COALESCE(`samples`, "0"),
        "1",
        COALESCE(UNIX_TIMESTAMP(`created`), ""),
        COALESCE(UNIX_TIMESTAMP(`modified`), ""),
        ""
    ) AS `cell_value`
FROM %s
WHERE %s AND `cellid` > :cellid
ORDER BY `cellid`
LIMIT :limit
"""

    with util.gzip_open(path, "w", compresslevel=5) as gzip_wrapper:
        with gzip_wrapper as gzip_file:
            gzip_file.write(header_row)
            for table in tables:
                table_stmt = text(stmt % (table, where))
                min_cellid = ""
                limit = 25000
                while True:
                    rows = session.execute(
                        table_stmt.bindparams(limit=limit,
                                              cellid=min_cellid)).fetchall()
                    if rows:
                        buf = "".join(row.cell_value + linesep for row in rows)
                        gzip_file.write(buf)
                        min_cellid = rows[-1].cellid
                    else:
                        break
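The inner while-loop above is keyset pagination: rather than paging with OFFSET, each batch restarts from the last `cellid` seen, which stays fast on large tables. A stripped-down sketch of the same pattern, with an illustrative column list rather than the full export query:

from sqlalchemy import text

def iter_cells_in_batches(session, table, batch_size=25000):
    # Yield batches ordered by cellid; the key of the last row seen becomes
    # the lower bound of the next query, so no OFFSET scan is needed.
    stmt = text(
        "SELECT cellid, lat, lon FROM %s "
        "WHERE cellid > :cellid ORDER BY cellid LIMIT :limit" % table)
    last_seen = ""
    while True:
        rows = session.execute(
            stmt.bindparams(cellid=last_seen, limit=batch_size)).fetchall()
        if not rows:
            break
        yield rows
        last_seen = rows[-1].cellid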
Example #42
0
class MatchNews(db.Model):
    __tablename__ = 'match_news'

    id = db.Column(db.Integer, primary_key=True)
    match_id = db.Column(
        db.Integer,
        db.ForeignKey('match.id', ondelete="CASCADE", onupdate="CASCADE"))
    title = db.Column(db.String(100), nullable=False)
    title_prior = db.Column(db.String(100), nullable=False, server_default='')
    type = db.Column(db.String(20), nullable=False)
    args = db.Column(db.String(256), nullable=False, server_default='')
    visible = db.Column(TINYINT,
                        nullable=False,
                        server_default=text(MatchNewsStatus.SHOWN))
    subtitle = db.Column(db.String(200), nullable=False, server_default='')
    site = db.Column(db.String(20), nullable=False, server_default='')
    origin = db.Column(db.String(20), nullable=False, server_default='')
    content = db.Column(db.Text)
    image = db.Column(db.String(256), nullable=False, server_default='')
    large_image = db.Column(db.String(256), nullable=False, server_default='')
    play_url = db.Column(db.String(1024), nullable=False, server_default='')
    play_html = db.Column(db.String(8192), nullable=False, server_default='')
    play_code = db.Column(db.String(1024), nullable=False, server_default='')
    top = db.Column(TINYINT, nullable=False, server_default=text('0'))
    pin = db.Column(TINYINT, nullable=False, server_default=text('0'))
    isvr = db.Column(TINYINT, nullable=False, server_default=text('0'))
    source_url = db.Column(db.String(200),
                           nullable=False,
                           server_default='',
                           index=True)

    publish_tm = db.Column(db.TIMESTAMP,
                           server_default=db.func.current_timestamp(),
                           nullable=False,
                           index=True)
    created_at = db.Column(db.TIMESTAMP,
                           server_default=db.func.current_timestamp(),
                           nullable=False)
    updated_at = db.Column(
        db.TIMESTAMP,
        server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'),
        nullable=False)

    __table_args__ = (Index('ix_match_news_title_and_site', "site", "title"), )

    def to_dict(self):
        return dict(id=self.id,
                    match_id=self.match_id,
                    title=self.title,
                    type=self.type,
                    visible=self.visible,
                    subtitle=self.subtitle,
                    site=self.site,
                    origin=self.origin,
                    content=self.content,
                    image=add_image_domain(self.image),
                    imageCode=self.image,
                    play_url=self.play_url,
                    play_code=self.play_code,
                    play_html=self.play_html,
                    isvr=self.isvr,
                    publish_tm=to_timestamp(self.publish_tm))
Example #43
0
    def query(self,
              constraint,
              sortby=None,
              typenames=None,
              maxrecords=10,
              startposition=0):
        ''' Query records from underlying repository '''

        # run the raw query and get total
        if 'where' in constraint:  # GetRecords with constraint
            LOGGER.debug('constraint detected')
            query = self.session.query(self.dataset).filter(
                text(constraint['where'])).params(
                    self._create_values(constraint['values']))
        else:  # GetRecords sans constraint
            LOGGER.debug('No constraint detected')
            query = self.session.query(self.dataset)

        total = self._get_repo_filter(query).count()

        if util.ranking_pass:  #apply spatial ranking
            #TODO: Check here for dbtype so to extract wkt from postgis native to wkt
            LOGGER.debug('spatial ranking detected')
            LOGGER.debug(
                'Target WKT: %s',
                getattr(
                    self.dataset, self.context.md_core_model['mappings']
                    ['pycsw:BoundingBox']))
            LOGGER.debug('Query WKT: %s', util.ranking_query_geometry)
            query = query.order_by(
                func.get_spatial_overlay_rank(
                    getattr(
                        self.dataset, self.context.md_core_model['mappings']
                        ['pycsw:BoundingBox']),
                    util.ranking_query_geometry).desc())
            #trying to make this wsgi safe
            util.ranking_pass = False
            util.ranking_query_geometry = ''

        if sortby is not None:  # apply sorting
            LOGGER.debug('sorting detected')
            #TODO: Check here for dbtype so to extract wkt from postgis native to wkt
            sortby_column = getattr(self.dataset, sortby['propertyname'])

            if sortby['order'] == 'DESC':  # descending sort
                if 'spatial' in sortby and sortby['spatial']:  # spatial sort
                    query = query.order_by(
                        func.get_geometry_area(sortby_column).desc())
                else:  # aspatial sort
                    query = query.order_by(sortby_column.desc())
            else:  # ascending sort
                if 'spatial' in sortby and sortby['spatial']:  # spatial sort
                    query = query.order_by(
                        func.get_geometry_area(sortby_column))
                else:  # aspatial sort
                    query = query.order_by(sortby_column)

        # always apply limit and offset
        return [
            str(total),
            self._get_repo_filter(query).limit(maxrecords).offset(
                startposition).all()
        ]
Example #44
0
def clean(now_date, archives, max_delete, session):
    cnf = Config()

    count = 0
    size = 0

    Logger.log(["Cleaning out packages..."])

    morguedir = cnf.get("Dir::Morgue", os.path.join("Dir::Pool", 'morgue'))
    morguesubdir = cnf.get("Clean-Suites::MorgueSubDir", 'pool')

    # Build directory as morguedir/morguesubdir/year/month/day
    dest = os.path.join(morguedir,
                        morguesubdir,
                        str(now_date.year),
                        '%.2d' % now_date.month,
                        '%.2d' % now_date.day)

    if not Options["No-Action"] and not os.path.exists(dest):
        os.makedirs(dest)

    # Delete from source
    Logger.log(["Deleting from source table..."])
    q = session.execute("""
      WITH
      deleted_sources AS (
        DELETE FROM source
         USING files f
         WHERE source.file = f.id
           AND NOT EXISTS (SELECT 1 FROM files_archive_map af
                                    JOIN archive_delete_date ad ON af.archive_id = ad.archive_id
                                   WHERE af.file_id = source.file
                                     AND (af.last_used IS NULL OR af.last_used > ad.delete_date))
        RETURNING source.id AS id, f.filename AS filename
      ),
      deleted_dsc_files AS (
        DELETE FROM dsc_files df WHERE df.source IN (SELECT id FROM deleted_sources)
        RETURNING df.file AS file_id
      ),
      now_unused_source_files AS (
        UPDATE files_archive_map af
           SET last_used = '1977-03-13 13:37:42' -- Kill it now. We waited long enough before removing the .dsc.
         WHERE af.file_id IN (SELECT file_id FROM deleted_dsc_files)
           AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = af.file_id)
      )
      SELECT filename FROM deleted_sources""")
    for s in q:
        Logger.log(["delete source", s[0]])

    if not Options["No-Action"]:
        session.commit()

    # Delete files from the pool
    old_files = (
        session.query(ArchiveFile)
        .filter(sql.text(
            'files_archive_map.last_used <= '
            '(SELECT delete_date FROM archive_delete_date ad '
            'WHERE ad.archive_id = files_archive_map.archive_id)'))
        .join(Archive))
    if max_delete is not None:
        old_files = old_files.limit(max_delete)
        Logger.log(["Limiting removals to %d" % max_delete])

    if archives is not None:
        archive_ids = [ a.archive_id for a in archives ]
        old_files = old_files.filter(ArchiveFile.archive_id.in_(archive_ids))

    for af in old_files:
        filename = af.path
        if not os.path.exists(filename):
            Logger.log(["database referred to non-existing file", af.path])
            session.delete(af)
            continue
        Logger.log(["delete archive file", filename])
        if os.path.isfile(filename):
            if os.path.islink(filename):
                count += 1
                Logger.log(["delete symlink", filename])
                if not Options["No-Action"]:
                    os.unlink(filename)
            else:
                size += os.stat(filename)[stat.ST_SIZE]
                count += 1

                dest_filename = dest + '/' + os.path.basename(filename)
                # If the destination file exists; try to find another filename to use
                if os.path.lexists(dest_filename):
                    dest_filename = utils.find_next_free(dest_filename)

                if not Options["No-Action"]:
                    if af.archive.use_morgue:
                        Logger.log(["move to morgue", filename, dest_filename])
                        utils.move(filename, dest_filename)
                    else:
                        Logger.log(["removed file", filename])
                        os.unlink(filename)

            if not Options["No-Action"]:
                session.delete(af)
                session.commit()

        else:
            utils.fubar("%s is neither symlink nor file?!" % (filename))

    if count > 0:
        Logger.log(["total", count, utils.size_type(size)])

    # Delete entries in files no longer referenced by any archive
    query = """
       DELETE FROM files f
        WHERE NOT EXISTS (SELECT 1 FROM files_archive_map af WHERE af.file_id = f.id)
    """
    session.execute(query)

    if not Options["No-Action"]:
        session.commit()
Example #45
0
 def resolve_get_user(self, info, username):
     res = db.session.query(User).filter(text("username ='******'")).first()
     if res:
         return res
     else:
         return None
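The filter above splices a (redacted) literal straight into the SQL string. A hedged alternative sketch that keeps text() but passes the value as a bound parameter; the name resolve_get_user_safe and the User model are assumptions mirroring the example:

def resolve_get_user_safe(self, info, username):
    # same lookup, but the value travels as a bound parameter
    return (db.session.query(User)
            .filter(text("username = :username"))
            .params(username=username)
            .first())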
Example #46
0
def main():
    import gp
    from sklearn.feature_extraction import DictVectorizer

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-g',
        '--gpu-split',
        type=float,
        default=1,
        help="Num ways we'll split the GPU (how many tabs you running?)")
    parser.add_argument('-n',
                        '--net-type',
                        type=str,
                        default='conv2d',
                        help="(lstm|conv2d) Which network arch to use")
    parser.add_argument(
        '--guess',
        type=int,
        default=-1,
        help="Run the hard-coded 'guess' values first before exploring")
    parser.add_argument(
        '--boost',
        action="store_true",
        default=False,
        help=
        "Use custom gradient-boosting optimization, or bayesian optimization?")
    args = parser.parse_args()

    # Encode features
    hsearch = HSearchEnv(gpu_split=args.gpu_split, net_type=args.net_type)
    hypers_, hardcoded = hsearch.hypers, hsearch.hardcoded
    hypers_ = {k: v for k, v in hypers_.items() if k not in hardcoded}
    hsearch.close()

    # Build a matrix of features,  length = max feature size
    max_num_vals = 0
    for v in hypers_.values():
        l = len(v['vals'])
        if l > max_num_vals: max_num_vals = l
    empty_obj = {k: None for k in hypers_}
    mat = pd.DataFrame([empty_obj.copy() for _ in range(max_num_vals)])
    for k, hyper in hypers_.items():
        for i, v in enumerate(hyper['vals']):
            mat.loc[i, k] = v
    mat.ffill(inplace=True)

    # Above is Pandas-friendly stuff, now convert to sklearn-friendly & pipe through OneHotEncoder
    vectorizer = DictVectorizer()
    vectorizer.fit(mat.T.to_dict().values())
    feat_names = vectorizer.get_feature_names()

    # Map TensorForce actions to GP-compatible `domain`
    # instantiate just to get actions (get them from hypers above?)
    bounds = []
    for k in feat_names:
        hyper = hypers_.get(k, False)
        if hyper:
            bounded = hyper['type'] == 'bounded'
            min_, max_ = min(hyper['vals']), max(hyper['vals'])
            b = [min_, max_] if bounded else [0, 1]
        else:
            # one-hot columns like "k=v" are not in hypers_; bound them to [0, 1]
            b = [0, 1]
        bounds.append(b)

    def hypers2vec(obj):
        h = dict()
        for k, v in obj.items():
            if k in hardcoded: continue
            if type(v) == bool: h[k] = float(v)
            else: h[k] = v or 0.
        return vectorizer.transform(h).toarray()[0]

    def vec2hypers(vec):
        # Reverse the encoding
        # https://stackoverflow.com/questions/22548731/how-to-reverse-sklearn-onehotencoder-transform-to-recover-original-data
        # https://github.com/scikit-learn/scikit-learn/issues/4414
        reversed = vectorizer.inverse_transform([vec])[0]
        obj = {}
        for k, v in reversed.items():
            if '=' not in k:
                obj[k] = v
                continue
            if k in obj: continue  # we already handled this x=y logic (below)
            # Find the winner (max) option for this key
            score, attr, val = v, k.split('=')[0], k.split('=')[1]
            for k2, score2 in reversed.items():
                if k2.startswith(attr + '=') and score2 > score:
                    score, val = score2, k2.split('=')[1]
            obj[attr] = val

        # Bools come in as floats. Also, if the result is False they don't come in at all! So we start iterate
        # hypers now instead of nesting this logic in reversed-iteration above
        for k, v in hypers_.items():
            if v['type'] == 'bool':
                obj[k] = bool(round(obj.get(k, 0.)))
        return obj

    # Specify the "loss" function (which we'll maximize) as a single rl_hsearch instantiate-and-run
    def loss_fn(params):
        hsearch = HSearchEnv(gpu_split=args.gpu_split, net_type=args.net_type)
        reward = hsearch.execute(vec2hypers(params))
        hsearch.close()
        return [reward]

    guess_i = 0
    while True:
        # Every iteration, re-fetch from the database & pre-train new model. Acts same as saving/loading a model to disk,
        # but this allows to distribute across servers easily
        conn_runs = data.engine_runs.connect()
        sql = "select hypers, advantages, advantage_avg from runs where flag=:f"
        runs = conn_runs.execute(text(sql), f=args.net_type).fetchall()
        conn_runs.close()
        X, Y = [], []
        for run in runs:
            X.append(hypers2vec(run.hypers))
            Y.append([utils.calculate_score(run)])
        boost_model = print_feature_importances(X, Y, feat_names)

        if args.guess != -1:
            guess = {k: v['guess'] for k, v in hypers_.items()}
            guess.update(utils.guess_overrides[args.guess][guess_i])
            loss_fn(hypers2vec(guess))

            guess_i += 1
            if guess_i > len(utils.guess_overrides[args.guess]) - 1:
                args.guess = -1  # start on GP

            continue

        if args.boost:
            print('Using gradient-boosting')
            boost_optimization(model=boost_model,
                               loss_fn=loss_fn,
                               bounds=np.array(bounds),
                               x_list=X,
                               y_list=Y)
        else:
            # Evidently duplicate values break GP. Many of these are ints, so they're definite duplicates. Either way,
            # tack on some small epsilon to make them different (1e-6 < gp.py's min threshold, make sure that #'s not a
            # problem). I'm concerned about this since many hypers can go below that epislon (eg learning-rate).
            for x in X:
                for i, v in enumerate(x):
                    x[i] += np.random.random() * 1e-6
            gp.bayesian_optimisation2(loss_fn=loss_fn,
                                      bounds=np.array(bounds),
                                      x_list=X,
                                      y_list=Y)
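hypers2vec() and vec2hypers() lean on scikit-learn's DictVectorizer one-hot encoding; a tiny round-trip illustration with toy hyperparameter names (not the real ones used above):

from sklearn.feature_extraction import DictVectorizer

v = DictVectorizer(sparse=False)
X = v.fit_transform([{'net': 'lstm', 'lr': 0.001},
                     {'net': 'conv2d', 'lr': 0.01}])
# String values become one-hot columns named like 'net=lstm' / 'net=conv2d'.
print(v.get_feature_names_out())  # older scikit-learn releases: get_feature_names()
print(v.inverse_transform(X)[0])  # -> {'lr': 0.001, 'net=lstm': 1.0}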
Example #47
0
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "roles_authorities",
        sa.Column("authority_id", sa.Integer(), nullable=True),
        sa.Column("role_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["authority_id"], ["authorities.id"]),
        sa.ForeignKeyConstraint(["role_id"], ["roles.id"]),
    )
    op.create_index(
        "roles_authorities_ix",
        "roles_authorities",
        ["authority_id", "role_id"],
        unique=True,
    )
    op.create_table(
        "roles_certificates",
        sa.Column("certificate_id", sa.Integer(), nullable=True),
        sa.Column("role_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["certificate_id"], ["certificates.id"]),
        sa.ForeignKeyConstraint(["role_id"], ["roles.id"]),
    )
    op.create_index(
        "roles_certificates_ix",
        "roles_certificates",
        ["certificate_id", "role_id"],
        unique=True,
    )
    op.create_index(
        "certificate_associations_ix",
        "certificate_associations",
        ["domain_id", "certificate_id"],
        unique=True,
    )
    op.create_index(
        "certificate_destination_associations_ix",
        "certificate_destination_associations",
        ["destination_id", "certificate_id"],
        unique=True,
    )
    op.create_index(
        "certificate_notification_associations_ix",
        "certificate_notification_associations",
        ["notification_id", "certificate_id"],
        unique=True,
    )
    op.create_index(
        "certificate_replacement_associations_ix",
        "certificate_replacement_associations",
        ["certificate_id", "certificate_id"],
        unique=True,
    )
    op.create_index(
        "certificate_source_associations_ix",
        "certificate_source_associations",
        ["source_id", "certificate_id"],
        unique=True,
    )
    op.create_index("roles_users_ix",
                    "roles_users", ["user_id", "role_id"],
                    unique=True)

    ### end Alembic commands ###

    # migrate existing authority_id relationship to many_to_many
    conn = op.get_bind()
    for id, authority_id in conn.execute(
            text(
                "select id, authority_id from roles where authority_id is not null"
            )):
        stmt = text(
            "insert into roles_authoritties (role_id, authority_id) values (:role_id, :authority_id)"
        )
        stmt = stmt.bindparams(role_id=id, authority_id=authority_id)
        op.execute(stmt)
Example #48
0
try:
    rows = rows2 = 0
    self.session.begin()
    for rpu in recprops:
        # update queryable column and XML document via XPath
        if 'xpath' not in rpu['rp']:
            self.session.rollback()
            raise RuntimeError('XPath not found for property %s' %
                               rpu['rp']['name'])
        if 'dbcol' not in rpu['rp']:
            self.session.rollback()
            raise RuntimeError('property not found for XPath %s' %
                               rpu['rp']['name'])
        rows += self._get_repo_filter(
            self.session.query(self.dataset)
        ).filter(text(constraint['where'])).params(
            self._create_values(constraint['values'])).update(
                {
                    getattr(self.dataset, rpu['rp']['dbcol']):
                    rpu['value'],
                    'xml':
                    func.update_xpath(
                        str(self.context.namespaces),
                        getattr(
                            self.dataset,
                            self.context.md_core_model['mappings']
                            ['pycsw:XML']), str(rpu)),
                },
                synchronize_session='fetch')
        # then update anytext tokens
        rows2 += self._get_repo_filter(
Example #49
0
class RowCount(db.Model):

    __tablename__ = "row_counts"

    # table_name serves as the primary key; a declarative model needs one
    table_name = Column(Text, primary_key=True)
    count = Column(BigInteger, nullable=False, server_default=sql.text("0"))
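Because count has a server-side default of 0, a counter can be bumped with a single atomic UPDATE. A usage sketch under that assumption, reusing the sql alias from the example (the helper name is hypothetical):

def increment_row_count(session, table_name, n=1):
    # server-side increment; avoids a read-modify-write race
    session.execute(
        sql.text("UPDATE row_counts SET count = count + :n "
                 "WHERE table_name = :table_name"),
        {"n": n, "table_name": table_name})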
Example #50
0
 def _refresh_search_index(db_session):
     db_session.execute(
         text('REFRESH MATERIALIZED VIEW notes_fts_index'))
     db_session.execute(
         text('REFRESH MATERIALIZED VIEW advisor_author_index'))
     std_commit(session=db_session)
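A hedged variant (hypothetical helper name): if these materialized views carry a unique index, which is an assumption not stated above, PostgreSQL can rebuild them without blocking readers by adding CONCURRENTLY:

def _refresh_search_index_concurrently(db_session):
    # CONCURRENTLY trades the exclusive lock for the requirement that the
    # materialized view has a unique index.
    db_session.execute(
        text('REFRESH MATERIALIZED VIEW CONCURRENTLY notes_fts_index'))
    db_session.execute(
        text('REFRESH MATERIALIZED VIEW CONCURRENTLY advisor_author_index'))
    std_commit(session=db_session)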
Example #51
0
 def get_view_names(self, connection, schema=None, **kw):
     schema = self.denormalize_name(schema or self.default_schema_name)
     s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner")
     cursor = connection.execute(s, owner=self.denormalize_name(schema))
     return [self.normalize_name(row[0]) for row in cursor]
Example #52
0
class UserSession(Base):
    """
    API keys/session cookies for the app

    There are two types of sessions: long-lived, and short-lived. Long-lived are
    like when you choose "remember this browser": they will be valid for a long
    time without the user interacting with the site. Short-lived sessions on the
    other hand get invalidated quickly if the user does not interact with the
    site.

    Long-lived tokens are valid from `created` until `expiry`.

    Short-lived tokens expire after 168 hours (7 days) if they are not used.
    """

    __tablename__ = "sessions"
    token = Column(String, primary_key=True)

    user_id = Column(ForeignKey("users.id"), nullable=False, index=True)

    # whether it's a long-lived or short-lived session
    long_lived = Column(Boolean, nullable=False)

    # the time at which the session was created
    created = Column(DateTime(timezone=True),
                     nullable=False,
                     server_default=func.now())

    # the expiry of the session: the session *cannot* be refreshed past this
    expiry = Column(DateTime(timezone=True),
                    nullable=False,
                    server_default=func.now() + text("interval '90 days'"))

    # the time at which the token was invalidated, allows users to delete sessions
    deleted = Column(DateTime(timezone=True), nullable=True, default=None)

    # the last time this session was used
    last_seen = Column(DateTime(timezone=True),
                       nullable=False,
                       server_default=func.now())

    # count of api calls made with this token/session (if we're updating last_seen, might as well update this too)
    api_calls = Column(Integer, nullable=False, default=0)

    # details of the browser, if available
    # these are from the request creating the session, not used for anything else
    ip_address = Column(String, nullable=True)
    user_agent = Column(String, nullable=True)

    user = relationship("User", backref="sessions")

    @hybrid_property
    def is_valid(self):
        """
        It must have been created and not be expired or deleted.

        Also, if it's a short lived token, it must have been used in the last 168 hours.

        TODO: this probably won't run in python (instance level), only in sql (class level)
        """
        return ((self.created <= func.now())
                & (self.expiry >= func.now())
                & (self.deleted == None)
                &
                (self.long_lived |
                 (func.now() - self.last_seen < text("interval '168 hours'"))))
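As the TODO in the docstring notes, is_valid is built from SQL expressions, so it is meant to be evaluated class-level inside a query. A usage sketch; session and current_user_id are assumed context, not part of the model above:

valid_sessions = (
    session.query(UserSession)
    .filter(UserSession.user_id == current_user_id)
    .filter(UserSession.is_valid)
    .order_by(UserSession.last_seen.desc())
    .all())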
Example #53
0
def check_reverse_depends(removals,
                          suite,
                          arches=None,
                          session=None,
                          cruft=False,
                          quiet=False,
                          include_arch_all=True):
    dbsuite = get_suite(suite, session)
    overridesuite = dbsuite
    if dbsuite.overridesuite is not None:
        overridesuite = get_suite(dbsuite.overridesuite, session)
    dep_problem = 0
    p2c = {}
    all_broken = defaultdict(lambda: defaultdict(set))
    if arches:
        all_arches = set(arches)
    else:
        all_arches = set(x.arch_string for x in get_suite_architectures(suite))
    all_arches -= set(["source", "all"])
    removal_set = set(removals)
    metakey_d = get_or_set_metadatakey("Depends", session)
    metakey_p = get_or_set_metadatakey("Provides", session)
    params = {
        'suite_id': dbsuite.suite_id,
        'metakey_d_id': metakey_d.key_id,
        'metakey_p_id': metakey_p.key_id,
    }
    if include_arch_all:
        rdep_architectures = all_arches | set(['all'])
    else:
        rdep_architectures = all_arches
    for architecture in rdep_architectures:
        deps = {}
        sources = {}
        virtual_packages = {}
        try:
            params['arch_id'] = get_architecture(architecture, session).arch_id
        except AttributeError:
            continue

        statement = sql.text('''
            SELECT b.package, s.source, c.name as component,
                (SELECT bmd.value FROM binaries_metadata bmd WHERE bmd.bin_id = b.id AND bmd.key_id = :metakey_d_id) AS depends,
                (SELECT bmp.value FROM binaries_metadata bmp WHERE bmp.bin_id = b.id AND bmp.key_id = :metakey_p_id) AS provides
                FROM binaries b
                JOIN bin_associations ba ON b.id = ba.bin AND ba.suite = :suite_id
                JOIN source s ON b.source = s.id
                JOIN files_archive_map af ON b.file = af.file_id
                JOIN component c ON af.component_id = c.id
                WHERE b.architecture = :arch_id''')
        query = session.query('package', 'source', 'component', 'depends', 'provides'). \
            from_statement(statement).params(params)
        for package, source, component, depends, provides in query:
            sources[package] = source
            p2c[package] = component
            if depends is not None:
                deps[package] = depends
            # Maintain a counter for each virtual package.  If a
            # Provides: exists, set the counter to 0 and count all
            # provides by a package not in the list for removal.
            # If the counter stays 0 at the end, we know that only
            # the to-be-removed packages provided this virtual
            # package.
            if provides is not None:
                for virtual_pkg in provides.split(","):
                    virtual_pkg = virtual_pkg.strip()
                    if virtual_pkg == package:
                        continue
                    if virtual_pkg not in virtual_packages:
                        virtual_packages[virtual_pkg] = 0
                    if package not in removals:
                        virtual_packages[virtual_pkg] += 1

        # If a virtual package is only provided by the to-be-removed
        # packages, treat the virtual package as to-be-removed too.
        removal_set.update(virtual_pkg for virtual_pkg in virtual_packages
                           if not virtual_packages[virtual_pkg])

        # Check binary dependencies (Depends)
        for package in deps:
            if package in removals:
                continue
            try:
                parsed_dep = apt_pkg.parse_depends(deps[package])
            except ValueError as e:
                print("Error for package %s: %s" % (package, e))
                parsed_dep = []
            for dep in parsed_dep:
                # Check for partial breakage.  If a package has a ORed
                # dependency, there is only a dependency problem if all
                # packages in the ORed depends will be removed.
                unsat = 0
                for dep_package, _, _ in dep:
                    if dep_package in removals:
                        unsat += 1
                if unsat == len(dep):
                    component = p2c[package]
                    source = sources[package]
                    if component != "main":
                        source = "%s/%s" % (source, component)
                    all_broken[source][package].add(architecture)
                    dep_problem = 1

    if all_broken and not quiet:
        if cruft:
            print("  - broken Depends:")
        else:
            print("# Broken Depends:")
        for source, bindict in sorted(all_broken.items()):
            lines = []
            for binary, arches in sorted(bindict.items()):
                if arches == all_arches or 'all' in arches:
                    lines.append(binary)
                else:
                    lines.append('%s [%s]' %
                                 (binary, ' '.join(sorted(arches))))
            if cruft:
                print('    %s: %s' % (source, lines[0]))
            else:
                print('%s: %s' % (source, lines[0]))
            for line in lines[1:]:
                if cruft:
                    print('    ' + ' ' * (len(source) + 2) + line)
                else:
                    print(' ' * (len(source) + 2) + line)
        if not cruft:
            print()

    # Check source dependencies (Build-Depends and Build-Depends-Indep)
    all_broken = defaultdict(set)
    metakey_bd = get_or_set_metadatakey("Build-Depends", session)
    metakey_bdi = get_or_set_metadatakey("Build-Depends-Indep", session)
    if include_arch_all:
        metakey_ids = (metakey_bd.key_id, metakey_bdi.key_id)
    else:
        metakey_ids = (metakey_bd.key_id, )

    params = {
        'suite_id': dbsuite.suite_id,
        'metakey_ids': metakey_ids,
    }
    statement = sql.text('''
        SELECT s.source, string_agg(sm.value, ', ') as build_dep
           FROM source s
           JOIN source_metadata sm ON s.id = sm.src_id
           WHERE s.id in
               (SELECT src FROM newest_src_association
                   WHERE suite = :suite_id)
               AND sm.key_id in :metakey_ids
           GROUP BY s.id, s.source''')
    query = session.query('source', 'build_dep').from_statement(statement). \
        params(params)
    for source, build_dep in query:
        if source in removals:
            continue
        parsed_dep = []
        if build_dep is not None:
            # Remove [arch] information since we want to see breakage on all arches
            build_dep = re_build_dep_arch.sub("", build_dep)
            try:
                parsed_dep = apt_pkg.parse_src_depends(build_dep)
            except ValueError as e:
                print("Error for source %s: %s" % (source, e))
        for dep in parsed_dep:
            unsat = 0
            for dep_package, _, _ in dep:
                if dep_package in removals:
                    unsat += 1
            if unsat == len(dep):
                component, = session.query(Component.component_name) \
                    .join(Component.overrides) \
                    .filter(Override.suite == overridesuite) \
                    .filter(Override.package == re.sub('/(contrib|non-free)$', '', source)) \
                    .join(Override.overridetype).filter(OverrideType.overridetype == 'dsc') \
                    .first()
                key = source
                if component != "main":
                    key = "%s/%s" % (source, component)
                all_broken[key].add(pp_deps(dep))
                dep_problem = 1

    if all_broken and not quiet:
        if cruft:
            print("  - broken Build-Depends:")
        else:
            print("# Broken Build-Depends:")
        for source, bdeps in sorted(all_broken.items()):
            bdeps = sorted(bdeps)
            if cruft:
                print('    %s: %s' % (source, bdeps[0]))
            else:
                print('%s: %s' % (source, bdeps[0]))
            for bdep in bdeps[1:]:
                if cruft:
                    print('    ' + ' ' * (len(source) + 2) + bdep)
                else:
                    print(' ' * (len(source) + 2) + bdep)
        if not cruft:
            print()

    return dep_problem
Example #54
0
    def get_columns(self, connection, table_name, schema=None, **kw):
        """

        kw arguments can be:

            oracle_resolve_synonyms

            dblink

        """

        resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
        dblink = kw.get('dblink', '')
        info_cache = kw.get('info_cache')

        (table_name, schema, dblink, synonym) = \
            self._prepare_reflection_args(connection, table_name, schema,
                                          resolve_synonyms, dblink,
                                          info_cache=info_cache)
        columns = []
        if self._supports_char_length:
            char_length_col = 'char_length'
        else:
            char_length_col = 'data_length'

        c = connection.execute(sql.text(
                "SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
                "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
                "WHERE table_name = :table_name AND owner = :owner "
                "ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
                               table_name=table_name, owner=schema)

        for row in c:
            (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \
                (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6])

            if coltype == 'NUMBER':
                coltype = NUMBER(precision, scale)
            elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
                coltype = self.ischema_names.get(coltype)(length)
            elif 'WITH TIME ZONE' in coltype:
                coltype = TIMESTAMP(timezone=True)
            else:
                coltype = re.sub(r'\(\d+\)', '', coltype)
                try:
                    coltype = self.ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, colname))
                    coltype = sqltypes.NULLTYPE

            cdict = {
                'name': colname,
                'type': coltype,
                'nullable': nullable,
                'default': default,
                'autoincrement': default is None,
            }
            if orig_colname.lower() == orig_colname:
                cdict['quote'] = True

            columns.append(cdict)
        return columns
Example #55
0
 def get_time_filter(self, start_dttm, end_dttm):
     col = self.sqla_col.label('__time')
     return and_(
         col >= text(self.dttm_sql_literal(start_dttm)),
         col <= text(self.dttm_sql_literal(end_dttm)),
     )
Example #56
0
 def has_table(self, connection, tablename, schema=None):
     result = connection.scalar(sql.text(
         "select * from INFORMATION_SCHEMA.`TABLES` where "
         "name=:name"),
                                name=tablename)
     return bool(result)
Example #57
0
def get_users_w_project():
    stmt = text("SELECT Account.id, Account.name, Account.username, Projekti.name AS projekti, Userproject.onasiakas AS onasiakas, Userproject.paaprojekti FROM account INNER JOIN Userproject ON Userproject.account_id = Account.id INNER JOIN Projekti ON Projekti.id = Userproject.project_id GROUP BY Account.id, Projekti.name, Userproject.onasiakas, Userproject.paaprojekti ORDER BY Account.name ASC")
    res = db.session().execute(stmt)
    return res
Example #58
0

#######################
# Groups
#######################
# we"re using a very normalized form here to represent all kinds of
# groups and their relations to other things in the system.

groups = rdb.Table("groups", metadata,
    rdb.Column("group_id", rdb.Integer, PrincipalSequence, primary_key=True),
    rdb.Column("short_name", rdb.Unicode(256), nullable=False),
    rdb.Column("full_name", rdb.Unicode(256)),
    rdb.Column("description", rdb.UnicodeText),
    rdb.Column("status", rdb.Unicode(32)), # workflow for groups
    rdb.Column("status_date", rdb.DateTime(timezone=False),
        server_default=(text("now()")),
        nullable=False
    ),
    rdb.Column("start_date", rdb.Date, nullable=False),
    rdb.Column("end_date", rdb.Date),
    rdb.Column("type", rdb.String(30), nullable=False),
    rdb.Column("parent_group_id", rdb.Integer,
        rdb.ForeignKey("groups.group_id")
     ),
    rdb.Column("language", rdb.String(5), nullable=False),
)

offices = rdb.Table("offices", metadata,
    rdb.Column("office_id", rdb.Integer,
        rdb.ForeignKey("groups.group_id"),
        primary_key=True),
Example #59
0
 def test_create_index_expressions(self):
     context = op_fixture()
     op.create_index("geocoded", "locations", [text("lower(coordinates)")])
     context.assert_(
         "CREATE INDEX geocoded ON locations (lower(coordinates))")
Example #60
0
def get_users_per_project():
    stmt = text("SELECT COUNT(Account.id) AS maara, Projekti.name AS projekti FROM Account INNER JOIN Userproject ON Userproject.account_id = Account.id AND Userproject.paaprojekti = :true INNER JOIN Projekti ON Projekti.id = Userproject.project_id GROUP BY Projekti.name").params(true = True)
    res = db.session().execute(stmt)
    return res
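Callers can iterate the returned result directly, with the SELECT aliases exposed as row attributes; a short usage sketch (the surrounding view code is assumed):

for row in get_users_per_project():
    print(row.projekti, row.maara)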