def search():
    es = pyes.ES('127.0.0.1:9200') 
    query_string = request.GET.get('query')
    query = pyes.StringQuery(query_string)
    result = es.search(query=query)
    bill_list = []
    conn = engine.connect()
    for res in result['hits']['hits']:
        id = res['_id']
        bls = select([bills,bill_revs],
            and_(
                bill_revs.c.id == id,
                bill_revs.c.bill_id==bills.c.id
            )
        ).apply_labels()

        # reuse the connection opened above instead of reconnecting for every
        # hit, and avoid shadowing the Elasticsearch result
        bl = conn.execute(bls).fetchone()
        if not bl:
            print id
            continue 
        bill = utils.get_bill(bl) 
        bill_list.append(bill)

    return dict(bill=bill_list)
Example #2
def main():
    sys.path.append(os.path.dirname(os.path.realpath(__file__)))
    tornado.options.parse_command_line()
    app = Application(engine.connect())
    http_server = HTTPServer(app)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
Example #3
def getadverts():
    i = 1
    connection = engine.connect()
    rs = connection.execute("select campaign_id, product, campaign_name,"
                            + "SUM(CASE WHEN (station_type_desc='Local Station') THEN 1 ELSE 0 END) AS LocalStation, "
                            + "SUM(CASE WHEN (station_type_desc='Radio') THEN 1 ELSE 0 END) AS Radio "
                            + "from ( "
                            + "select cmp.campaign_id, product, campaign_name, station_name, station_type_desc "
                            + "from medmo.campaigns cmp left join medmo.campaigndetails cmpd ON cmp.campaign_id = cmpd.campaign_id "
                            + "left join medmo.stations stn ON cmpd.station_id = stn.station_id "
                            + "left join medmo.stationtypes stntp ON stn.station_type_id = stntp.station_type_id "
                            + "where cmp.createdby_user_id = " + str(current_user.user_id)
                            + ") as summary group by product, campaign_name order by campaign_id desc;")
    pyadverts = []
    for row in rs:
        if i % 2 == 1:
            style = 'oddcard'
        else:
            style = 'evencard'
        # print row
        d = collections.OrderedDict()
        d['id'] = row.campaign_id
        d['product'] = row.product
        d['campaign'] = row.campaign_name
        d['tvStations'] = row.LocalStation
        d['radioStations'] = row.Radio
        d['style'] = style
        pyadverts.append(d)
        i += 1
    return Response(json.dumps(pyadverts), mimetype='application/json', headers={'Cache-Control': 'no-cache'})
def feed():
    prefix = '://'.join(request.urlparts[:2])
    title = 'Malaysian Bill Watcher'
    link = prefix+'/'
    description = '''
        This is an app for Malaysians to see bills being passed by the Parliament
    '''
    lastBuildDate = datetime.datetime.utcnow()
    
    li = []
    bls = select([bills,bill_revs],bills.c.id==bill_revs.c.bill_id).\
        order_by('update_date').apply_labels()
    conn = engine.connect()
    result = conn.execute(bls)
    bill = result.fetchall()    
    for i in bill:
        i_bill = utils.get_bill(i)
        i_title = i_bill.long_name
        i_description = "year:%s \nstatus: %s" % (i_bill.year,i_bill.status)
        i_link = prefix+'/detail/%s/' % (i_bill.id)
        i_pubDate = i_bill.update_date
        i_guid = PyRSS2Gen.Guid(i_link)
        itm = PyRSS2Gen.RSSItem(title=i_title,description=i_description,
            link=i_link,guid=i_guid,pubDate=i_pubDate)
        li.append(itm)
    rss = PyRSS2Gen.RSS2(title=title,link=link,description=description,
        items = li)
    output = cStringIO.StringIO()
    rss.write_xml(output)
    response.content_type = 'application/rss+xml'
    return output.getvalue()
Example #5
 def update_mysql_data(self, engine, schema, table_name, data_dict, where,
                       logger):
     # sql_comment = 'UPDATE %s SET ' % table_name + ','.join(['%s=%r' % (k, data_dict[k]) for k in data_dict]) + ' WHERE %s=%r;' % (where[0], where[1])
     # cursor = session.execute(sql_comment)
     # session.commit()
     # result = cursor.lastrowid
     # print("cursor",cursor)
     # print("result",result)
     # print(cursor.lastrowid)
     try:
         # bind the engine
         metadata = MetaData(engine)
         # connect to the data table
         tb_bdm_employee = Table(table_name, metadata, autoload=True)
         # connect to the engine
         conn = engine.connect()
         ins = tb_bdm_employee.update().where(
             schema.Column(where[0]) == where[1]).values(
                 **data_dict
             )  # table.update().where(table.c.id==7).values(name='foo')
         # pass the parameters and execute the statement
         result = conn.execute(ins)
         return result
     except Exception as e:
         print("error_update_dict:", data_dict)
         file_path = os.path.join(
             sys.path[0], "logs", "%s.log" % datetime.datetime.strftime(
                 datetime.datetime.now(), "%Y_%m_%d"))
         log = logger(filename=file_path)
         log.removeHandler(log.handlers)
         log.info(e.__str__())
         traceback.print_exc()
         return None
Example #6
def getadvertdetails():
    advert_id = request.args['a']
    connection = engine.connect()
    i = 0
    rs = connection.execute("SELECT cmpd.campaign_id, cmpd.station_id, station_name, station_type_desc, stn.station_type_id, "
                            + "schedule_dates FROM medmo.campaigndetails cmpd join medmo.stations stn ON "
                            + "cmpd.station_id = stn.station_id join medmo.stationtypes stntp ON "
                            + "stn.station_type_id = stntp.station_type_id WHERE cmpd.campaign_id = " + str(advert_id)
                            + " ORDER BY station_type_id, station_id;")
    advertdetails = []
    for row in rs:
        if i % 2 == 1:
            style = 'oddcard'
        else:
            style = 'evencard'
        d = collections.OrderedDict()
        d['campaign_id'] = row.campaign_id
        d['station_id'] = row.station_id
        d['station_name'] = row.station_name
        d['station_type_desc'] = row.station_type_desc
        d['station_type_id'] = row.station_type_id
        d['schedule_dates'] = row.schedule_dates
        d['style'] = style
        advertdetails.append(d)
    return Response(json.dumps(advertdetails), mimetype='application/json', headers={'Cache-Control': 'no-cache'})
Example #7
    def insert_mysql_data(self, engine, table_name, data_dict, logger):
        # sql_comment = "insert into  {table_name} {filed_list}  values {data_tuple}".format(
        #                 table_name=table_name,filed_list=filed_list, data_tuple=data_tuple)
        # sql_comment = "insert into tb_bdm_employee 'EmployeeID'  values 5"
        # # print(sql_comment)
        # cursor = session.execute(sql_comment)
        # session.commit()
        # result = cursor.lastrowid
        # print(cursor.lastrowid)
        try:
            # bind the engine
            metadata = MetaData(engine)
            # connect to the data table
            tb_bdm_employee = Table(table_name, metadata, autoload=True)
            # connect to the engine
            conn = engine.connect()
            ins = tb_bdm_employee.insert()
            # pass the parameters and execute the statement
            result = conn.execute(ins, **data_dict)
            return result.lastrowid

        except Exception as e:
            traceback.print_exc()
            file_path = os.path.join(
                sys.path[0], "logs", "%s.log" % datetime.datetime.strftime(
                    datetime.datetime.now(), "%Y_%m_%d"))
            log = logger(filename=file_path)
            log.removeHandler(log.handlers)
            log.info(e.__str__())
            traceback.print_exc()
            return None
Example #8
def feed():
    prefix = '://'.join(request.urlparts[:2])
    title = 'Malaysian Bill Watcher'
    link = prefix+'/'
    description = '''
        This is an app for Malaysians to see bills being passed by the Parliament
    '''
    lastBuildDate = datetime.datetime.utcnow()
    
    li = []
    bls = select([bills,bill_revs],bills.c.id==bill_revs.c.bill_id).\
        order_by('update_date')
    conn = engine.connect()
    result = conn.execute(bls)
    bill = result.fetchall()    
    for i in bill:
        i_title = i['long_name']
        i_description = "year:%s \nstatus: %s" % (i['year'],i['status'])
        i_link = prefix+'/detail/%s/' % (i['bill_id'])
        i_pubDate = i['update_date']
        i_guid = PyRSS2Gen.Guid(i_link)
        itm = PyRSS2Gen.RSSItem(title=i_title,description=i_description,
            link=i_link,guid=i_guid,pubDate=i_pubDate)
        li.append(itm)
    rss = PyRSS2Gen.RSS2(title=title,link=link,description=description,
        items = li)
    output = cStringIO.StringIO()
    rss.write_xml(output)
    response.content_type = 'application/rss+xml'
    return output.getvalue()
Example #9
def list_all():
    print request.GET.keys()
    if request.GET.get('page_no'):
        page_no = int(request.GET.get('page_no'))
    else:
        page_no = 1
    first = (page_no - 1)  *  settings.ITEM_PER_PAGE + 1
    last = settings.ITEM_PER_PAGE * page_no
    bl = select([bills,bill_revs],and_(
        bills.c.id==bill_revs.c.bill_id,
        bills.c.id>=first,bills.c.id<=last
        )
    ).order_by(bill_revs.c.update_date)
    conn = engine.connect()
    result = conn.execute(bl)
    bill = result.fetchall()

    cnt = select([bills])
    result = conn.execute(cnt)
    count = result.fetchall()
   
    page_list = range(len(count) / 5)
    page_list = [i+1 for i in page_list]
    return dict(bill=bill,page_list=page_list,page_no=page_no,
        next_page=page_no+1,prev_page=page_no-1)
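A leaner page count, sketched under the assumption that settings.ITEM_PER_PAGE is the intended page size (the code above divides by a literal 5 after fetching every row), would let the database do the counting:

from sqlalchemy import func, select

# hypothetical replacement for the fetchall()-based page count in list_all():
# ask the database for the number of bills instead of pulling every row
count = conn.execute(select([func.count(bills.c.id)])).scalar()
page_list = [i + 1 for i in range(count // settings.ITEM_PER_PAGE)]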
Example #11
 def db(self):
     if not hasattr(BaseHandler, "_db"):
         #BaseHandler._db = tornado.database.Connection(
         #    host=options.mysql_host, database=options.mysql_database,
         #    user=options.mysql_user, password=options.mysql_password)
         BaseHandler._db = engine.connect()
     return BaseHandler._db
Example #12
def get_item(id):
    bill_query = select([bills], bills.c.id == id)

    data = {}
    conn = engine.connect()
    result = conn.execute(bill_query)

    bill = result.fetchone()

    for i in bill.keys():
        data[i] = bill[i]

    rev_query = select([bill_revs], bill_revs.c.bill_id == id)
    result = conn.execute(rev_query)

    revision = result.fetchall()
    data['revision'] = []

    for rev in revision:
        temp = {}
        for key in rev.keys():
            if type(rev[key]) == datetime.datetime:
                temp[key] = rev[key].strftime('%d/%m/%Y %H:%M:%S')
            elif type(rev[key]) == datetime.date:
                temp[key] = rev[key].strftime('%d/%m/%Y')
            else:
                temp[key] = rev[key]

        data['revision'].append(temp)

    return data
def write_mysql(_id, values):
    """write data into mysql db
    """
    with engine.connect() as conn:
        conn.execute(var_pmid.delete().where(var_pmid.c._id == _id))
        for i in range(0, len(values), 1000):
            batch = values[i:i + 1000]
            conn.execute(var_pmid.insert(), batch)  # pylint: disable=no-value-for-parameter
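On SQLAlchemy 1.4 in "future" mode and on 2.0, a plain engine.connect() block no longer autocommits, so the delete and inserts above would be discarded when the block exits. A minimal sketch of the same write wrapped in an explicit transaction (same var_pmid table assumed):

def write_mysql(_id, values):
    """write data into mysql db inside one committed transaction
    """
    # engine.begin() opens a connection plus a transaction and commits it
    # on clean exit (it rolls back if an exception is raised)
    with engine.begin() as conn:
        conn.execute(var_pmid.delete().where(var_pmid.c._id == _id))
        for i in range(0, len(values), 1000):
            conn.execute(var_pmid.insert(), values[i:i + 1000])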
Example #15
def all_item():
    id_query = select([bills.c.id])
    conn = engine.connect()
    result = conn.execute(id_query)
    data = []

    ids = result.fetchall()
    for i in ids:
        data.append(get_item(i['id']))
    return {'data': data}
Example #17
def get_dump_path(_id):
    dirname = '/app/data/paper_data'
    from models import (engine, paper_status)
    with engine.connect() as conn:
        query = paper_status.select().where(paper_status.c._id == _id)
        row = conn.execute(query).fetchone()
        if row is not None:
            return os.path.join(dirname, row['path'])
        else:
            return os.path.join(dirname, 'unknown')
Example #18
 def insert_sqlserver_data(self, engine, table_name, data_dict):
     # bind the engine
     metadata = MetaData(engine)
     # connect to the data table
     tb_bdm_employee = Table(table_name, metadata, autoload=True)
     # connect to the engine
     conn = engine.connect()
     ins = tb_bdm_employee.insert()
     # pass the parameters and execute the statement
     result = conn.execute(ins, **data_dict)
     return result
Example #19
def import_stock_history_ohlcv():
    ss = Session()
    conn = engine.connect()
    stocks = ss.query(Stock.code).all()
    for stock in stocks:
        quote_inserts = read_history_quotes(stock.code)
        for ins in quote_inserts:
            try:
                conn.execute(ins)
            except Exception as e:
                continue
Example #21
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    with engine.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)

        with context.begin_transaction():
            context.run_migrations()
Example #22
def detail(id):
    bl = select([bills],bills.c.id == id)
    conn = engine.connect()
    
    result = conn.execute(bl)
    bill = result.fetchone()
    rev = select([bill_revs],bill_revs.c.bill_id == bill['id']).\
        order_by(bill_revs.c.year.desc())
    
    result = conn.execute(rev)
    revision = result.fetchall()
    return dict(bill=bill,revision=revision)
Example #23
 def update_sqlserver_data(self, engine, table_name, data_dict, where):
     metadata = MetaData(engine)
     # connect to the data table
     tb_bdm_employee = Table(table_name, metadata, autoload=True)
     # connect to the engine
     conn = engine.connect()
     ins = tb_bdm_employee.update().where(
         schema.Column(where[0]) == where[1]).values(
             **data_dict
         )  # table.update().where(table.c.id==7).values(name='foo')
     # pass the parameters and execute the statement
     result = conn.execute(ins)
     return result
Example #24
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    
    """

    connection = engine.connect()
    context.configure(connection=connection, target_metadata=metadata)

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
Example #25
def run_migrations_online():
    """
    Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    from models import engine

    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=TARGET_METADATA
        )

        with context.begin_transaction():
            context.run_migrations()
Example #26
def request_followers(application_id: str) -> None:
    """Request the followers for all main accounts. Uses the fetcher service to request followers in parallel"""
    follower_count = 200
    request_limit = 15
    request_chunks = 5
    with engine.connect() as connection:
        requests_left = request_limit

        while requests_left > 0:
            try:
                limit = min(requests_left, request_chunks)
                main_users = list(
                    models.account.select_main_with_followers(
                        application_id, SOURCES['TWITTER'], limit, connection))

                if not main_users:
                    return

                requests_left -= len(main_users)
                request = create_timeline_payload(application_id,
                                                  follower_count, main_users)
                followers_response = urlopen(request)
                followers_response_code = followers_response.getcode()

                if followers_response_code == 200:
                    response = json.loads(read_response(followers_response))

                    for followers in response:
                        result = followers['result']
                        name = followers['screenName']
                        users = result['users']
                        cursor = result['next_cursor']
                        user_followers = [
                            user['screen_name'] for user in users
                            if not user['protected']
                        ]
                        models.account_relationship.insert_multiple(
                            application_id, name, user_followers,
                            SOURCES['TWITTER'], cursor, connection)
                else:
                    raise IOError('Invalid response from Fetcher Service')

            except (ValueError, HTTPException) as e:
                print(e)
            except HTTPError as e:
                print(e.read())
def get_indexable_bills():
    revision = select([bill_revs,bills],bill_revs.c.bill_id==bills.c.id)
    conn = engine.connect()
    result = conn.execute(revision)
    data = result.fetchall()
    for item in data:
        temp = {}
        for key in item.keys():
            if key == 'id':
                continue
            elif key == 'url':
                full_path = download(item[key])
                if not full_path:
                    continue
                temp['document'] = pyes.file_to_attachment(full_path)
            else:
                temp[key] = item[key]
        yield temp
def detail(rev_id):
    bls = select([bills,bill_revs],
        and_(
            bill_revs.c.id == rev_id,
            bill_revs.c.bill_id==bills.c.id
        )
    ).apply_labels()

    conn = engine.connect()
    
    result = conn.execute(bls)
    bl = result.fetchone()
    
    bill = utils.get_bill(bl) 

    revs = select([bill_revs],bill_revs.c.bill_id == bill.bill_id).\
        order_by(bill_revs.c.year.desc())
    
    result = conn.execute(revs)
    revision = result.fetchall()
    
    return dict(bill=bill,revision=revision)
def list_all():
    page_no = request.GET.get('page_no')

    bl = select([bills,bill_revs],and_(
        bills.c.id==bill_revs.c.bill_id,
        )
    ).order_by(bill_revs.c.update_date).apply_labels()


    conn = engine.connect()
    result = conn.execute(bl)
    bill_list = result.fetchall()
    
    bill = []

    for item in bill_list:
        bill.append(utils.get_bill(item))        

    bill_total = len(bill_list)
    pages = utils.Pagination(settings.ITEM_PER_PAGE,bill_total,page_no)
    bill = bill[pages.first:pages.last]

    return dict(bill=bill,pages=pages)
Example #31
def drop_everything():
    # https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/DropEverything
    conn = engine.connect()

    # the transaction only applies if the DB supports
    # transactional DDL, i.e. Postgresql, MS SQL Server
    trans = conn.begin()

    inspector = reflection.Inspector.from_engine(engine)

    # gather all data first before dropping anything.
    # some DBs lock after things have been dropped in
    # a transaction.

    metadata = MetaData()

    tbs = []
    all_fks = []

    for table_name in inspector.get_table_names():
        fks = []
        for fk in inspector.get_foreign_keys(table_name):
            if not fk['name']:
                continue
            fks.append(ForeignKeyConstraint((), (), name=fk['name']))
        t = Table(table_name, metadata, *fks)
        tbs.append(t)
        all_fks.extend(fks)

    for fkc in all_fks:
        conn.execute(DropConstraint(fkc))

    for table in tbs:
        conn.execute(DropTable(table))

    trans.commit()
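reflection.Inspector.from_engine() is deprecated in recent SQLAlchemy releases; a small sketch of the equivalent entry point on 1.4+ (the rest of the drop_everything() recipe is unchanged):

from sqlalchemy import inspect

# on SQLAlchemy 1.4+ the Inspector is obtained via inspect()
inspector = inspect(engine)
table_names = inspector.get_table_names()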
Example #32
 def update_mysql_data(self, engine, schema, table_name, data_dict, where, logger):
     try:
         # bind the engine
         metadata = MetaData(engine)
         # connect to the data table
         tb_bdm_employee = Table(table_name, metadata, autoload=True)
         # address_table = Table('address', metadata, autoload=True)
         # connect to the engine
         conn = engine.connect()
         # print('where', where)
         # print('data_dict', data_dict)
         ins = tb_bdm_employee.update().where(schema.Column(where[0]) == where[1]).values(
             **data_dict)  # table.update().where(table.c.id==7).values(name='foo')
         # pass the parameters and execute the statement
         result = conn.execute(ins)
         return result
     except Exception as e:
         file_path = os.path.join(sys.path[0], "logs",
                                  "%s.log" % datetime.datetime.strftime(datetime.datetime.now(), "%Y_%m_%d"))
         log = logger(filename=file_path)
         log.removeHandler(log.handlers)
         log.info(e.__str__())
         traceback.print_exc()
         return None
Example #33
def addBook():
    bkName = request.form.get('bkName', False)
    bkAuthor = request.form.get('bkAuthor', False)
    bkPress = request.form.get('bkPress', False)
    bkNum = request.form.get('bkNum', False)
    bkNum = int(bkNum)

    if not (bkName and bkAuthor and bkPress):
        return {'status': 0, 'message': 'incomplete input data'}

    if bkNum <= 0:
        return {'status': 0, 'message': 'number of new books is zero or negative'}

    try:
        con = engine.connect()
        for i in range(bkNum):
            con.execute(
                f"insert into TB_Book (bkName, bkAuthor, bkPress, bkStatus, bkPrice) values ('{bkName}', '{bkAuthor}', '{bkPress}', '在馆', 20)"
            )
        con.close()
    except ProgrammingError:
        return {'status': 0, 'message': 'the submitted data may be invalid'}
    else:
        return {'status': 1, 'message': 'new books added successfully'}
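The loop above interpolates the submitted form fields straight into the SQL string. A minimal sketch of the same insert with bound parameters (same engine and TB_Book table assumed, SQLAlchemy 1.x-style execute call), which sidesteps quoting and injection issues:

from sqlalchemy import text

# hypothetical parameterised version of the insert loop in addBook():
# the values travel separately from the SQL text
stmt = text(
    "insert into TB_Book (bkName, bkAuthor, bkPress, bkStatus, bkPrice) "
    "values (:name, :author, :press, :status, :price)"
)
con = engine.connect()
for i in range(bkNum):
    con.execute(stmt, name=bkName, author=bkAuthor, press=bkPress,
                status='在馆', price=20)
con.close()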
Example #34
 def get_app(self):
     engine = create_engine("sqlite:///:memory:")
     metadata.bind = engine
     self.conn = engine.connect()
     metadata.create_all()
     return Application(self.conn)
Example #35
def request_user_timelines(application_id: str,
                           is_main=True,
                           request_limit=1500) -> int:
    """Requests the twitter timeline for all accounts in the database. Either main or not. Uses the fetcher service."""
    request_chunks = 20
    with engine.connect() as connection:
        requests_left = request_limit
        while requests_left > 0:
            limit = min(requests_left, request_chunks)
            account_ids = map_to_attribute(
                'id',
                account_model.select_multiple_incomplete(application_id,
                                                         SOURCES['TWITTER'],
                                                         connection,
                                                         ismain=is_main,
                                                         limit=limit))
            # There are no more accounts that need to be fetched
            if not account_ids:
                return requests_left

            requests_left -= len(account_ids)

            accounts_timeline = account_model.select_oldest_timelines(
                account_ids, connection)
            timeline_payload = create_timeline_payload(accounts_timeline)
            request = create_post_request(TWITTER_TIMELINE, {
                'applicationId': application_id,
                'accounts': timeline_payload
            })

            try:
                timeline_response = urlopen(request)
                timeline_response_code = timeline_response.getcode()

                if timeline_response_code != 200:
                    continue
                raw_response = read_response(timeline_response)
                response = json.loads(raw_response)

                for account in response['success']:
                    with connection.begin():
                        name = account['screenName']
                        timeline = account['timeline']
                        statuses = []
                        if isinstance(timeline, str):
                            continue
                        for status in timeline:
                            statuses.append({
                                'text':
                                status['text'],
                                'id':
                                status['id'],
                                'date':
                                convert_twitter_date(status['created_at'])
                            })

                        if len(statuses) > 1:
                            account_timeline.insert_multiple(
                                application_id, name, SOURCES['TWITTER'],
                                statuses[1:], connection)
                        else:
                            account_id = account_model.select_one_id(
                                application_id, name, SOURCES['TWITTER'],
                                connection)
                            account_model.update_one_iscomplete(
                                account_id, True, connection)
                for res in response['errors']:
                    with connection.begin():
                        name = res['screenName']
                        status = res['status']
                        if status == 404:
                            account_id = account_model.select_one_id(
                                application_id, name, SOURCES['TWITTER'],
                                connection)
                            if not account_id:
                                continue
                            account = account_model.select_one_by_id(
                                account_id, connection)
                            # Delete account from accounts Table if it has been removed from Twitter
                            if not account['isMain']:
                                account_rel_model.delete_follower_account_rel(
                                    account_id, connection)
                                account_model.delete_one_by_id(
                                    account_id, connection)
            except (ValueError, HTTPException) as e:
                print(e)
            except HTTPError as e:
                print(e.read())

        return requests_left
Example #36


fout = sys.stdout
if len(sys.argv) > 1:
    fout = open(sys.argv[1], 'w')

gene_name = input('Gene: ')
variant_name = input('Variant: ')
print()

if gene_name.isdigit():
    gene_id = int(gene_name)

else:
    with engine.connect() as conn:
        query = gene.select().where(gene.c.symbol == gene_name)
        rows = conn.execute(query)
        row = next(rows, None)
        if not row:
            error('gene symbol not found')
    gene_id = row[0]

print('GeneID:', gene_id, file=fout)

for j, (mutation_type, rs, pattern) in enumerate(patterns):
    m = re.match(pattern, variant_name)
    if m:
        d = m.groupdict()
        break
else:
Example #38
File: save.py Project: hbrls/mx
# -*- coding: utf-8 -*-
def read_data(filename, sep):
    with open(filename) as f:
        for line in f.readlines():
            data = tuple(w.strip() for w in line.strip().split(sep))
            assert len(data) == 2
            yield data


from models import engine
from models.userinfo import UserInfo


userinfo = UserInfo()
ins = userinfo.__table__.insert().values(source="renren_480w")


if __name__ == "__main__":
    data = read_data('cleansed.txt', sep="\t")
    with engine.connect() as conn:
        conn.execute(ins, [{
            "email": d[0],
            "password": d[1]
        } for d in data])

# 7824684
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_functions import search_users, get_photo, sort_likes, json_create
from models import engine, Session, write_msg, register_user, add_user, add_user_photos, add_to_black_list, \
    check_db_user, check_db_black, check_db_favorites, check_db_master, delete_db_blacklist, delete_db_favorites
from vk_config import group_token

# For working with the VK API
vk = vk_api.VkApi(token=group_token)
longpoll = VkLongPoll(vk)
# For working with the DB
session = Session()
connection = engine.connect()


def loop_bot():
    for this_event in longpoll.listen():
        if this_event.type == VkEventType.MESSAGE_NEW:
            if this_event.to_me:
                message_text = this_event.text
                return message_text, this_event.user_id


def menu_bot(id_num):
    write_msg(id_num,
              f"Вас приветствует бот - Vkinder\n"
              f"\nЕсли вы используете его первый раз - пройдите регистрацию.\n"
              f"Для регистрации введите - Да.\n"
              f"Если вы уже зарегистрированы - начинайте поиск.\n"
              f"\nДля поиска - девушка 18-25 Москва\n"
              f"Перейти в избранное нажмите - 2\n"
def connection():
    connection = engine.connect()

    yield connection

    connection.close()
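The generator above reads like a pytest fixture; a minimal usage sketch, assuming pytest and the same engine, would be:

import pytest

@pytest.fixture
def connection():
    # open a connection for the test and return it to the pool afterwards
    conn = engine.connect()
    yield conn
    conn.close()


def test_can_select_one(connection):
    # hypothetical test that receives the connection from the fixture above
    assert connection.execute("select 1").scalar() == 1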
Example #41
from flask import Flask, request, render_template, redirect, url_for, jsonify
from models import User, Movie, engine, session, create_engine
import webbrowser

app = Flask(__name__)

engine = create_engine('postgresql://*****:*****@localhost/movie_db')
db_conn = engine.connect()


# for the web
# create a new row in the movie table
@app.route('/main/createnewmovie', methods=['GET', 'POST'])
def create_movie():
    if request.method == 'POST':
        # check if the user already exists
        check_existing_user = session.query(
            User.id).filter(User.name == request.form['actor_name']).scalar()
        check_movie_exists = session.query(Movie).filter(
            Movie.movie_title == request.form['movie-name']).scalar()
        if check_existing_user and check_movie_exists:
            return "Movie and the role player already exists! Thanks for participation!"
        elif check_existing_user:
            # add new movie
            add_newmovie = Movie(
                movieimdbid=request.form['movie-imdb'],
                movie_title=request.form['movie-name'],
                movie_rating=request.form['ratings'],
                movie_releaseyear=request.form['release-year'],
                movie_votes=request.form['votes'])
            session.add(add_newmovie)
Example #42
def load_data():
    print 'loading'
    pg = load_page()
    conn = engine.connect()
    print 'here we go'
    for i in pg:
        message = ''
        check = select([bills.c.id],bills.c.name == i.name)
        result = conn.execute(check)
        res = result.fetchone()
        if not res:
            bill = bills.insert().values(name=i.name,long_name=i.long_name)
            result = conn.execute(bill)
            pkey = result.inserted_primary_key[0]
        else:
            pkey = res['id']
        
        # We check for an existing revision; remember we save the link for both the bill and the revision
        check = select([bill_revs],and_(
            bill_revs.c.bill_id == int(pkey),
            bill_revs.c.year == int(i.year)))
  

        key = [k for k in dir(i) if not re.match('^__',k)]
        val = [getattr(i,k) for k in key]
        data = dict(zip(key,val))
        data['bill_id'] = pkey
        
        if 'date_presented' in key:
            data['date_presented'] = datetime.datetime.strptime(i.date_presented,'%d/%m/%Y').date()

        result = conn.execute(check)
        bill_rev = result.fetchone()
        exec_insert = False

        if not bill_rev:
            data['create_date'] = datetime.datetime.now()
            data['update_date'] = datetime.datetime.now()
            revision = bill_revs.insert().values(**data)
            message = 'Bills Started: %s, year %s %s'
            exec_insert = True
        else:
            data['update_date'] = datetime.datetime.now()
            # the bill may still be under debate, so if the status changed for the same year we update the revision
            if bill_rev['status'] != i.status:
                revision = bill_revs.update().\
                    where(
                        and_(
                            bill_revs.c.bill_id == int(pkey),
                            bill_revs.c.year == int(i.year)
                            )
                        ).\
                    values(**data)
                message = 'Bills Updated: %s, year %s %s'
                exec_insert = True
        if exec_insert:
            result = conn.execute(revision)
            
        # stub of post to twitter
        url = settings.URL + '/detail/%d/' % (pkey)
        if message:
            print message % (i.long_name, i.year, url)