def doc(self):
    dt = json.loads(request.data)
    lib = dt["lib"]
    if "dataurl" in dt:
        dataurl = dt["dataurl"]
        self.add_lib_record(lib, dataurl)
    check_lib = list(
        self.appbuilder.session.query(libModel).filter(
            libModel.name == lib).all())
    if len(check_lib) > 0:
        # library already indexed: serve the docs table straight from its db
        return jsonify({
            "status": 200,
            "success": True,
            "data": {
                "frame": pd.read_sql(
                    "SELECT id,name,ctype,alias FROM docs",
                    con=ce(check_lib[0].data).connect()
                ).to_dict(orient="records")  # pandas expects "records", not "record"
            }
        })
    else:
        logging.info(f"parsing {lib}")
        dt, dataurl = parse_lib(lib, import_=True)
        self.add_lib_record(lib, dataurl)
        rt = {
            "status": 200,
            "success": True,
            "data": {
                "rows": len(dt),
                "frame": dt.df.to_dict(orient="records")
            }
        }
        return jsonify(rt)
def fun_conndb(df_4db, i_tab_nme, i_action):
    dbengine = ce(
        'postgresql://*****:*****@localhost:5432/piyushbijwal'
    )
    if i_action == 'I':  # insert: create the table from the frame's schema, then load the rows
        df_4db.head(0).to_sql(i_tab_nme, con=dbengine, if_exists='append')
        df_4db.to_sql(i_tab_nme, con=dbengine, if_exists='append')
def read_api(self):
    dct = json.loads(request.data)
    doc_id = dct["id"]
    lib = dct["lib"]
    lib_item = self.get_lib(lib)
    eng = ce(lib_item.data)
    sess = Session(bind=eng)
    self.datamodel.session = sess
    item = self.datamodel.get(doc_id)
    kids = [self.item_dict(i) for i in item.kids]
    parents = [self.item_dict(i) for i in item.parents]
    dess = [self.item_dict(i) for i in item.dess]
    ancs = [self.item_dict(i) for i in item.ancs]
    return jsonify({
        "success": True,
        "status": 200,
        "data": {
            "id": item.id,
            "name": item.name,
            "names": list(item.names.split(",")),
            "ctype": item.ctype,
            "doc": item.doc,  # .replace("\n", "<br>"),
            "level": item.level,
            "path": item.path,
            "code": item.code,
            "kids": kids,
            "parents": parents,
            "dess": dess,
            "ancs": ancs,
            "lib": lib,
            "alias": item.alias,
        }
    })
def create_engine(db_name='rss_feeds'):
    # create connection from credentials stored in environment variables
    postgres_uname = os.environ.get('postgres_uname')
    postgres_pass = os.environ.get('postgres_pass')
    db = 'postgresql://{}:{}@localhost:5432/{}'.format(
        postgres_uname, postgres_pass, db_name)
    engine = ce(db, echo=False)
    return engine
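# A minimal usage sketch for create_engine() above: the credentials must be
# present in the environment before the call (the placeholder values here are
# assumptions, not from the original).
import os
os.environ.setdefault('postgres_uname', 'me')      # placeholder credentials
os.environ.setdefault('postgres_pass', 'secret')
eng = create_engine('rss_feeds')
print(eng.url)  # the assembled postgresql:// connection URL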
def mock_engine():
    from sqlalchemy import create_engine as ce
    from io import StringIO

    buf = StringIO()

    def dump(sql, *multiparams, **params):
        buf.write(str(sql.compile(dialect=engine.dialect)) + ';')

    # strategy='mock' requires SQLAlchemy < 1.4 (replaced there by create_mock_engine)
    engine = ce('postgresql://', echo=True, strategy='mock', executor=dump)
    return buf, engine
def trace_up(self, lib, doc_id):
    lib_item = self.get_lib(lib)
    eng = ce(lib_item.data)
    sess = Session(bind=eng)
    item = sess.query(docModel).filter(docModel.id == doc_id).first()
    return jsonify({
        "status": 200,
        "success": True,
        "data": self.trace_up_parse(item)
    })
def dbconfig(name, echoCmd=True):
    """
    Returns a database engine object for queries and inserts.
    -------------
    name = name of the PostgreSQL database
    echoCmd = True/False, whether SQLAlchemy echoes commands
    """
    # conString = '//username:{pwd}@{host}:{name}
    engine = ce('postgresql:///{0}'.format(name), echo=echoCmd)
    return engine
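# A minimal usage sketch for dbconfig() above ('mydb' is a hypothetical
# database name; engine.execute() assumes SQLAlchemy < 2.0):
eng = dbconfig('mydb', echoCmd=False)
print(eng.execute('SELECT 1').scalar())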
def mock_engine():
    '''
    Creates a pair of an io.StringIO buffer and a mock engine that does
    NOT execute SQL queries but only dumps the compiled statements into
    the buffer object.
    '''
    from sqlalchemy import create_engine as ce
    from io import StringIO

    buf = StringIO()

    def dump(sql, *multiparams, **params):
        buf.write(str(sql.compile(dialect=engine.dialect)) + ';\n')

    # strategy='mock' requires SQLAlchemy < 1.4 (replaced there by create_mock_engine)
    engine = ce('postgresql://', echo=True, strategy='mock', executor=dump)
    return buf, engine
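# A minimal demo of mock_engine() above ("users" is a hypothetical table):
# compiling DDL through the mock engine writes SQL into the buffer instead of
# executing it. checkfirst=False skips the table-existence query, which a
# mock engine cannot answer.
from sqlalchemy import MetaData, Table, Column, Integer, String

buf, engine = mock_engine()
md = MetaData()
users = Table('users', md,
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))
md.create_all(engine, checkfirst=False)
print(buf.getvalue())  # the compiled CREATE TABLE statement, ';'-terminated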
def fun_insert(df_4db, i_tab_nme):
    try:
        conn = postgres_conn()
        t_str = ('postgresql://' + os.getenv("db_user") + ':' +
                 os.getenv("logpwd") + '@' + os.getenv("db_host") + ':' +
                 os.getenv("db_port") + '/' + os.getenv("db_user"))
        dbengine = ce(t_str)
        print('dbengine executed')
        df_4db.head(0).to_sql(i_tab_nme, con=dbengine, if_exists='append')
        print('head(0) executed')
        df_4db.to_sql(i_tab_nme, con=dbengine, if_exists='append', index=False)
        print('data insert executed')
        conn.close()
        return True
    except Exception:
        lg.echo_msg('Error in function fun_insert component IDF')
        return False
def parse_lib(lib, import_=True, obj=None):
    path = os.path.join(basedir, f"{lib}.db")
    dataurl = "sqlite:///" + path
    if os.path.exists(path):
        os.remove(path)  # drop any stale db file (portable replacement for shelling out to `rm`)
    print(f"creating SQLite db:\t {dataurl}")
    eng = ce(dataurl)
    sess = Session(bind=eng)
    for m in [docModel, docGraphModel, inhGraphModel]:
        refresh_table(m, engine=eng)
    if import_:
        dt = docTour(__import__(lib), lib, sess)
    else:
        dt = docTour(obj, lib, sess)
    return dt, dataurl
def __init__(self, db=179):
    """
    Constants and globals live here.
    """
    self._APIKEY = 'EA8CF467-3ECD-42D6-A59E-97D908AEB57D'
    self._api = ca(self._APIKEY)
    self._UTCTIMEFORMAT = "%Y-%m-%dT%H:%M:%S.0000000Z"  # coinapi date format
    self._LOCALTIMEFORMAT = "%Y-%m-%d %H:%M:%S"  # live date format
    self._period = {'1mk': '0 days 00:01:00'}
    if db == 179:  # use the david database on the 179 test environment
        self._engine = ce('mysql+pymysql://' +
                          'ops:ops!@#[email protected]:3308/david')
        self._conn = self._engine.connect()
def get_lib_page(self, libname):
    lib_item = self.get_lib(libname)
    eng = ce(lib_item.data)
    sess = Session(bind=eng)
    context = {
        "lib": libname,
        "total": sess.query(docModel).count(),
        "classes": sess.query(docModel).filter(docModel.ctype == "class").count(),
        "functions": sess.query(docModel).filter(docModel.ctype == "function").count(),
        "modules": sess.query(docModel).filter(docModel.ctype == "module").count(),
        "attr_relations": sess.query(docGraphModel).count(),
        "inheritance": sess.query(inhGraphModel).count(),
    }
    return self.render_template("lib.html", **context)
def read_sql_df(config_file_path, section):
    conf_sql = read_sql_conf(config_file_path, section)  # (connection URL, table name)
    con = ce(conf_sql[0])
    return pd.read_sql_table(conf_sql[1], con)
def save_sql_df(df, config_file_path, section):
    conf_sql = read_sql_conf(config_file_path, section)
    con = ce(conf_sql[0])
    df.to_sql(conf_sql[1], con=con, if_exists='replace')
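# A round-trip sketch for read_sql_df()/save_sql_df() above, assuming
# read_sql_conf(path, section) returns a (connection URL, table name) pair;
# 'db.ini' and the section names are hypothetical:
frame = read_sql_df('db.ini', 'source_table')
save_sql_df(frame, 'db.ini', 'target_table')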
from sqlalchemy import (create_engine as ce, Column, Integer, String, Float,
                        ForeignKey, Table)
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import sessionmaker, relationship

# DEFINE DATABASE ENGINE
engine = ce('sqlite:///dat')
Base = declarative_base()
Session = sessionmaker()

# DEFINE MAPPING TABLES
save_character = Table(
    'save_character', Base.metadata,
    Column('uid', Integer, primary_key=True),
    Column('gamedata_id', Integer, ForeignKey('gamedata.uid')),
    Column('character_id', Integer, ForeignKey('character.uid')),
)
save_location = Table(
    'save_location', Base.metadata,
    Column('uid', Integer, primary_key=True),
    Column('gamedata_id', Integer, ForeignKey('gamedata.uid')),
    Column('location_id', Integer, ForeignKey('location.uid')),
)
save_item = Table(
    'save_item', Base.metadata,
# (continuation of the User declarative model)
    def __setattr__(self, name, value):
        # hash the password transparently on assignment
        if name == 'password':
            self.__dict__['password'] = User.hash_password(value)
        else:
            super(User, self).__setattr__(name, value)

    def __str__(self):
        return "<User %s, %s>" % (self.username, self.password)

    async def check_user_in_database(self, connection):
        res = await connection.execute(
            User.__table__.select()
            .where(User.__table__.c.username == self.username)
            .where(User.__table__.c.password == self.password)
            .limit(1))
        if res.rowcount == 1:
            row = await res.fetchone()
            return True
        return False

    async def save(self, connection):
        res = await connection.execute(User.__table__.insert().values(
            username=self.username, password=self.password))

    @classmethod
    def hash_password(cls, password):
        return md5(password.encode('ascii')).hexdigest()

engine = None
if __name__ == '__main__':
    engine = ce(
        'postgresql+psycopg2://everjun:password@localhost:5432/test_db')
    Base.metadata.create_all(engine)
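# A usage sketch for the User model above. The awaited connection API
# (execute / rowcount / fetchone) matches aiopg.sa, so that is assumed here;
# the declarative constructor is assumed to accept column keyword arguments.
import asyncio
from aiopg.sa import create_engine as aio_ce

async def demo():
    async with aio_ce(user='everjun', password='password',
                      database='test_db', host='localhost') as aio_engine:
        async with aio_engine.acquire() as conn:
            user = User(username='alice', password='secret')  # hashed on assignment
            await user.save(conn)
            print(await user.check_user_in_database(conn))  # True once stored

# asyncio.run(demo())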
def eng(dbname='hfrd'):
    engine = ce('postgresql:///{0}'.format(dbname), echo=True)
    return engine
# use the following commands to install the required Python libraries if you are using pip3
# pip3 install pandas
# pip3 install pymysql
# pip3 install sqlalchemy
import pandas as pd
import pymysql
from sqlalchemy import create_engine as ce

# please replace the ****** with your password; you might also need to replace
# the database name, in my case it's MH6142
sqlengine = ce('mysql+pymysql://root:******@localhost:3306/MH6142')

# in my MH6142 database there is already a test_abc table; below just retrieves it back as a dataframe
sql = '''
select * from test_abc;
'''
dataframe_test_abc = pd.read_sql_query(sql, sqlengine)
print(dataframe_test_abc)

# write some dataframe to database table test_def, replace it if it's already there
dataframe_test_def = pd.DataFrame({
    'id': [1, 2, 3, 4],
    'att': ['d', 'e', 'f', 'g']
})
dataframe_test_def.to_sql('test_def', sqlengine, index=False, if_exists='replace')

# read the new test_def we just inserted back to another dataframe test_def1
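# a likely read-back for that step, reusing the query pattern from test_abc
# above (a sketch: the original snippet does not include this line):
dataframe_test_def1 = pd.read_sql_query('select * from test_def;', sqlengine)
print(dataframe_test_def1)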
# (fragment: tail of a sqlite connection helper)
    curs = conn.cursor()
    return {'fname': fName, 'cx': conn, 'crs': curs}

def gData(key, gid, hrow=0, indx=None):
    # fetch a CSV from the templated URL (key/gid) and normalize column names
    if not indx:
        data = pd.read_csv(gUrl.format(key, gid), header=hrow)
    else:
        data = pd.read_csv(gUrl.format(key, gid), header=hrow, index_col=indx)
    data.columns = [i.replace(' ', '').lower() for i in data.columns]
    return data

engine = ce('postgresql:///{0}'.format(dbname), echo=True)

ch4GWP = 28  # GWP of methane
mmbf2mcf = 5.44  # from McIver and Morgan

# bioenergy percentages from McIver
bio_pct = pd.read_sql(
    'select "index" as year, "Bioenergy"/100 as biopct '
    'from mciver_bio where "Bioenergy" is not null',
    sqlitedb()['cx'])
bio_dict = bio_pct.set_index('year').to_dict('index')

# Sathre DF with logging residue utilization in bioenergy
HWu = pd.read_sql('''SELECT * FROM so4
    WHERE harvestslash = "X"
    AND processingresidues = "X"
    AND "post-usewoodproduct" = "X"
""" import pandas as pd import MySQLdb as my from sqlalchemy import create_engine as ce from pandas import DataFrame as df ef1 = pd.read_csv('/home/ai21/Desktop/common/Python_Exercises/emp.csv', header=None, names='name empno desig salary deptcode'.split()) ef1['deptcode'] = [121, 122, 123, 121, 121, 123, 122, 121, 121, 124] print ef1 ce1 = ce("mysql://*****:*****@127.0.0.1/ai") #ef1.to_sql("ai_21_emp", ce1) dic1={ 'dept_no':[121,122,123,124],\ 'dept_name':['CSE','ECE','MECH','IT'],\ 'dept_location':['B2F0','B3F2','B1F1','B3F1'] } df1 = df(dic1, columns=['dept_no', 'dept_name', 'dept_location']) #df1.to_sql("ai_21_dep", ce1) con = my.connect('127.0.0.1', 'ai', 'ai', 'ai') ef2 = pd.read_sql("select * from ai_21_emp", con) df2 = pd.read_sql("select * from ai_21_dep", con) df3 = ef2.merge(df2, left_on='deptcode', right_on='dept_no')[['empno', 'name', 'dept_name',
from sqlalchemy import create_engine as ce
from selenium import webdriver

# load the PhantomJS driver
browser = webdriver.PhantomJS(
    executable_path="C:\\Users\\Shashank\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe")

premTable = "https://fbref.com/en/comps/9/Premier-League-Stats"
laLigaTable = "https://fbref.com/en/comps/12/La-Liga-Stats"
bundesligaTable = "https://fbref.com/en/comps/20/Bundesliga-Stats"
ligueOneTable = "https://fbref.com/en/comps/13/Ligue-1-Stats"
serieATable = "https://fbref.com/en/comps/11/Serie-A-Stats"

# create sqlalchemy engine
engine = ce("mysql+pymysql://{user}:{pw}@/{db}".format(user="", pw="", db=""))

def loadTable(league):
    if league == "epl":
        url = premTable
        tableName = "premTeamTable"
    elif league == 'bundesliga':
        url = bundesligaTable
        tableName = "bundesligaTeamTable"
    elif league == 'laLiga':
        url = laLigaTable
        tableName = "laligaTeamTable"
    elif league == 'serieA':
        url = serieATable
        tableName = "serieATeamTable"
""" create table ai_13_emp_dept(empno int NOT NULL,name varchar(20) NOT NULL,dep_name varchar(20),salary int,PRIMARY KEY (empno)); """ import sqlalchemy.sql import MySQLdb as my import pandas as pd import numpy as np from sqlalchemy import create_engine as ce con = my.connect('localhost', 'aravind', 'sql91011', 'db') df1 = pd.read_sql("select * from ai_13_emp", con) df2 = pd.read_sql("select * from ai_13_dept", con) print df1 df3 = pd.merge(df1, df2, left_on='deptcode', right_on='deptno')[['empno', 'name', 'dep_name', 'salary']] print df3 con = ce('mysql://*****:*****@localhost/db') df3.to_sql("ai_13_emp_dept", con)
import pandas as pd
from sqlalchemy import create_engine as ce

dbname = 'cmax'
engine = ce('postgresql:///{0}'.format(dbname), echo=True)
fiaUrl = 'http://apps.fs.fed.us/fiadb-downloads/'

def toDF(state, tname, makelower=True, lowm=False):
    '''
    Creates a pandas dataframe from the desired table.
    '''
    tab = '{0}_{1}.csv'.format(state, tname)
    df = pd.read_csv(fiaUrl + tab, low_memory=lowm)
    if makelower:
        df.columns = [i.lower() for i in df.columns]
    return df

def toDB(state, tname, dbname='cmax', if_ex='replace', makelower=True,
         lowm=False, isref=False):
    '''
    Generic tool for migrating CSVs to the database.
    WARNING: pandas can have difficulty inferring data types for these tables.
    '''
    if isref:
        tab = '{0}.csv'.format(tname)
    else:
        tab = '{0}_{1}.csv'.format(state, tname)
def storeDataToDB(self):
    # persist the scraped result frame only when it actually has rows
    if len(self.result) > 0:
        cel = ce('mysql://*****:*****@127.0.0.1/ai')
        self.result.to_sql("ai_20_result", cel)
        print("Table Created")
from sqlalchemy import (MetaData, create_engine as ce, Table, Column as Col,
                        Integer as Int, Text, String as Str, DateTime as DT,
                        ForeignKey as FK)

# the "postgres://" dialect alias was removed in SQLAlchemy 1.4; use "postgresql://"
DB = ce("postgresql://*****:*****@127.0.0.1:5432/sqlalch")
metadata = MetaData()

product = Table(
    'product', metadata,
    Col('productID', Int(), primary_key=True),
    Col('name', Str(25), nullable=False, unique=True),
    Col('categoryID', Int(), FK('category.categoryID')),
    Col('price', Int(), nullable=False),
    Col('description', Text())
)
category = Table(
    'category', metadata,
    Col('categoryID', Int(), primary_key=True),
    Col('name', Str(25), nullable=False)
)
customer = Table(
    'customer', metadata,
    Col('customerID', Int(), primary_key=True),
    Col('name', Str(20), nullable=False)
)
order = Table(
    'order', metadata,
    Col('orderID', Int(), primary_key=True),
    Col('customerID', Int(), FK('customer.customerID'))
)
order_details = Table(
    'order_details', metadata,