def setUp(self):
    self.timeStart = datetime.datetime.now()
    self.timeBegin = time.time()
    print('%s Running test: %s' % (self.timeStart, self.id()))
    # connect to the TI database
    DB = TI
    conS = "DRIVER=FreeTDS; SERVER=%s; PORT=%s; DATABASE=%s; UID=sa; PWD=%s; TDS_Version=8.0; ClientCharset=UTF8; autocommit=True" \
        % (DB['DB_address'], DB['DB_port'], DB['DB_name'], DB['DB_password'])
    try:
        self.conTI = pypyodbc.connect(conS)
        self.curTI = self.conTI.cursor()
    except Exception:
        print("Error while connecting to the TI database, connection string %s" % conS)
        exit(1)
    # connect to the ASP database
    DB = ASP
    conS = "DRIVER=FreeTDS; SERVER=%s; PORT=%s; DATABASE=%s; UID=sa; PWD=%s; TDS_Version=8.0; ClientCharset=UTF8; autocommit=True" % (
        DB['DB_address'], DB['DB_port'], DB['DB_name'], DB['DB_password'])
    try:
        self.conASP = pypyodbc.connect(conS)
        self.curASP = self.conASP.cursor()
    except Exception:
        print("Error while connecting to the ASP database, connection string %s" % conS)
        exit(1)
    # create the webdriver
    self.base_url = addr
    self.driver = webdriver.Firefox()
    self.driver.implicitly_wait(30)
    # log in to ASP
    self.driver.get(self.base_url + 'Login.aspx')
    self.verificationErrors = []
    self.accept_next_alert = True
    self.goMainASP()
def main():
    DB_wrapper.register(HANA_wrapper)
    DB_wrapper.register(PSQL_wrapper)
    hana_db = HANA_wrapper(cursor=pdbc.connect('DSN=hana_new;UID=SYSTEM;PWD=HANA4ever;DATABASE=MIMIC2').cursor(),
                           db_name="mimic", schema_name='MIMIC2')
    psql_db = PSQL_wrapper(cursor=pdbc.connect('DSN=psql_mimic').cursor(),
                           db_name="MIMIC2", schema_name='mimic2v26')
    test = DB_validator(modeldb=psql_db, testdb=psql_db, failhard=False)
def get_db_conn(self):
    db = self.db
    plat_str = platform.system()
    if plat_str == "Windows":
        connection_string = 'Driver={SQL Server Native Client 10.0};Server=%s;Database=%s;Uid=%s;Pwd=%s;PORT=%s;' % (
            db["HOST"], db["NAME"], db["USER"], db["PASSWORD"], db["PORT"])
    else:
        connection_string = 'DSN=MySQLServerDatabase;Server=%s;Database=%s;Uid=%s;Pwd=%s;PORT=%s;' % (
            db["HOST"], db["NAME"], db["USER"], db["PASSWORD"], db["PORT"])
    self.db_conn = pypyodbc.connect(connection_string)
    return self.db_conn
def __init__(self, db, FromParameter=None):
    if self.parameters is None:
        self.parameters = dict()
    if isinstance(db, DbEngine):
        self.connection = pypyodbc.connect(db.connection.connectString)
        for key, parameter in db.parameters.items():
            if key in self.parameters:
                self.parameters[key].parameters = parameter.parameters
    else:
        self.connection = pypyodbc.connect(db)
    self.cursor = self.connection.cursor()
    self.state = random()
    self.checkstate = random()
    self.refresh()
def write_schema(self, mdb_file_path, **field_types):
    """
    :param mdb_file_path: The file path of the mdb database to create
    :param field_types: A mapping of table, field pairs to field types.
                        Allowable field types are text, float and int.
                        If missing, primary key fields are text, and data fields are float.
    :return:
    """
    verify(dictish(field_types), "field_types should be a dict")
    for k, v in field_types.items():
        verify(k in self.tic_dat_factory.all_tables, "%s isn't a table name" % k)
        verify(dictish(v), "Need a mapping from field names to field types for %s" % k)
        for fld, type_ in v.items():
            verify(fld in self.tic_dat_factory.primary_key_fields.get(k, ()) +
                   self.tic_dat_factory.data_fields.get(k, ()),
                   "%s isn't a field name for table %s" % (fld, k))
            verify(type_ in ("text", "float", "int"),
                   "For table %s, field %s, %s isn't one of (text, float, int)" % (k, fld, type_))
    get_fld_type = lambda tbl, fld, default: field_types.get(tbl, {}).get(fld, default)
    if not os.path.exists(mdb_file_path):
        verify(self.can_write_new_file, "Writing to a new file not enabled")
        py.win_create_mdb(mdb_file_path)
    with py.connect(_connection_str(mdb_file_path)) as con:
        for t in self.tic_dat_factory.all_tables:
            create_stmt = "Create TABLE %s (\n" % t
            strl = ["%s %s" % (f, get_fld_type(t, f, "text"))
                    for f in self.tic_dat_factory.primary_key_fields.get(t, ())] + \
                   ["%s %s" % (f, get_fld_type(t, f, "float"))
                    for f in self.tic_dat_factory.data_fields.get(t, ())]
            if self.tic_dat_factory.primary_key_fields.get(t):
                strl.append("PRIMARY KEY(%s)" % ",".join(self.tic_dat_factory.primary_key_fields[t]))
            create_stmt += ",\n".join(strl) + "\n);"
            con.cursor().execute(create_stmt).commit()
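A minimal usage sketch for write_schema, assuming ticdat's Access/mdb writer is available (it needs the Access ODBC driver); the table and field names below are invented purely for illustration:

# Hypothetical illustration: the "parameters" table and its fields are made up.
from ticdat import TicDatFactory

tdf = TicDatFactory(parameters=[["key"], ["value"]])
# write an empty schema where "value" is stored as int instead of the float default
tdf.mdb.write_schema("example.mdb", parameters={"value": "int"})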
def main():
    # region login
    # open the config file
    Config = configparser.ConfigParser()
    Config.read("config.ini")
    # read the base64-encoded login details from config.ini
    driver = Config.get("Login", "Driver")
    server = Config.get("Login", "Server")
    database = Config.get("Login", "Database")
    uid = Config.get("Login", "uid")
    pwd = Config.get("Login", "pwd")
    # decode credentials: str() of bytes yields "b'...'", so the b'' wrapper is
    # stripped, and the doubled backslash in the repr is collapsed below
    driver = str(base64.b64decode(driver)).strip('b\'')
    server = str(base64.b64decode(server)).strip('b\'')
    ind = server.index("\\\\")
    server = server[:ind] + server[ind + 1:]
    database = str(base64.b64decode(database)).strip('b\'')
    uid = str(base64.b64decode(uid)).strip('b\'')
    pwd = str(base64.b64decode(pwd)).strip('b\'')
    login = ("Driver=%s;Server=%s;Database=%s;uid=%s;pwd=%s" %
             (driver, server, database, uid, pwd))
    connection = pypyodbc.connect(login)
    # endregion login
    # region open tkinter window
    root = tk.Tk()
    app = MainApplication(root, connection, server)
    root.mainloop()
    connection.close()
def UpdateMDB(tables):
    """Update the data in MonetDB."""
    Info('Updating MonetDB')
    if simulation_mode:
        cursor = None
    else:
        import pypyodbc as pyodbc
        conn = pyodbc.connect(monet_string, autocommit=False)
        cursor = conn.cursor()
    # TODO Allow only dropping of tables (no creation)?
    Info('Dropping existing tables')
    for table in reversed(tables):
        DropTableMDB(cursor, table)
    Info('Creating tables')
    for table in tables:
        CreateTableMDB(cursor, table)
    Info('Inserting data into tables')
    for table in tables:
        InsertTableMDB(cursor, table, export)
        if arity[table] == 'onetoone' or arity[table] == 'onetomany':
            RemoveOrphansMDB(cursor, table, keys[table], maintable, keys[maintable])
    Info('Altering tables')
    for table in tables:
        if arity[table] == 'maintable' or arity[table] == 'onetoone':
            SetKeyPrimaryMDB(cursor, table, keys[table])
        if arity[table] == 'onetoone' or arity[table] == 'onetomany':
            SetKeyForeignMDB(cursor, table, keys[table], maintable, keys[maintable])
        SetTableReadOnlyMDB(cursor, table)
    RunSQL(cursor, None, True)  # Explicit commit.
    if not simulation_mode:
        conn.close()
def collect_pairs(outfile):
    with open(outfile, 'w', newline='') as f:
        csv_writer = csv.writer(f, delimiter=',')
        wrow = ('a_station', 'b_station', 'distance')
        csv_writer.writerow(wrow)
        with pypyodbc.connect("DRIVER={SQL Server};SERVER=miranda;DATABASE=bikeshare;Trusted_Connection=true") as conx:
            curr = conx.cursor()
            retrieved_data = curr.execute("""
                select a.stationid A_station, b.stationid B_station,
                       a.lat a_lat, a.long a_long, b.lat b_lat, b.long b_long
                from stations a
                cross join stations b
                where a.stationid < b.stationid
                """)
            for row in retrieved_data:
                a_lat = row['a_lat']
                a_long = row['a_long']
                b_lat = row['b_lat']
                b_long = row['b_long']
                a_station = row['a_station']
                b_station = row['b_station']
                a_pt = (a_lat, a_long)
                b_pt = (b_lat, b_long)
                distance = vincenty(a_pt, b_pt).miles
                wrow = (a_station, b_station, distance)
                csv_writer.writerow(wrow)
def _get_connection(self):
    if self._connection:
        return self._connection
    connection_string = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s' % \
        self.rutamdb
    self._connection = pypyodbc.connect(connection_string)
    return self._connection
def loadglobalvars():
    global driver, server, database, uid, password, customerID
    global conn, cursor, encodedlp, encodedlpshort
    global lastreviewedapplicantworkflow, latestapplication
    driver = '{ODBC Driver 11 for SQL Server}'  # formerly {SQL Server} / {SQL Server Native Client 11.0}
    server = '382CFS1\\SSESNEXGEN'  # formerly 382CFS1\SSESNEXGEN
    database = 'iCIMS'
    uid = 'Zac'
    password = '******'
    customerID = '3786'
    # print(driver, server, database, uid, password)
    conn = pypyodbc.connect(driver=driver, server=server, database=database,
                            uid=uid, pwd=password)
    cursor = conn.cursor()
    encodedlp = str(base64.standard_b64encode(b'SoteraAPI:Tester123'))
    encodedlpshort = re.findall("b'(.*)'", encodedlp)
    encodedlpshort = str(encodedlpshort[0])
    lastreviewedapplicantworkflow = ''
    latestapplication = []
    logging.basicConfig(filename='debug.log', level=logging.DEBUG)
def edit():
    if request.method == 'GET':
        return render_template("Edit.html")
    elif request.method == 'POST':
        try:
            myConnection = pypyodbc.connect('Driver={SQL Server};'
                                            'Server=SIMON-HP\\SQLEXPRESS;'
                                            'Database=AlkimiiAdmin;'
                                            'uid=sa;pwd=12345')
            myCursor = myConnection.cursor()
            Name = request.form['EditName']
            Email = request.form['EditEmail']
            Password = request.form['EditPassword']
            EditId = request.form['EditId']
            # parameterised query keeps the form values out of the SQL string
            SQLCommand = ("UPDATE Users "
                          "SET Name = ?, Email = ?, Pword = ? "
                          "WHERE UsererId = ?")
            myCursor.execute(SQLCommand, [Name, Email, Password, EditId])
            myConnection.commit()
            myConnection.close()
            return redirect(url_for('display'))
        except:
            return "<h2>Error Occurred</h2>"
def __init__(self):
    __connectionString = (
        "Driver={SQL Server Native Client 11.0};Server=localhost;Database=QuizDB;uid=sa;pwd=Tit@nium1591"
    )
    self.connection = pypyodbc.connect(__connectionString)
    self.cursor = self.connection.cursor()
def update_tenant_general_information(self, tenant):
    """
    Takes a Tenant representation as parameter and updates the Tenant's general information.

    :param tenant: A Tenant representation
    :type tenant: Tenant class
    :return: True
    """
    connection_string = self.get_connection_string()
    try:
        my_connection = pypyodbc.connect(connection_string)
        cursor = my_connection.cursor()
        # parameterised call keeps the tenant values out of the SQL string
        cursor.execute("{call UpdateGymInfo(?,?,?,?,?,?,?,?)}",
                       [str(tenant['id']), tenant['password'], tenant['gym_name'],
                        tenant['address'], tenant['phone'], tenant['zip_code'],
                        tenant['city'], tenant['email']])
        cursor.commit()
        cursor.close()
        my_connection.close()
        return True
    except pypyodbc.DatabaseError as error:
        print(error.value)
def update_tenant_information(self, tenant):
    """
    Takes a Tenant representation as parameter and updates the Tenant's information.

    :param tenant: A Tenant representation
    :type tenant: Tenant class
    :return: True
    """
    connection_string = self.get_connection_string()
    try:
        my_connection = pypyodbc.connect(connection_string)
        cursor = my_connection.cursor()
        # If the Tenant doesn't want to update the password, don't send the password.
        # (== rather than "is": identity comparison against a string literal is unreliable)
        if tenant['new_password'] == '':
            cursor.execute("{call UpdateTenant(?,?,?,?,?)}",
                           [str(tenant['id']), tenant['password'], tenant['active_fortnox'],
                            tenant['image'], tenant['background_color']])
        else:
            cursor.execute("{call UpdateTenant(?,?,?,?,?,?)}",
                           [str(tenant['id']), tenant['password'], tenant['active_fortnox'],
                            tenant['image'], tenant['background_color'], tenant['new_password']])
        cursor.commit()
        cursor.close()
        my_connection.close()
        return True
    except pypyodbc.DatabaseError as error:
        print(error.value)
def getIndexData(indexName, rate):
    riseOrDown = []
    afterChange = []
    theData = []
    connect = pypyodbc.connect('driver=freetds;server=;port=1433;uid=;pwd=;DATABASE=record_database_daily')
    cur = connect.cursor()
    try:
        # e.g. select roi from AEX where dateT='10:26';
        for inf in cur.execute("select riseOrDown,dateT from %s" % (indexName)):
            theData.append(inf)
    except pypyodbc.Error:
        pass
    finally:
        cur.close()
        connect.close()
    for everyrow in theData:
        riseOrDown.append(everyrow[0])
    stderror = numpy.std(riseOrDown) * rate
    for everyrow in theData:
        if abs(everyrow[0]) >= stderror:
            if everyrow[0] > 0:
                afterChange.append((everyrow[1], "UP"))
            else:
                afterChange.append((everyrow[1], "DOWN"))
        else:
            afterChange.append((everyrow[1], "UNCHANGE"))
    return afterChange
def sqlconnect(driver=driver, server=server, database=database, uid=uid, password=password):
    """Establishes the connection to the SQL Server using an ODBC connection."""
    global conn
    global cursor
    conn = pypyodbc.connect(driver=driver, server=server, database=database,
                            uid=uid, pwd=password)
    cursor = conn.cursor()
    return cursor
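A short usage sketch, assuming the module-level driver/server/database/uid/password globals are already populated:

# the defaults come from module-level globals; any of them can be overridden per call
cursor = sqlconnect()
cursor.execute("SELECT 1")
print(cursor.fetchone())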
def add():
    if request.method == 'GET':
        try:
            return render_template("Add.html")
        except:
            return "<h2>Error in the get</h2>"
    elif request.method == 'POST':
        try:
            myConnection = pypyodbc.connect('Driver={SQL Server};'
                                            'Server=SIMON-HP\\SQLEXPRESS;'
                                            'Database=AlkimiiAdmin;'
                                            'uid=sa;pwd=12345')
            myCursor = myConnection.cursor()
            name = request.form['AddName']
            email = request.form['AddEmail']
            password = request.form['AddPassword']
            SQLCommand = ("INSERT INTO Users "
                          "(Name, Email, Pword) "
                          "VALUES (?,?,?)")
            values = [name, email, password]
            myCursor.execute(SQLCommand, values)
            myConnection.commit()
            myConnection.close()
            return redirect(url_for('display'))
        except:
            return "<h2>Error Occurred</h2>"
def get_statistic_tagevents(self, user_id=0):
    """
    Takes user_id as parameter.
    Get all tagevents used for the statistic page.

    :param user_id: User id
    :type user_id: integer
    :return: Array of Statistic tagevents
    """
    connection_string = self.get_connection_string()
    try:
        my_connection = pypyodbc.connect(connection_string)
        cursor = my_connection.cursor()
        cursor.execute("{call GetStatisticTagevents('" + str(user_id) + "')}")
        value = cursor.fetchall()
        cursor.close()
        my_connection.close()
        return_array = []
        # [:-8] removes unnecessary decimals for statistics
        for statistics_tagevent in value:
            return_array.append(statistic_tagevent.SQLStatisticTagevent(
                statistics_tagevent[0], statistics_tagevent[1][:-8],
                statistics_tagevent[2], statistics_tagevent[3]))
        return return_array
    except pypyodbc.DatabaseError as error:
        print(error.value)
def unlockhrs():
    cnxn = pyodbc.connect('Trusted_Connection=yes;DRIVER={SQL Server};SERVER=blabla;DATABASE=xxx;UID=domain1\\xxx;PWD=xxx')
    username = e
    cursor = cnxn.cursor()
    # pass the username as a bound ? parameter rather than embedding it in the string
    cursor.execute("update tbl_UserActivity set UserBlock = 0 where UserName = ?", (username,))
    cursor.commit()
    cursor.execute("select * from tbl_UserActivity where UserName = ?", (username,))
def connectdb(self):
    pypyodbc.lowercase = False
    conn = pypyodbc.connect(
        r"Driver={Microsoft Access Driver (*.mdb, *.accdb)};"
        r"Dbq=Database21.accdb;")
    cur = conn.cursor()
    return cur
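A hedged usage sketch; the owning class is not shown in the snippet, so AccessDao below is a hypothetical stand-in for whatever class defines connectdb, and SomeTable is a placeholder (note Database21.accdb is resolved relative to the working directory):

# AccessDao and SomeTable are invented names for illustration only
dao = AccessDao()
cur = dao.connectdb()
cur.execute("SELECT COUNT(*) FROM SomeTable")
print(cur.fetchone()[0])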
def querytodict(sql, db, n):
    ## Takes a SQL query and turns the result into a dict of dictionaries, keyed on the nth column:
    # d[key value][field], e.g. d['rob']['home'] -> hove
    # print('DB is:', db)
    connection = pypyodbc.connect(db)
    cursor = connection.cursor()
    # cursor.execute("set character_set_results = 'latin1'")
    try:
        x = cursor.execute(sql)
    except:
        print('#######################################################################')
        print(sql)
        warning('SQL error in query to dict:')
        print(errors)
        raise
    rows = x.fetchall()
    fields = [col[0] for col in x.description]
    types = [col[1] for col in x.description]
    sizes = [col[3] for col in x.description]
    d = {}
    for r in rows:
        # print(r)
        d[r[n]] = dict(zip(fields, r))
    connection.close()
    return d
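A usage sketch of querytodict following the docstring's own example; the DSN and table are made up:

# hypothetical DSN and table purely for illustration; key on column 0 (name)
people = querytodict("select name, home, age from people", "DSN=staff_db", 0)
print(people['rob']['home'])  # -> 'hove', per the docstring's example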
def __init__(self, resrcID, parameter):
    DataSource.__init__(self, resrcID, parameter)
    connStr = self.getConnStr()
    conn = pypyodbc.connect(connStr)
    self.cur = conn.cursor()
def test_sql(self, db, sql, group):
    if not (sql == 'ERROR'):
        connection = pypyodbc.connect(db)
        cursor = connection.cursor()
        countrecords = ('select count(*), count(distinct ' + self.sqlgroups[group]['key'] +
                        ') from (' + sql + ' ) xxx')
        try:
            x = cursor.execute(countrecords)
            row = x.fetchone()
            print('SQL okay for', group, ': ', row[0], 'records')
            if group not in self.onetomany and row[0] != row[1]:
                warning('ERROR: duplicate keys for onetoone group ' + group)
                warning('ERROR: DROPPING group ' + group)
                self.onetoone = [x for x in self.onetoone if x not in [group]]
            # print(' sample is top ' + sample)
        except:
            print('#######################################################################')
            print(sql)
            warning('SQL error in test SQL:')
            print(errors)
            raise
def getIndexData(indexName, rate):
    riseOrDown = []
    afterChange = []
    theData = []
    connect = None
    cur = None
    try:
        connect = pypyodbc.connect('driver=freetds;server=;port=1433;uid=;pwd=;database=record_database_daily;timeout=2')
        cur = connect.cursor()
        for inf in cur.execute("select riseOrDown,dateT from %s" % (indexName)):
            theData.append(inf)
    except pypyodbc.Error:
        print("Database link error")
    finally:
        # guard the close calls: if connect() raised, cur and connect never got bound
        if cur is not None:
            cur.close()
        if connect is not None:
            connect.close()
    for everyrow in theData:
        riseOrDown.append(everyrow[0])
    stderror = numpy.std(riseOrDown) * rate
    for everyrow in theData:
        if abs(everyrow[0]) >= stderror:
            if everyrow[0] > 0:
                afterChange.append((everyrow[1], 0))  # up
            else:
                afterChange.append((everyrow[1], 2))  # down
        else:
            afterChange.append((everyrow[1], 1))  # unchanged
    return afterChange
def connect_access_db(filename):
    """Returns a connection to the Access database."""
    driver = '{Microsoft Access Driver (*.mdb, *.accdb)}'
    con = pypyodbc.connect('Driver={0};Dbq={1};Uid=Admin;Pwd=;'
                           .format(driver, filename))
    return con
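A quick usage sketch (the .accdb path and table name are placeholders):

con = connect_access_db(r'C:\data\example.accdb')  # placeholder path
cur = con.cursor()
cur.execute("SELECT COUNT(*) FROM SomeTable")      # SomeTable is a placeholder
print(cur.fetchone()[0])
con.close()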
def get_whereabouts(payroll_id, date_from, date_to):
    cnxn = pyodbc.connect(
        "DRIVER={FreeTDS};SERVER=KNOX;PORT=1433;DATABASE=employee;UID=public_directory_ro;PWD=Pdro6232HFfsdk;UseNTLMv2=yes;TDS_Version=8.0;Trusted_Domain=dcl;autocommit=True"
    )
    cursor = cnxn.cursor()
    params = (payroll_id, date_from, date_to)
    sql = "exec employee.public_ro.where_qry_emp @payroll=?, @start_date=?, @end_date=?"
    result = cursor.execute(sql, params)
    wbts = []
    db_output = result.fetchall()
    # rows come back as morning/afternoon pairs, hence the step of 2
    for i in range(0, len(db_output), 2):
        entry = {}
        morn = {"text": db_output[i][10][:3], "style": "state" + db_output[i][10].upper()}
        aft = {"text": db_output[i + 1][10][:3], "style": "state" + db_output[i + 1][10].upper()}
        if db_output[i][5].strftime("%a") == "Fri":
            entry["style"] = "endOfWeek"
        if morn["text"] != aft["text"]:
            entry["split"] = True
            entry["morning"] = morn
            entry["afternoon"] = aft
        else:
            entry["split"] = False
            entry["day"] = aft
        wbts.append(entry)
    return wbts
def CreateAssociation(self):
    # Within the CreateAssociation function, call the Stored Procedure
    # implemented in the last module to create the association.
    import pypyodbc
    dbConnection = None
    try:
        # 1 Define the Database Connection
        dbConnection = pypyodbc.connect("Driver={SQL Server};"
                                        "Server=localhost;"
                                        "Database=DBS_RetailBanking;")
        # 2 Define the SQL Command
        sqlCommand = "{CALL uspCreateAssociation (?, ?, ?, ?)}"
        # 3 Define parameter values
        parameters = [self.__newCustomerName, self.__customerId1,
                      self.__customerId2, self.__associationTypeId]
        # 4 Replace Parameter Placeholders and Execute Command
        cursor = dbConnection.cursor()
        cursor.execute(sqlCommand, parameters)
        # 5 Commit Changes
        dbConnection.commit()
    except ConnectionError as ex:
        raise ex
    except Exception as ex:
        raise ex
    finally:
        # 6 Close Database Connection
        if dbConnection is not None:
            dbConnection.close()
def writeSQL(odbc_dsn, commit_str, debug):
    db_con = pypyodbc.connect('DSN=%s' % odbc_dsn)
    db_cur = db_con.cursor()
    db_cur.execute(commit_str).commit()
    db_con.close()
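A one-line usage sketch (the DSN and statement are placeholders):

writeSQL('my_dsn', "UPDATE jobs SET done = 1 WHERE id = 42", debug=False)  # placeholder DSN/SQL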
def __init__(self):
    self.con_str = 'Driver={SQL Server};' \
                   'Server=192.168.11.9;' \
                   'Database=i_collect;' \
                   'Uid=sa;Pwd=12121212;'
    self.con = p.connect(self.con_str, autocommit=True)
    self.cur = self.con.cursor()
def test_connection(odbc_dsn, table_name, table_create_file, debug=gDebug):
    try:
        db_con = pypyodbc.connect('DSN=%s' % odbc_dsn)
        db_cur = db_con.cursor()
    except Exception as e:
        writelog("***ERROR: unable to connect to DSN=%s: %s" % (odbc_dsn, e), True)
        sys.exit(2)
    # time.sleep(0.1)
    # else:
    #     if debug: print("\t odbc connection for %s success" % odbc_dsn)
    test_query = '''SELECT * FROM {table_name} LIMIT 1'''
    test_query = test_query.format(table_name=table_name)
    if debug:
        print("\t%s" % test_query)
    try:
        db_cur.execute(test_query)
    except Exception as e:
        writelog("***ERROR: test_query failed")
        if table_create_file:
            try:
                writelog("***Trying to CREATE TABLE %s.%s" % (odbc_dsn, table_name))
                create_table(db_con, db_cur, table_create_file, debug)
                db_cur.execute(test_query)  # retry test just in case
            except Exception as err:
                writelog("***ERROR: unable to create table %s.%s %s" % (odbc_dsn, table_name, err), True)
                sys.exit(2)
        else:
            writelog("\tNo table_create_file given, exiting: %s" % e, True)
            sys.exit(2)
    db_con.close()
    writelog("\t%s.%s READY" % (odbc_dsn, table_name))
import pypyodbc
import pandas as pd
import json
import psycopg2
from flask import Flask, jsonify, request

app = Flask(__name__)

cnxn = pypyodbc.connect("Driver={ODBC Driver 13 for SQL Server};"
                        "Server=localhost;"
                        "Database=InpatrimoniumNET_DEMO;"
                        "uid=SA;pwd=P@55w0rd")


@app.route("/api/record")
def record_by_id():
    id = request.args.get('id')
    # bind the id as a parameter instead of concatenating it into the SQL
    df = pd.read_sql_query(
        'select * from [dbo].[vw_Objecto_DublinCore] WHERE objecto_id = ?',
        cnxn, params=[id])
    out = df.to_json(orient='records')[1:-1].replace('},{', '} {')
    print(out)
    return out


@app.route("/api/records")
def GET(self, userModelName):
    cnxn = pypyodbc.connect("Driver={ODBC Driver 17 for SQL Server};"
                            "Server=52.211.54.45;"
                            "Database=AdaptiveNewsDatabase;"
                            "uid=sa;pwd=cobra@123")
    cursor = cnxn.cursor()
    cursor.execute('select * from [UserModels] where UserModelName=?', [userModelName])
    if cursor.rowcount == 0:
        return json.dumps('User Model doesn\'t exist')
    for row in cursor:
        userModelID = row[0]
        print(userModelID)
        break
    cursor.execute('select * from UserModelWeights where UserModelID=?', [userModelID])
    weight = 0.0
    categoryID = 1
    arr = []
    newsCounter = 0
    if cursor.rowcount == 0:
        return json.dumps('Model not assigned any weights')
    # category IDs 1-5 map to the newsapi.org category names
    categories = {1: 'sports', 2: 'entertainment', 3: 'politics',
                  4: 'technology', 5: 'business'}
    for row in cursor:
        weight = row[3]
        breakPoint = 1
        weightedNewsCount = int(round(weight * 20))
        categoryName = categories[categoryID]
        with urllib.request.urlopen(
                "https://newsapi.org/v2/top-headlines?country=us&category="
                + categoryName + "&apiKey=fcd148d3e7a44031b2f7ef24590d12f8") as url:
            data = json.loads(url.read().decode())
        for doc in data['articles']:
            if breakPoint > weightedNewsCount or newsCounter == 20:
                break
            # print(doc['title'])
            if doc['description'] is not None and doc['description'] != "":
                arr.append([categoryName, newsCounter + 1, doc['title'],
                            doc['description'], doc['url']])
                newsCounter += 1
                breakPoint += 1
        categoryID += 1
    jsonData = []
    count = 1
    for arrayobject in arr:
        jsonData.append({
            "Category": arrayobject[0],
            "NewsNumber": count,
            "title": arrayobject[2],
            "description": arrayobject[3],
            "url": arrayobject[4],
        })
        count += 1
    return json.dumps(jsonData, indent=2)
# Models
from sklearn.svm import SVR, LinearSVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesClassifier
from sklearn.linear_model import Ridge, Lasso
from sklearn import cluster
from sklearn.neighbors import KNeighborsClassifier
# For scaling/normalizing values
from sklearn.preprocessing import MinMaxScaler
# For splitting data into train and test sets
from sklearn.model_selection import train_test_split
import pypyodbc
# assumption: DateTime/TimeDelta are aliases for the standard library types
from datetime import datetime as DateTime, timedelta as TimeDelta

connection = pypyodbc.connect('Driver={SQL Server};'
                              'Server=X.X.X.X;'
                              'Database=Dump_Tables;'
                              'uid=X.X.X.X;pwd=X.X.X.X')

# today_date = datetime.today().strftime('%Y-%m-%d')
# today_date = '2017-12-25'
date_1 = DateTime.today()
end_date = date_1 + TimeDelta(days=-1)
today_date = end_date.strftime('%Y-%m-%d')
print(today_date)

r2_query = '''select users.userID, users.firstDepositDate,
sum(case when r2.[date] between users."7thDay" and users."13thDay" then CashGameCount end) as '7to14_CashGames',
count(distinct case when r2.[date] between users."7thDay" and users."13thDay" then CONVERT(DATE, r2.[date]) end) as '7to14_ActiveDays',
Sum(case when r2.[date] between users."7thDay" and users."13thDay" then GameWon end) as '7to14_WinCashGames',
Sum(case when r2.[date] between users."7thDay" and users."13thDay" then GameLost end) as '7to14_LossCashGames',
avg(case when r2.[date] between users."7thDay" and users."13thDay" then r2.entryFee end) as '7to14_AvgEntryFee',
avg(case when r2.[date] between users."7thDay" and users."13thDay" then r2.seat end) as '7to14_AvgComposition',
Sum(case when r2.[date] between users."7thDay" and users."13thDay" then Rake end) as '7to14_RakeGenerated',
sum(case when r2.[date] between users."14thDay" and users."21stDay" then CashGameCount end) as '14to21_CashGames',
count(distinct case when r2.[date] between users."14thDay" and users."21stDay" then CONVERT(DATE, r2.[date]) end) as '14to21_ActiveDays',
SUM(case when r2.[date] between users."14thDay" and users."21stDay" then r2.GameWon end) as '14to21_WinCashGames',
SUM(case when r2.[date] between users."14thDay" and users."21stDay" then r2.GameLost end) as '14to21_LossCashGames',
avg(case when r2.[date] between users."14thDay" and users."21stDay" then r2.EntryFee end) as '14to21_AvgEntryFee',
avg(case when r2.[date] between users."14thDay" and users."21stDay" then r2.seat end) as '14to21_AvgComposition',
SUM(case when r2.[date] between users."14thDay" and users."21stDay" then r2.Rake end) as '14to21_RakeGenerated'
from Dump_Tables.dbo.R2New r2
join (select userID, CONVERT(DATE, firstDepositDate) as firstDepositDate,
      Dateadd(dd,7,CONVERT(DATE, firstDepositDate)) as '7thDay',
      dateadd(dd,13,CONVERT(DATE, firstDepositDate)) as '13thDay',
      Dateadd(dd,14,CONVERT(DATE, firstDepositDate)) as '14thDay',
      Dateadd(dd,21,CONVERT(DATE, firstDepositDate)) as '21stDay'
      from JWR.dbo.Users
      where firstDepositDate is not NULL and userID != -1
      and CONVERT(DATE, firstDepositDate) = dateadd(dd,-21,CONVERT(DATE,\'''' + today_date + '''\'))) users
on users.userID=r2.userId and r2.[date] between users."7thDay" and users."21stDay"
group by users.userID, users.firstDepositDate;'''
def querydb(sql='', script='', dsn='', file='--', attempts=1, batchsize=100,
            delim='\t', nofetch=0, nowrite=0, as_dict=0, sqlserver=0):
    '''
    Execute SQL query and return results to a file instead of RAM.
    Exports results to results.txt unless specified otherwise.
    Useful when the result dataset would be very large or you want to directly
    store to disk without any further processing.
    '''
    if file == '--':
        file = 'results_{ts}.txt'.format(ts=dtnow(simple=1))
    if script != '':
        sql = readfile(script)
        try:
            sql = ' \n '.join(sql)
        except:
            pass
    print('Connecting to DB')
    pypyodbc.SQL_AUTOCOMMIT_OFF = 1
    sqlserver_cnx = ''  # no extra connection options unless sqlserver == 1
    if sqlserver == 1:
        sqlserver_cnx = ';MultiSubnetFailover=Yes'
    conn = pypyodbc.connect('DSN=' + str(dsn) + sqlserver_cnx)
    curs = conn.cursor()
    counter = 0
    print('Running Query')
    results = curs.execute(sql)
    if nofetch == 1:
        curs.commit()
        return
    print('Getting column names')
    colnames = [column[0] for column in curs.description]
    # If nowrite has been selected, fetch all data and return it
    if nowrite == 1:
        dataset = curs.fetchall()
        # Return a dictionary with column names
        if as_dict == 0:
            return {'columns': colnames, 'data': dataset}
        # Return data in list-of-dictionaries format
        if as_dict == 1:
            data_dict = [{colnames[i]: dataset[c][i] for i in range(len(colnames))}
                         for c in range(len(dataset))]
            return data_dict
    outfile = open(file, 'w')
    # Write column names header
    outfile.write(delim.join(colnames) + '\n')
    rowcount = 1
    while rowcount > 0:
        temp1 = curs.fetchmany(batchsize)
        rowcount = len(temp1)
        for r in temp1:
            str_r = [str(x) for x in r]
            outfile.write(delim.join(str_r) + '\n')
        counter += rowcount
        print(counter)
    outfile.close()
    curs.close()
    conn.close()
    print('Results in {f}'.format(f=file))
    return
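A usage sketch per the docstring: stream query results straight to a results file (the DSN and table are placeholders):

# writes results_<timestamp>.txt unless file= is given; nowrite=1 returns the data instead
querydb(sql="SELECT * FROM big_table", dsn="my_dsn", batchsize=500)  # placeholder DSN/table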
from datetime import datetime
from flask import Flask, render_template, jsonify, redirect, request
import pypyodbc

app = Flask(__name__)
print(__name__)

# connection object holding the SQL Server connection
connection = pypyodbc.connect(
    'Driver={SQL Server};Server=20.20.1.7;Database=X94D_DERIVE;uid=driveapp;pwd=daVIZta@117'
)

# Creating Cursor
#
# cursor = connection.cursor()
# cursor.execute("SELECT * FROM USERS_MASTER")
# s = "<table style='border:1px solid red'>"
# for row in cursor:
#     s = s + "<tr>"
#     for x in row:
#         s = s + "<td>" + str(x) + "</td>"
#     s = s + "</tr>"
# connection.close()


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/api/tasks', methods=['GET'])
def connect(self):
    if self.conn is False:
        self.conn = pypyodbc.connect(
            "DRIVER={SQL Server};Server=%s;port=%s;uid=%s;pwd=%s;database=%s;readonly=true"
            % (self.server, self.port, self.username, self.password, self.database),
            timeout=60)
import pypyodbc
import json

connection = pypyodbc.connect(driver='{SQL Server}',
                              server='DESKTOP-7GE22QK\\SQLEXPRESS',
                              database='TravelAgency')
cursor = connection.cursor()

SQLQuery = ("""Select Employees.PIP, Employees.Years, Employees.Phone
               From Employees""")
cursor.execute(SQLQuery)
result = cursor.fetchall()
json.dumps(result)
print(result)
connection.close()
import pypyodbc
import pandas as pd

connection = pypyodbc.connect("Driver={SQL Server Native Client 11.0};"
                              "Server=DESKTOP-9SU28DA;"
                              "Database=Codal;"
                              "Trusted_Connection=Yes;")
cursor = connection.cursor()
sqlcommand = 'select * from monthly_report_url'
cursor.execute(sqlcommand)
results = cursor.fetchone()
while results:
    url1396 = results[2]
    if url1396 is not None:  # Python has no NULL literal; test against None
        read_table = pd.read_html(url1396, encoding='UTF-8')
        try:
            monthly_sales = read_table[2].iloc[-1, -1]
            print(monthly_sales)
        except IndexError:
            print(results)
    print(results)
    results = cursor.fetchone()
connection.close()
- Generates Invoice and stores in G:\Team Drives\Alumni All Staff\Hesburgh Lecture Series\Confirmation Packet\Invoices
- Drafts Coordinator Email
- Drafts Faculty Email
- Updates Google Calendar for [email protected]
- Inserts entry for new lecture in the MyND Events Calendar List Google Spreadsheet

This Module program uses 4 accompanying HLS related modules (called below) and is a direct executable
"""

import pypyodbc
import G_PDF_Generator, G_Google_Calendar, G_Gmail_Access, Google_Spreadsheet
import time

CON = pypyodbc.connect(
    r'DRIVER={Microsoft Access Driver (*.mdb)};UID=admin;UserCommitSync=Yes;Threads=3;'
    r'SafeTransactions=0;PageTimeout=5;MaxScanRows=8;MaxBufferSize=2048;FIL={MS Access};'
    r'DriverId=25;DefaultDir=C:/Users/sdetremp/Python_MS_Access/Database;'
    r'DBQ=P:\Databases\Academic Databases\AcademicPrograms_apps_Robby.mdb;')
CURSOR = CON.cursor()


def main():
    # Prompt user for number of HLS events to process and which parts to process
    print('Welcome to the Hesburgh Lecture Series Processing Program')
    user_input = input(
        'Please select the number of events you would like to process: \n')
    which_parts = input(
        'Which of the following would you like to generate: (Please separate numbers with a comma(,))'
        '\n0: All'
        '\n1: Confirmation'
import pypyodbc

SQLserver = "DESKTOP-MMDDPMA\\SQLEXPRESS"
SQLDatabase = "AdventureWorks2017"
SQLconnect = pypyodbc.connect('driver={SQL Server};'
                              'server=' + SQLserver + ';'
                              'Database=' + SQLDatabase + ';')
cursor = SQLconnect.cursor()
SQLQuery = ("""
    SELECT addressline1 [City]
    FROM [AdventureWorks2017].[Person].[Address]
    """)
cursor.execute(SQLQuery)
results = cursor.fetchall()
print(results)
SQLconnect.close()
# field 8 indices
indice_inicial_campo_8 = 170
indice_final_campo_8 = 190
# field 9 indices
indice_inicial_campo_9 = 190
indice_final_campo_9 = 210

# File paths
ruta_fichero_access = 'C:\\Users\\34637\\Documents\\BD_2019.accdb'
ruta_fichero_txt = "C:\\Users\\34637\\Desktop\\fichero_txt_a_mdb\\fichero.TXT"

# Program
dbname = ruta_fichero_access
constr = "DRIVER={{Microsoft Access Driver (*.mdb, *.accdb)}};DBQ={0};".format(dbname)

# Connect to the Access database
dbconn = pypyodbc.connect(constr)
cur = dbconn.cursor()

# Prepare the SQL
sql = 'INSERT INTO BASE_DATOS_MAYOR VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)'

f = open(ruta_fichero_txt, "r")
lines = f.readlines()[1:-1]
for x in lines:
    params = (x[indice_inicial_campo_1:indice_final_campo_1],
              x[indice_inicial_campo_2:indice_final_campo_2],
              x[indice_inicial_campo_3:indice_final_campo_3],
              x[indice_inicial_campo_4:indice_final_campo_4],
              x[indice_inicial_campo_5:indice_final_campo_5],
              x[indice_inicial_campo_6:indice_final_campo_6],
              x[indice_inicial_campo_7:indice_final_campo_7],
              x[indice_inicial_campo_8:indice_final_campo_8],
# -*- coding: utf-8 -*-
import pypyodbc

connection = pypyodbc.connect(driver='{ODBC Driver 17 for SQL Server}',
                              server='tcp:databaseproject777.database.windows.net,1433',
                              database='test_database',
                              uid='Marcin',
                              pwd='projekt.BD2.1')
cursor = connection.cursor()
tables_and_columns = cursor.execute("SELECT * FROM INFORMATION_SCHEMA.COLUMNS")

print("YOU SHOULD SEE ALL TABLES AND COLUMNS LISTED BELOW:\n")
for row in tables_and_columns:
    print(f"Table: {row[2]}; Column: {row[3]}")
if OffSet is None:
    # Ensure something was selected for WET thickness
    msgbox("Please re-run the program and enter an offset, even if it's 0.0",
           title="WET box closed without entry")
    exit()
try:
    # Ensure entered WET value is a sensible entry
    float(OffSet)
except ValueError:
    msgbox("Please re-run the program and enter an appropriate value for the WET offset",
           title="WET Value Error")
    exit()
OffSet = float(OffSet)  # OffSet is entered as a string - this converts it to a float

# pypyodbc is a library allowing you to connect to an SQL database. This is some code that I copied from google,
# The important bit is the DBQ= bit where you put the location of the database back end.
conn = pypyodbc.connect(
    r'Driver={Microsoft Access Driver (*.mdb, *.accdb)};'
    r'DBQ=O:/protons/Work in Progress/Christian/Database/Proton/AssetsDatabase_be - CB.accdb'
)
# C:/Users/cgillies/Desktop/Python_3/GIT Some/TwoDataSets-PDD/AssetsDatabase_beCG.accdb;'
# r'PWD=1234;'
cursor = conn.cursor()
cursor.execute('select * from Operators')  # This line adds the "operators" table to the cursor
for row in cursor.fetchall():  # This fetches the data from the cursor
    Operators.append(row[2])  # This fetches the names and appends them to the operators list.
Operator = choicebox("Who performed the measurements?", "Operator",
                     Operators)  # This allows user to select from the database list of operators

####################
# In order to write to the database, two tables need to be filled.
# One table needs to be filled in first and it requires the date and gantry angle.
Created on Tue Sep 24 09:29:54 2019

@author: isaac.nyamekye
"""

# Import Required Libraries
import pandas as pd
import numpy as np
import pypyodbc
import datetime

# Extract Data from TRF SQL Server
# Connects to TRF data located on SQL Server
connection = pypyodbc.connect('Driver={SQL Server};'
                              'Server=EDM-GOA-SQL-452;'
                              'Database=A_STAGING;')

# Defines and Executes SQL code to pull required variables from SQL Server
sql = ('SELECT trf_id,'
       'MAX(referraldate) AS referral_date,'
       'MAX(completeddatetime) AS completed_date,'
       'MAX(noc_noc) AS noc,'
       'MAX(age) AS age,'
       'MAX(postalcode) AS postal_Code,'
       'MAX(gender_fmt) AS gender,'
       'MAX(referralstatus_fmt) AS referral_status,'
       'MAX(esdcfeedback) AS esdc_feedback_num,'
       'MAX(esdcfeedback_fmt) AS esdc_feedback,'
       'MAX(serviceproviderfeedback_fmt) AS service_provider_feedback'
       ' FROM trf.referral GROUP BY trf_id ORDER BY trf_id;')
df1 = pd.read_sql(sql, connection)
# -*- coding: utf-8 -*-
import telebot
from telebot import types
import pypyodbc

mydb = pypyodbc.connect(
    "Driver={SQL Server};"
    "Server=DESKTOP-3F59213;"
    "Database=Telebot;"
)
cursor = mydb.cursor()
mySQLQuery = ("""
    SELECT TOP 11 *
    FROM dbo.adresa
    """)

token = '1181992733:AAH79PQaMpHMJfazGSL4SZNLGSzdCWPc5Y4'
url = 'https://api.telegram.org/bot1181992733:AAH79PQaMpHMJfazGSL4SZNLGSzdCWPc5Y4/getUpdates'
user_dict = {}
bot = telebot.TeleBot(token)


class User:
    def __init__(self, adres):
        self.adres = adres

    def __str__(self):
        return self.adres


@bot.message_handler(commands=['start'])
def createTables():
    conn = pypyodbc.connect(CONNECTION_STRING)
    cur = conn.cursor()
    # index table: id dataset name
    try:
        cur.execute('''CREATE TABLE index_key (
            id int PRIMARY KEY IDENTITY(1,1) NOT NULL,
            dataset NVARCHAR(255) NOT NULL,
            timestamp DATETIME NOT NULL,
            name NVARCHAR(255) NOT NULL);''')
        cur.commit()
        print("index_key Table Created\n")
    except pypyodbc.ProgrammingError as e:
        print(e)
    # structural_metrics table: id rosetta structural metrics
    try:
        cur.execute('''CREATE TABLE structural_metrics (
            id int NOT NULL, AlaCount int NOT NULL, T1_absq int NOT NULL, T1_netq int NOT NULL,
            Tend_absq int NOT NULL, Tend_netq int NOT NULL, Tminus1_absq int NOT NULL,
            Tminus1_netq int NOT NULL, abego_res_profile float NOT NULL,
            abego_res_profile_penalty float NOT NULL, avg_all_frags float NOT NULL,
            avg_best_frag float NOT NULL, bb float NOT NULL, buns_bb_heavy int NOT NULL,
            buns_nonheavy int NOT NULL, buns_sc_heavy int NOT NULL,
            buried_minus_exposed float NOT NULL, buried_np float NOT NULL,
            buried_np_AFILMVWY float NOT NULL, buried_np_AFILMVWY_per_res float NOT NULL,
            buried_np_per_res float NOT NULL, buried_over_exposed float NOT NULL,
            chymo_cut_sites int NOT NULL, chymo_with_LM_cut_sites float NOT NULL,
            contact_all int NOT NULL, contact_core_SASA int NOT NULL, contact_core_SCN int NOT NULL,
            contig_not_hp_avg float NOT NULL, contig_not_hp_avg_norm float NOT NULL,
            contig_not_hp_internal_max int NOT NULL, contig_not_hp_max int NOT NULL,
            degree float NOT NULL, description NVARCHAR(255) NOT NULL, dslf_fa13 float NOT NULL,
            dssp NVARCHAR(255) NOT NULL, entropy float NOT NULL, exposed_hydrophobics float NOT NULL,
            exposed_np_AFILMVWY float NOT NULL, exposed_polars float NOT NULL,
            exposed_total float NOT NULL, fa_atr float NOT NULL, fa_atr_per_res float NOT NULL,
            fa_dun_dev float NOT NULL, fa_dun_rot float NOT NULL, fa_dun_semi float NOT NULL,
            fa_elec float NOT NULL, fa_intra_atr_xover4 float NOT NULL,
            fa_intra_sol_xover4 float NOT NULL, fa_intra_elec float NOT NULL,
            fa_intra_rep_xover4 float NOT NULL, fa_rep float NOT NULL, fa_rep_per_res float NOT NULL,
            fa_sol float NOT NULL, frac_helix float NOT NULL, frac_loop float NOT NULL,
            frac_sheet float NOT NULL, fxn_exposed_is_np float NOT NULL, hbond_bb_sc float NOT NULL,
            hbond_lr_bb float NOT NULL, hbond_lr_bb_per_sheet float NOT NULL, hbond_sc float NOT NULL,
            hbond_sr_bb float NOT NULL, hbond_sr_bb_per_helix float NOT NULL, helix_sc float NOT NULL,
            holes float NOT NULL, hphob_sc_contacts int NOT NULL, hphob_sc_degree float NOT NULL,
            hxl_tors float NOT NULL, hydrophobicity int NOT NULL, largest_hphob_cluster int NOT NULL,
            lk_ball float NOT NULL, lk_ball_bridge float NOT NULL, lk_ball_bridge_uncpl float NOT NULL,
            lk_ball_iso float NOT NULL, loop_sc float NOT NULL, mismatch_probability float NOT NULL,
            n_charged float NOT NULL, n_hphob_clusters int NOT NULL, n_hydrophobic int NOT NULL,
            n_hydrophobic_noA int NOT NULL, n_polar_core int NOT NULL, n_res int NOT NULL,
            nearest_chymo_cut_to_Cterm int NOT NULL, nearest_chymo_cut_to_Nterm int NOT NULL,
            nearest_chymo_cut_to_term int NOT NULL, nearest_tryp_cut_to_Cterm int NOT NULL,
            nearest_tryp_cut_to_Nterm int NOT NULL, nearest_tryp_cut_to_term int NOT NULL,
            net_atr_net_sol_per_res float NOT NULL, net_atr_per_res float NOT NULL,
            net_sol_per_res float NOT NULL, netcharge float NOT NULL, nres int NOT NULL,
            nres_helix int NOT NULL, nres_loop int NOT NULL, nres_sheet int NOT NULL,
            omega float NOT NULL, one_core_each float NOT NULL, p_aa_pp float NOT NULL,
            pack float NOT NULL, percent_core_SASA float NOT NULL, percent_core_SCN float NOT NULL,
            pro_close float NOT NULL, rama_prepro float NOT NULL, ref float NOT NULL,
            res_count_core_SASA int NOT NULL, res_count_core_SCN int NOT NULL,
            score_per_res float NOT NULL, sequence NVARCHAR(255) NOT NULL,
            ss_contributes_core float NOT NULL, ss_sc float NOT NULL, sum_best_frags float NOT NULL,
            total_score float NOT NULL, tryp_cut_sites int NOT NULL, two_core_each float NOT NULL,
            worst6frags float NOT NULL, worstfrag float NOT NULL, timestamp DATETIME NOT NULL,
            FOREIGN KEY ( id ) REFERENCES index_key ( id ));''')
        cur.commit()
        print("structural_metrics Table Created\n")
    except pypyodbc.ProgrammingError as e:
        print(e)
    # experimental_stability table: id experimentally determined stability measurements
    try:
        cur.execute('''CREATE TABLE experimental_stability_scores (
            id int NOT NULL, assay_library float NOT NULL, ec50_t float NOT NULL,
            ec50_95ci_lbound_t float NOT NULL, ec50_95ci_ubound_t float NOT NULL,
            ec50_95ci_t float NOT NULL, ec50_pred_t float NOT NULL, ec50_rise_t float NOT NULL,
            stabilityscore_t float NOT NULL, ec50_c float NOT NULL, ec50_95ci_lbound_c float NOT NULL,
            ec50_95ci_ubound_c float NOT NULL, ec50_95ci_c float NOT NULL, ec50_pred_c float NOT NULL,
            ec50_rise_c float NOT NULL, stabilityscore_c float NOT NULL, stabilityscore float NOT NULL,
            ec50_cnn_pred_t float NOT NULL, ec50_cnn_pred_c float NOT NULL,
            stabilityscore_cnn_t float NOT NULL, stabilityscore_cnn_c float NOT NULL,
            stabilityscore_cnn float NOT NULL, ec50_calibrated_t float NOT NULL,
            ec50_rise_calibrated_t float NOT NULL, stabilityscore_calibrated_t float NOT NULL,
            ec50_calibrated_c float NOT NULL, ec50_rise_calibrated_c float NOT NULL,
            stabilityscore_calibrated_c float NOT NULL, stabilityscore_calibrated float NOT NULL,
            stabilityscore_cnn_calibrated_t float NOT NULL, stabilityscore_cnn_calibrated_c float NOT NULL,
            stabilityscore_cnn_calibrated float NOT NULL, timestamp DATETIME NOT NULL,
            FOREIGN KEY ( id ) REFERENCES index_key ( id ));''')
        cur.commit()
        print("experimental_stability Table Created\n")
    except pypyodbc.ProgrammingError as e:
        print(e)
    # configurational_entropy table: id S_PC per-residue-type Sconf
    try:
        cur.execute('''CREATE TABLE configurational_entropy (
            id int NOT NULL, S_PC float NOT NULL, Mean_H_entropy float NOT NULL,
            Mean_L_entropy float NOT NULL, Mean_E_entropy float NOT NULL,
            Mean_res_entropy float NOT NULL, SumH_entropies float NOT NULL,
            SumL_entropies float NOT NULL, SumE_entropies float NOT NULL,
            H_max_entropy float NOT NULL, H_min_entropy float NOT NULL,
            H_range_entropy float NOT NULL, L_max_entropy float NOT NULL,
            L_min_entropy float NOT NULL, L_range_entropy float NOT NULL,
            E_max_entropy float NOT NULL, E_min_entropy float NOT NULL,
            E_range_entropy float NOT NULL, timestamp DATETIME NOT NULL,
            FOREIGN KEY ( id ) REFERENCES index_key ( id ));''')
        cur.commit()
        print("configurational_entropy Table Created\n")
    except pypyodbc.ProgrammingError as e:
        print(e)
    # two_body_descriptors table: id pairwise atom data
    try:
        cur.execute('''CREATE TABLE two_body_descriptors (
            id int NOT NULL,
            HH0 float NOT NULL, HH1 float NOT NULL, HH2 float NOT NULL, HH3 float NOT NULL, HH4 float NOT NULL, HH5 float NOT NULL, HH6 float NOT NULL, HH7 float NOT NULL, HH8 float NOT NULL, HH9 float NOT NULL, HH10 float NOT NULL, HH11 float NOT NULL, HH12 float NOT NULL, HH13 float NOT NULL, HH14 float NOT NULL,
            HC0 float NOT NULL, HC1 float NOT NULL, HC2 float NOT NULL, HC3 float NOT NULL, HC4 float NOT NULL, HC5 float NOT NULL, HC6 float NOT NULL, HC7 float NOT NULL, HC8 float NOT NULL, HC9 float NOT NULL, HC10 float NOT NULL, HC11 float NOT NULL, HC12 float NOT NULL, HC13 float NOT NULL, HC14 float NOT NULL,
            HN0 float NOT NULL, HN1 float NOT NULL, HN2 float NOT NULL, HN3 float NOT NULL, HN4 float NOT NULL, HN5 float NOT NULL, HN6 float NOT NULL, HN7 float NOT NULL, HN8 float NOT NULL, HN9 float NOT NULL, HN10 float NOT NULL, HN11 float NOT NULL, HN12 float NOT NULL, HN13 float NOT NULL, HN14 float NOT NULL,
            HO0 float NOT NULL, HO1 float NOT NULL, HO2 float NOT NULL, HO3 float NOT NULL, HO4 float NOT NULL, HO5 float NOT NULL, HO6 float NOT NULL, HO7 float NOT NULL, HO8 float NOT NULL, HO9 float NOT NULL, HO10 float NOT NULL, HO11 float NOT NULL, HO12 float NOT NULL, HO13 float NOT NULL, HO14 float NOT NULL,
            HS0 float NOT NULL, HS1 float NOT NULL, HS2 float NOT NULL, HS3 float NOT NULL, HS4 float NOT NULL, HS5 float NOT NULL, HS6 float NOT NULL, HS7 float NOT NULL, HS8 float NOT NULL, HS9 float NOT NULL, HS10 float NOT NULL, HS11 float NOT NULL, HS12 float NOT NULL, HS13 float NOT NULL, HS14 float NOT NULL,
            CC0 float NOT NULL, CC1 float NOT NULL, CC2 float NOT NULL, CC3 float NOT NULL, CC4 float NOT NULL, CC5 float NOT NULL, CC6 float NOT NULL, CC7 float NOT NULL, CC8 float NOT NULL, CC9 float NOT NULL, CC10 float NOT NULL, CC11 float NOT NULL, CC12 float NOT NULL, CC13 float NOT NULL, CC14 float NOT NULL,
            CN0 float NOT NULL, CN1 float NOT NULL, CN2 float NOT NULL, CN3 float NOT NULL, CN4 float NOT NULL, CN5 float NOT NULL, CN6 float NOT NULL, CN7 float NOT NULL, CN8 float NOT NULL, CN9 float NOT NULL, CN10 float NOT NULL, CN11 float NOT NULL, CN12 float NOT NULL, CN13 float NOT NULL, CN14 float NOT NULL,
            CO0 float NOT NULL, CO1 float NOT NULL, CO2 float NOT NULL, CO3 float NOT NULL, CO4 float NOT NULL, CO5 float NOT NULL, CO6 float NOT NULL, CO7 float NOT NULL, CO8 float NOT NULL, CO9 float NOT NULL, CO10 float NOT NULL, CO11 float NOT NULL, CO12 float NOT NULL, CO13 float NOT NULL, CO14 float NOT NULL,
            CS0 float NOT NULL, CS1 float NOT NULL, CS2 float NOT NULL, CS3 float NOT NULL, CS4 float NOT NULL, CS5 float NOT NULL, CS6 float NOT NULL, CS7 float NOT NULL, CS8 float NOT NULL, CS9 float NOT NULL, CS10 float NOT NULL, CS11 float NOT NULL, CS12 float NOT NULL, CS13 float NOT NULL, CS14 float NOT NULL,
            NN0 float NOT NULL, NN1 float NOT NULL, NN2 float NOT NULL, NN3 float NOT NULL, NN4 float NOT NULL, NN5 float NOT NULL, NN6 float NOT NULL, NN7 float NOT NULL, NN8 float NOT NULL, NN9 float NOT NULL, NN10 float NOT NULL, NN11 float NOT NULL, NN12 float NOT NULL, NN13 float NOT NULL, NN14 float NOT NULL,
            NO0 float NOT NULL, NO1 float NOT NULL, NO2 float NOT NULL, NO3 float NOT NULL, NO4 float NOT NULL, NO5 float NOT NULL, NO6 float NOT NULL, NO7 float NOT NULL, NO8 float NOT NULL, NO9 float NOT NULL, NO10 float NOT NULL, NO11 float NOT NULL, NO12 float NOT NULL, NO13 float NOT NULL, NO14 float NOT NULL,
            NS0 float NOT NULL, NS1 float NOT NULL, NS2 float NOT NULL, NS3 float NOT NULL, NS4 float NOT NULL, NS5 float NOT NULL, NS6 float NOT NULL, NS7 float NOT NULL, NS8 float NOT NULL, NS9 float NOT NULL, NS10 float NOT NULL, NS11 float NOT NULL, NS12 float NOT NULL, NS13 float NOT NULL, NS14 float NOT NULL,
            OO0 float NOT NULL, OO1 float NOT NULL, OO2 float NOT NULL, OO3 float NOT NULL, OO4 float NOT NULL, OO5 float NOT NULL, OO6 float NOT NULL, OO7 float NOT NULL, OO8 float NOT NULL, OO9 float NOT NULL, OO10 float NOT NULL, OO11 float NOT NULL, OO12 float NOT NULL, OO13 float NOT NULL, OO14 float NOT NULL,
            OS0 float NOT NULL, OS1 float NOT NULL, OS2 float NOT NULL, OS3 float NOT NULL, OS4 float NOT NULL, OS5 float NOT NULL, OS6 float NOT NULL, OS7 float NOT NULL, OS8 float NOT NULL, OS9 float NOT NULL, OS10 float NOT NULL, OS11 float NOT NULL, OS12 float NOT NULL, OS13 float NOT NULL, OS14 float NOT NULL,
            SS0 float NOT NULL, SS1 float NOT NULL, SS2 float NOT NULL, SS3 float NOT NULL, SS4 float NOT NULL, SS5 float NOT NULL, SS6 float NOT NULL, SS7 float NOT NULL, SS8 float NOT NULL, SS9 float NOT NULL, SS10 float NOT NULL, SS11 float NOT NULL, SS12 float NOT NULL, SS13 float NOT NULL, SS14 float NOT NULL,
            timestamp DATETIME NOT NULL,
            FOREIGN KEY ( id ) REFERENCES index_key ( id ));''')
        cur.commit()
        print("two_body_descriptors Table Created\n")
    except pypyodbc.ProgrammingError as e:
        print(e)
    # metadata table: id sequence dssp topology description
    try:
        cur.execute('''CREATE TABLE metadata (
            id int NOT NULL, sequence NVARCHAR(255) NOT NULL, dssp NVARCHAR(255) NOT NULL,
            topology NVARCHAR(255) NOT NULL, description NVARCHAR(255) NOT NULL,
            timestamp DATETIME NOT NULL,
            FOREIGN KEY ( id ) REFERENCES index_key ( id ));''')
        cur.commit()
        print("metadata Table Created\n")
    except pypyodbc.ProgrammingError as e:
        print(e)
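A small driver sketch; the original module defines CONNECTION_STRING elsewhere, so the value below is only a placeholder:

# placeholder connection string; the real module supplies CONNECTION_STRING itself
CONNECTION_STRING = ("Driver={ODBC Driver 17 for SQL Server};"
                     "Server=localhost;Database=protein_db;Trusted_Connection=yes;")
createTables()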
import sys
import os
sys.path.append("Lib\\site-packages")
sys.path.append(os.getcwd())
import pypyodbc
import time

CREATE_LOCATIONS_TBL = "IF OBJECT_ID('[maphawks_flat].[dbo].[Locations]', 'U') IS NULL BEGIN CREATE TABLE Locations(ReferenceID VARCHAR(64) NOT NULL PRIMARY KEY,Name VARCHAR(64),Address VARCHAR(64) NOT NULL,City VARCHAR(64) NOT NULL,County VARCHAR(64),State VARCHAR(64) NOT NULL,PostalCode VARCHAR(64),Country VARCHAR(64),Phone VARCHAR(64),Fax VARCHAR(64),WebAddress VARCHAR(64),Latitude DECIMAL(9,6),Longitude DECIMAL(9,6),Hours VARCHAR(64),RetailOutlet VARCHAR(64),RestrictedAccess VARCHAR(64),AcceptDeposit VARCHAR(64),AcceptCash VARCHAR(64),EnvelopeRequired VARCHAR(64),OnMilitaryBase VARCHAR(64),OnPremise VARCHAR(64),Surcharge VARCHAR(64),Access VARCHAR(64),AccessNotes VARCHAR(64),InstallationType VARCHAR(64),HandicapAccess VARCHAR(64),LocationType VARCHAR(64),HoursMonOpen VARCHAR(200),HoursMonClose VARCHAR(200),HoursTueOpen VARCHAR(200),HoursTueClose VARCHAR(200),HoursWedOpen VARCHAR(200),HoursWedClose VARCHAR(200),HoursThuOpen VARCHAR(200),HoursThuClose VARCHAR(200),HoursFriOpen VARCHAR(200),HoursFriClose VARCHAR(200),HoursSatOpen VARCHAR(200),HoursSatClose VARCHAR(200),HoursSunOpen VARCHAR(200),HoursSunClose VARCHAR(200),HoursDTMonOpen VARCHAR(200),HoursDTMonClose VARCHAR(200),HoursDTTueOpen VARCHAR(200),HoursDTTueClose VARCHAR(200),HoursDTWedOpen VARCHAR(200),HoursDTWedClose VARCHAR(200),HoursDTThuOpen VARCHAR(200),HoursDTThuClose VARCHAR(200),HoursDTFriOpen VARCHAR(200),HoursDTFriClose VARCHAR(200),HoursDTSatOpen VARCHAR(200),HoursDTSatClose VARCHAR(200),HoursDTSunOpen VARCHAR(200),HoursDTSunClose VARCHAR(200),Cashless VARCHAR(64),DriveThruOnly VARCHAR(64),LimitedTransactions VARCHAR(64),MilitaryIdRequired VARCHAR(64),SelfServiceDevice VARCHAR(64),SelfServiceOnly VARCHAR(64)) END;\n"
sqlCommand = "SELECT ReferenceID FROM [maphawks_flat].[dbo].[Locations];"
checkTable = ""

if __name__ == "__main__":
    conn = pypyodbc.connect(
        'Driver={ODBC Driver 17 for SQL Server};Server=maphawks.database.windows.net;Database=maphawks_flat;uid=jLadera;pwd=6VxbvqwMazBseP'
    )
    cursor = conn.cursor()
    try:
        cursor.execute(CREATE_LOCATIONS_TBL)
        cursor.commit()
        # NB: you won't get an IntegrityError when reading
    except (pypyodbc.ProgrammingError, pypyodbc.Error) as e:
        print("Error while trying to create table. Error is= ", e)
        time.sleep(3)
    try:
        cursor.execute(sqlCommand)
        checkTable = cursor.fetchall()
        # NB: you won't get an IntegrityError when reading
import pypyodbc
from tblBrokerTransConsol import tblBrokerTransConsole
from tblBrokerTransactions import tblBrokerTrans
from tblFxRates import tblFxRates

connection = pypyodbc.connect(
    'Driver={SQL Server};Server=.;Database=MetalTiger')
print("success")

# insert query
'''
cursor = connection.cursor()
insert = "INSERT INTO ShareMaster(Code,Name,StockType) values('k','kk','kkk')"
cursor.execute(insert)
cursor.commit()
cursor.execute("SELECT * FROM ShareMaster")
s = ''
for row in cursor:
    print(row)
'''

cur = tblFxRates()
cur.BuildCurrency()

'''
sheet_name = 'Certificated CAD'
if sheet_name == 'Certificated CAD':
    brokerconsole = tblBrokerTrans()
    brokerconsole.InsertDataBuild(sheet_name=sheet_name)
else:
    brokerconsole = tblBrokerTransConsole()
    brokerconsole.InsertDataBuld(sheet_name=sheet_name)
'''

connection.close()
# df_final['new_patient'].replace(True,1)
# df_final['ind_noshow'].fillna(0, inplace=True)

df_contacto = df_unique[["id_paciente", "telefono1", "telefono2"]]
df_contacto.fillna(value=pd.np.nan, inplace=True)
df_contacto['telefono1'].fillna(0, inplace=True)
df_contacto['telefono2'].fillna(0, inplace=True)
df_contacto.loc[df_contacto.telefono1 != 0, 'telefono1'] = 1
df_contacto.loc[df_contacto.telefono2 != 0, 'telefono2'] = 1
# number of phone numbers on file
df_contacto["cant_telef"] = df_contacto.telefono1 + df_contacto.telefono2
df_contacto = df_contacto[['id_paciente', 'cant_telef']]

# Database parameters
cnxn = pypyodbc.connect("Driver={SQL Server};"
                        "Server=172.24.144.48;"
                        "Database=NDMC_ISOFT;")

# No-show query
query_anul = """
SELECT CAGE_ANUL.CODIGO_CLIENTE AS ID_PACIENTE, FECHA_EMISION, FECHA, FECHA_ANUL
FROM [NDMC_ISOFT].[dbo].[CAGENDAS_ANUL] CAGE_ANUL
LEFT JOIN [NDMC_ISOFT].[dbo].[CLIENTES] CLI
    ON CLI.CODIGO_CLIENTE = CAGE_ANUL.CODIGO_CLIENTE
WHERE FECHA <= (CASE WHEN DATEPART(WEEKDAY, GETDATE()) = 7
                     THEN DATEADD(DAY, 2, CONVERT(date, GETDATE()))
                     ELSE DATEADD(DAY, 1, CONVERT(date, GETDATE())) END)
  AND FECHA >= DATEADD(DAY, -365, CONVERT(date, GETDATE()))
"""
df_anul = pd.read_sql_query(query_anul, cnxn)
import pypyodbc
import datetime

driver = 'driver={SQL Server}'
username = ''
password = ''
server = ''
database = ''

conn = pypyodbc.connect(driver=driver, server=server, database=database,
                        uid=username, pwd=password)  # use the SQL Server login, not AD
cur = conn.cursor()
conn.execute()

# changing the blank space in end= to a ',' will make it comma-delimited
for d in cur.description:
    print(d[0], end=" ")
for row in cur.fetchall():
    for field in row:
        print(field, end=" ")
    print('')
# constructing the sql for this doesn't seem like it'd be too hard...
# This is designed to hold the bittrex api functions accessed by the trading folder
from bittrex import Bittrex
from config import api_key, api_secret, dbcall
from operator import itemgetter
import pandas as pd
import time
import datetime
import pypyodbc
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt

# Database access
cnxn = pypyodbc.connect(dbcall)
cursor = cnxn.cursor()


def custom_term_linear_market(coin, interval, ahead):
    cnxn = pypyodbc.connect(dbcall)
    cursor = cnxn.cursor()
    coin = coin.replace('-', '')
    string = "SELECT ask, date FROM dbo.%s WHERE date > GETDATE()-%s ORDER BY date DESC" % (
        coin, interval)
    data = pd.read_sql(string, cnxn)
    df = pd.DataFrame(data)
    df['index1'] = df.index
    x = len(df['index1']) + ahead  # amount of intervals (coin prices) ahead
    linear_mod = linear_model.LinearRegression()  # defining the linear regression model
    dates = np.reshape(df['index1'], (len(df['index1']), 1))  # converting to matrix of n X 1
__author__ = 'pramod'
import requests
import json
import pypyodbc

# [{"DisplayNameEnglish":"17-Hydroxypregnenolone","DisplayNameSpanish":"17-Hidroxipregnenolona","MenuId":1,"DrugPriceSet":0,"GoodRx":0,"Hcbb":0}]
# MenuId is 7 for consumer search
url = "https://healthcarebluebook.com/api/procedures"
r = requests.get(url)

# connection = pypyodbc.connect("driver={SQL Server};server=work-pc;database=test;uid=root;pwd=root")
connection = pypyodbc.connect(
    "driver={SQL Server};server=astaging.arvixecloud.com;database=NS_Import;uid=stagingsqladmin;pwd=p@ssw0rd"
)
cursor = connection.cursor()
try:
    pricing = r.json()
    for info in pricing:
        if info['MenuId'] == 7:
            SQLCommand = ("INSERT INTO HBB_Procedure_Dump "
                          "(ProcedureName, HBBUrl) "
                          "VALUES (?,?)")
            Values = [
                info['DisplayNameEnglish'],
                "https://healthcarebluebook.com/page_SearchResults.aspx?SearchTerms="
                + info['DisplayNameEnglish']
            ]
def file_to_db(file='', dsn='', table='', delim='\t', batchsize=100,
               drop_table=0, ignore_errors=0, colwidth=250, clean_colnames=0,
               skip_file_length=0):
    '''
    Streams the file directly to the DB without reading it into memory.
    Useful for when you need to send very large files to a DB.

    Must specify:
    - The source file you want to read. (file)
    - Delimiter for the source file. (delim)
    - The target table. (table)

    Defaults to 100 rows at a time, but can be changed with the batchsize argument.
    ignore_errors=1 will ignore when an error occurs and continue on.
    Keep in mind all records for that batch (batchsize rows) will be lost.
    drop_table=1 will attempt to drop the table first.
    colwidth allows you to change the table column widths. Default is 250.
    ------------------------------------------------------------------------
    EXAMPLE: STREAM A CSV TO A DATABASE
    x = dfp.dataFile().file_to_db(
        file='c:/data/x.csv'    # Read the file c:/data/x.csv
        ,delim=','              # Set it to comma-delimited
        ,dsn='my_odbc_cnx'      # Specify the odbc dsn
        ,table='table_x'        # Specify the target table name
        ,drop_table=1           # Drop the table if exists
        ,batchsize=50           # Insert 50 records at a time
        )
    '''
    # Correct delimiters if needed
    if delim in ['tabs', 'tab', 'tsv']:
        delim = '\t'
    if delim in ['comma', 'csv']:
        delim = ','
    if delim in ['pipe', 'psv']:
        delim = '|'
    if file == '':
        print('No file name supplied.')
        return
    filename = find_files(file)
    if len(filename) == 0:
        return 'No file found with name: {f}'.format(f=file)
    filename = filename[0]
    if table == '':
        table = 'IMPORT_TABLE_{rn}'.format(rn=random.randint(1, 9999999))
        print('No table name provided. Importing to table: {table}'.format(table=table))
    print('Connecting to DB: {dsn}'.format(dsn=dsn))
    conn = pypyodbc.connect('DSN={dsn}'.format(dsn=dsn))
    curs = conn.cursor()
    if drop_table == 1:
        try:
            curs.execute("DROP TABLE [{table}];".format(table=table))
            curs.commit()
        except:
            print('Could not drop table: {table}'.format(table=table))
    # Open the file in read mode
    try:
        infile = open(filename, 'r')
    except:
        print('Could not open file')
        return
    file_length = get_file_line_count(filename) if skip_file_length == 0 else 1
    if file_length == 0:
        print('File length of zero.')
        return
    # Line counter for keeping track of read progress
    line_counter = 1
    insert_sql_batch = ''
    # Loop through file
    for line in infile:
        # Process header row and create table with those field names
        if line_counter == 1:
            rd = csv.reader([line], delimiter=delim, quotechar='"')
            linedata = [x for x in rd][0]
            c = 0
            while c < len(linedata):
                linedata[c] = linedata[c].strip()
                if linedata[c] == '':
                    linedata[c] = 'COL{c}'.format(c=c)
                c += 1
            if clean_colnames == 1:
                linedata = [stringclean(x, nospecial=1).replace(' ', '_').replace('__', '_')
                            for x in linedata]
            sql = "CREATE TABLE {table} ({cols});".format(
                table=table,
                cols=", ".join([" [" + stringclean(y).strip('"/. ').replace(']', '').replace('[', '')
                                + "] VARCHAR({cw}) ".format(cw=colwidth)
                                for y in linedata]))
            print("Table Creation SQL:")
            print(sql)
            try:
                curs.execute(sql)
                curs.commit()
            except:
                print('Could not create table. Probably because table already exists.')
            line_counter += 1
        # All other lines besides first line
        else:
            rd = csv.reader([line], delimiter=delim, quotechar='"')
            linedata = [x for x in rd][0]
            insert_sql = 'INSERT INTO [{table_name}] VALUES ('.format(table_name=table)
            for col in linedata:
                # Remove quotes around text and replace single quotes with
                # double-single-quotes to be SQL safe.
                cleaned = stringclean(col).strip(""" "' """).replace("'", "''")
                insert_sql += "'{x}',".format(x=cleaned)
            insert_sql = insert_sql.strip(', -') + ');\n'
            insert_sql_batch += insert_sql
            line_counter += 1
        if line_counter % batchsize == 0:
            try:
                # the batch is a single multi-statement string, so execute it directly
                curs.execute(insert_sql_batch)
                curs.commit()
            except Exception as ex:
                print('Error occurred')
                print(str(ex))
                if ignore_errors == 1:
                    insert_sql_batch = ''
                    continue
                if ignore_errors == 0:
                    break
            insert_sql_batch = ''
            status_report = """{lc} / {fl} ({pct}%) {f}""".format(
                lc=line_counter, fl=file_length,
                pct=round((line_counter * 1.0) / file_length * 100, 1),
                f=filename[:50])
            print(status_report)
        if line_counter % (batchsize * 5) == 0:
            gc.collect()
    # Send any remaining partial batch
    try:
        curs.execute(insert_sql_batch)
        curs.commit()
    except:
        pass
    return
    timiI = polarit[1]
    timiI = timiI.strip('\n')
    DI[kleidi] = float(timiI)
dici.close()

# Access to the database that contains the customers' evaluations and useful
# information for analysis (data set or training set or annotated set).
db_file = "C:/PhD/Annotated1.accdb"  # Add the appropriate path.
user = '******'
password = ''
odbc_conn_str1 = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
odbc_conn_str2 = "DBQ={};UID={};PWD={}".format(db_file, user, password)
conn = odbc_conn_str1 + odbc_conn_str2
# odbc_conn_str = 'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;UID=%s;PWD=%s' % \
#     (db_file, user, password)
conn = pypyodbc.connect(conn)
rev = conn.cursor()
sql = "select * from Reviews"
rev.execute(sql)
x = rev.fetchall()  # Retrieving the customers' reviews.
for pedio in x:
    frasi = str(pedio[3])
    kwd = str(pedio[0])
    # Initialization of: (1) the number of aspects in each customer's review.
    #                    (2) the overall evaluation of the quality, customer service
    #                        and image functions in each customer's review.
    sall = 0
    ns = 0
    nq = 0
    ni = 0
        prepayId = genId()
        date = int(time.mktime(time.strptime(budat, '%Y%m%d')))
        l.append([prepayId, int(lifnr), bukrs, gjahr, belnr, buzei,
                  date, dmbtr, sgtxt, now, now])
    lsh_cur.executemany(
        """insert into prepay_info(prepay_id, vid, company_id, year, document_id,
               line_no, date, it_amount, comment, created_at, updated_at)
           values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
           on duplicate key update `date`=VALUES(date), `it_amount`=VALUES(it_amount),
               `comment`=VALUES(comment), `updated_at`=VALUES(updated_at)""", l)
    lsh_con.commit()


if __name__ == "__main__":
    cfg = json.loads(open("conf/batch.conf").read())
    gConfig = cfg[cfg['env']]
    wm_con = pypyodbc.connect('DSN=wumart;uid=lscx;pwd=lscx;')
    wm_cur = wm_con.cursor()
    lsh_con = MySQLdb.connect(host=gConfig['sql_host'], port=gConfig['sql_port'],
                              user=gConfig['sql_user'], passwd=gConfig['sql_passwd'],
                              db=gConfig['sql_db'], charset="utf8")
    lsh_cur = lsh_con.cursor()
    process(lshDB=lsh_cur, wumartDB=wm_cur, redisCli=None, data=None)
from __future__ import division, print_function, absolute_import
import pandas as pd
import pypyodbc
import numpy as np
import pandas.io.sql
import statsmodels.api as sm

# Create the connection to the DB
conn = pypyodbc.connect('')

# Algeria
country = 'dz'
currency = 'dzd'

# query db
sql = """
"""
df = pandas.io.sql.read_sql(sql, conn)
a = df.pivot_table(index=['gid', 'period'], values=['amount'],
                   columns=['siemens_rb_detailed_element'],
                   aggfunc=np.sum).fillna(0)
a = a.loc[:, (a != 0).any(axis=0)]

sql = """
# -*- coding: utf-8 -*-
import scrapy
import pandas as pd
from scrapy.shell import inspect_response
import re
import urllib3
from bs4 import BeautifulSoup
import pypyodbc

db = pypyodbc.connect('Driver={SQL Server Native Client 11.0};'
                      'Server=(localdb)\\MSSQLLocalDB;'
                      'Database=HR;')
cursor = db.cursor()

skillarray = [
    'java', 'ruby', 'nodejs', 'c#', 'c++', 'tsql', '.net', 'python',
    'algorithm', 'jsp', 'j2ee', 't-sql', 'ios', 'android', 'javascript',
    'rest', 'soap', 'servicenow', 'hybris', 'websphere', 'angularjs', 'aws'
]


class Jobs:
    title = ""
    company = ""
    url = ""
    location = ""
    salary = ""
    summary = ""
    max_sal = 0
    min_sal = 0
#
#
# Version   Date   Info
# 1.0       2017   Initial Version
# Hello Python 3
# --------------------------------------------------------
import os
import pypyodbc as db

# Fetch the user-defined system variable MYDOMAIN (defined by me)
# domaine = os.getenv('MYDOMAIN')
# mySQLServer = (domaine + "\\SQL1")
# myDBase = "Stamdaten"

connect_string = 'DRIVER={SQL Server};SERVER=SQL1;DATABASE=Stammdaten;Trusted_Connection=yes'
connection = db.connect(connect_string)
cursor = connection.cursor()

mySQLQuery = ("""
    SELECT id, konto, bereich, zeitstempel
    FROM dbo.Zuordnung_Konto_Bereich
    WHERE bereich = 'Rohstoffe'
    """)
cursor.execute(mySQLQuery)
results = cursor.fetchall()
for row in results:
    id = row[0]
    konto = row[1]
    bereich = row[2]
import csv as objCSV
import pypyodbc
import pandas as pd

con = pypyodbc.connect('Driver={SQL Server};'
                       'Server=localhost;'
                       'Database=AdventureWorks2012_Data;'
                       'Trusted_Connection=yes;')
df = pd.read_sql("SELECT * from [dbo].[Project_Data]", con)

filename = 'myRecords.csv'
df.to_csv(filename, sep=',', encoding='utf-8')
print(df)
con.close()
def execute_sql_query(connection_string, query_string):
    # print(query_string)
    with pypyodbc.connect(connection_string) as connection:
        with connection.cursor() as cursor:
            cursor.execute(query_string)
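A closing usage sketch for execute_sql_query; the connection string and statement are placeholders:

# placeholder DSN and statement; any valid ODBC connection string works here
execute_sql_query("DSN=my_dsn",
                  "CREATE TABLE demo (id INT PRIMARY KEY, label VARCHAR(64))")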