def run_test_153(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == "IDS":
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)  # 1 = connection-level option
    result = ibm_db.exec_immediate(conn, "select * from org")
    row = ibm_db.fetch_assoc(result)
    while row:
        print("%4d %-14s %4d %-10s%-13s " % (
            row["DEPTNUMB"], row["DEPTNAME"], row["MANAGER"],
            row["DIVISION"], row["LOCATION"]))
        row = ibm_db.fetch_assoc(result)
def run_test_156(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == 'IDS':
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)  # 1 = connection-level option
    result = ibm_db.exec_immediate(conn, "select * from staff")
    row = ibm_db.fetch_assoc(result)
    while row:
        if row['YEARS'] is None:
            row['YEARS'] = ''
        if row['COMM'] is None:
            row['COMM'] = ''
        print(row['ID'], row['NAME'], row['JOB'], row['YEARS'],
              row['SALARY'], row['COMM'])
        row = ibm_db.fetch_assoc(result)
    result2 = ibm_db.exec_immediate(
        conn,
        "select * from department where substr(deptno,1,1) in ('A','B','C','D','E')")
    row2 = ibm_db.fetch_assoc(result2)
    while row2:
        if row2['MGRNO'] is None:
            row2['MGRNO'] = ''
        print(row2['DEPTNO'], row2['DEPTNAME'], row2['MGRNO'],
              row2['ADMRDEPT'], row2['LOCATION'])
        row2 = ibm_db.fetch_assoc(result2)
def run_test_155(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    serverinfo = ibm_db.server_info(conn)
    result = ibm_db.exec_immediate(conn, "select * from employee where lastname in ('HAAS','THOMPSON', 'KWAN', 'GEYER', 'STERN', 'PULASKI', 'HENDERSON', 'SPENSER', 'LUCCHESSI', 'OCONNELL', 'QUINTANA', 'NICHOLLS', 'ADAMSON', 'PIANKA', 'YOSHIMURA', 'SCOUTTEN', 'WALKER', 'BROWN', 'JONES', 'LUTZ', 'JEFFERSON', 'MARINO', 'SMITH', 'JOHNSON', 'PEREZ', 'SCHNEIDER', 'PARKER', 'SMITH', 'SETRIGHT', 'MEHTA', 'LEE', 'GOUNOT')")
    i = 0
    row = ibm_db.fetch_assoc(result)
    while row:
        i += 1
        if serverinfo.DBMS_NAME[0:3] == 'IDS':
            if row['midinit'] is None:
                row['midinit'] = ''
            print("%6s %12s %s %-15s%3s %4s %10s %-8s%4d %s%10s %12s %12s %12s" %
                  (row['empno'], row['firstnme'], row['midinit'], row['lastname'],
                   row['workdept'], row['phoneno'], row['hiredate'], row['job'],
                   row['edlevel'], row['sex'], row['birthdate'], row['salary'],
                   row['bonus'], row['comm']))
        else:
            if row['MIDINIT'] is None:
                row['MIDINIT'] = ''
            print("%6s %12s %s %-15s%3s %4s %10s %-8s%4d %s%10s %12s %12s %12s" %
                  (row['EMPNO'], row['FIRSTNME'], row['MIDINIT'], row['LASTNAME'],
                   row['WORKDEPT'], row['PHONENO'], row['HIREDATE'], row['JOB'],
                   row['EDLEVEL'], row['SEX'], row['BIRTHDATE'], row['SALARY'],
                   row['BONUS'], row['COMM']))
        row = ibm_db.fetch_assoc(result)
    print("%d record(s) selected." % i)
def generalSetting(p_vehicleNumber):
    if p_vehicleNumber is None:
        return
    if p_vehicleNumber not in vehicleDetails:
        pubnub.publish(channel=p_vehicleNumber,
                       message={"warning": "Vehicle Not Registered with the Automated System"})
        return
    if vehicleDetails[p_vehicleNumber][DETAILS_BLOCK_STATUS] != 0:
        appSetting(p_vehicleNumber, vehicleDetails[p_vehicleNumber][DETAILS_BLOCK_STATUS])
        return
    pubnub.publish(channel=p_vehicleNumber,
                   message={"vehicleNumber": p_vehicleNumber,
                            "availableBal": vehicleDetails[p_vehicleNumber][DETAILS_BALANCE],
                            "ownerName": vehicleDetails[p_vehicleNumber][DETAILS_OWNER_NAME],
                            "vehicleType": vehicleDetails[p_vehicleNumber][DETAILS_VEHICLE_TYPE]})
    l_connection = dB_init()
    if l_connection is None:
        print("Database Connection Failed on Database Query")
        return
    l_database_query = ("SELECT * FROM " + DB_SCHEMA + "." + DATABASE_TABLE_NAME_1 +
                        " WHERE VEHICLE_NUMBER = '" + str(p_vehicleNumber) + "'")
    try:
        l_db_statement = ibm_db.exec_immediate(l_connection, l_database_query)
        l_temp_dict = ibm_db.fetch_assoc(l_db_statement)
    except Exception as e:
        logging.error("rfid Register exec/fetch_ERROR : " + str(e))
        return
    while l_temp_dict:
        if l_temp_dict["VEHICLE_NUMBER"] == p_vehicleNumber:
            vehicleDetails[p_vehicleNumber][DETAILS_BALANCE] = l_temp_dict["WALLET_BAL"]
        try:
            l_temp_dict = ibm_db.fetch_assoc(l_db_statement)
        except Exception as e:
            logging.error("rfid Register fetch_ERROR : " + str(e))
            break  # avoid looping forever on a failed fetch
    ibm_db.free_stmt(l_db_statement)
    ibm_db.close(l_connection)
    pubnub.publish(channel=p_vehicleNumber,
                   message={"vehicleNumber": p_vehicleNumber,
                            "availableBal": vehicleDetails[p_vehicleNumber][DETAILS_BALANCE],
                            "ownerName": vehicleDetails[p_vehicleNumber][DETAILS_OWNER_NAME],
                            "vehicleType": vehicleDetails[p_vehicleNumber][DETAILS_VEHICLE_TYPE]})
def run_test_152(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == "IDS":
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    result = ibm_db.exec_immediate(conn, "select * from project")
    row = ibm_db.fetch_assoc(result)
    while row:
        if row["MAJPROJ"] is None:
            row["MAJPROJ"] = ""
        print("%6s %-24s %3s %6s%7s %10s %10s %6s" % (
            row["PROJNO"], row["PROJNAME"], row["DEPTNO"], row["RESPEMP"],
            row["PRSTAFF"], row["PRSTDATE"], row["PRENDATE"], row["MAJPROJ"]))
        row = ibm_db.fetch_assoc(result)
def run_test_150(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == 'IDS':
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    result = ibm_db.exec_immediate(conn, "select * from staff")
    row = ibm_db.fetch_assoc(result)
    while row:
        if row['YEARS'] is None:
            row['YEARS'] = 0
        if row['COMM'] is None:
            row['COMM'] = ''
        print("%5d %-10s %5d %-7s %5s %15s %10s " % (
            row['ID'], row['NAME'], row['DEPT'], row['JOB'],
            row['YEARS'], row['SALARY'], row['COMM']))
        row = ibm_db.fetch_assoc(result)
def appTransaction(p_vehicleNum):
    if p_vehicleNum not in vehicleDetails:
        return
    l_connection = dB_init()
    if l_connection is None:
        print("Database Connection Failed on Database Query")
        return
    dateorder_query = ("SELECT * FROM DASH6461.TOLL_DATA WHERE VEHICLE_NUMBER = '" +
                       str(p_vehicleNum) + "' ORDER BY DATES DESC,TIME DESC LIMIT 5")
    try:
        l_db_statement = ibm_db.exec_immediate(l_connection, dateorder_query)
        l_temp_dict = ibm_db.fetch_assoc(l_db_statement)
    except Exception as e:
        logging.error("appTransaction exec/fetch_ERROR : " + str(e))
        return
    l_count = 0
    while l_temp_dict:
        l_new_date = l_temp_dict["DATES"].strftime("%d-%m-%Y")
        l_new_time = l_temp_dict["TIME"].strftime("%H:%M:%S")
        l_final_date = l_new_date + " " + l_new_time
        vehicleTransaction[l_count] = [l_final_date, l_temp_dict["TOLL_NAME"],
                                       l_temp_dict["AMOUNT_DEDUCT"],
                                       l_temp_dict["AMOUNT_ADDED"],
                                       l_temp_dict["AVAI_BAL"]]
        l_count += 1
        try:
            l_temp_dict = ibm_db.fetch_assoc(l_db_statement)
        except Exception as e:
            logging.error("appTransaction fetch_ERROR : " + str(e))
            break  # avoid looping forever on a failed fetch
    transVehicle[p_vehicleNum] = vehicleTransaction
    ibm_db.free_stmt(l_db_statement)
    ibm_db.close(l_connection)
    pubnub.publish(channel=p_vehicleNum + p_vehicleNum,
                   message=transVehicle[p_vehicleNum])
def run_test_158(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == 'IDS':
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    result = ibm_db.exec_immediate(conn, "SELECT * FROM staff WHERE id < 50")
    output = ''
    row = ibm_db.fetch_assoc(result)
    while row:
        output += (str(row['ID']) + ', ' + row['NAME'] + ', ' + str(row['DEPT']) + ', ' +
                   row['JOB'] + ', ' + str(row['YEARS']) + ', ' + str(row['SALARY']) +
                   ', ' + str(row['COMM']))
        row = ibm_db.fetch_assoc(result)
    result2 = ibm_db.exec_immediate(conn, "SELECT * FROM department WHERE substr(deptno,1,1) in ('A','B','C','D','E')")
    row2 = ibm_db.fetch_assoc(result2)
    while row2:
        if row2['MGRNO'] is None:
            row2['MGRNO'] = ''
        if row2['LOCATION'] is None:
            row2['LOCATION'] = ''
        output += (str(row2['DEPTNO']) + ', ' + row2['DEPTNAME'] + ', ' + str(row2['MGRNO']) +
                   ', ' + row2['ADMRDEPT'] + ', ' + row2['LOCATION'])
        row2 = ibm_db.fetch_assoc(result2)
    result3 = ibm_db.exec_immediate(conn, "SELECT * FROM employee WHERE lastname IN ('HAAS','THOMPSON', 'KWAN', 'GEYER', 'STERN', 'PULASKI', 'HENDERSON', 'SPENSER', 'LUCCHESSI', 'OCONNELL', 'QUINTANA', 'NICHOLLS', 'ADAMSON', 'PIANKA', 'YOSHIMURA', 'SCOUTTEN', 'WALKER', 'BROWN', 'JONES', 'LUTZ', 'JEFFERSON', 'MARINO', 'SMITH', 'JOHNSON', 'PEREZ', 'SCHNEIDER', 'PARKER', 'SMITH', 'SETRIGHT', 'MEHTA', 'LEE', 'GOUNOT')")
    row3 = ibm_db.fetch_tuple(result3)
    while row3:
        output += row3[0] + ', ' + row3[3] + ', ' + row3[5]
        row3 = ibm_db.fetch_tuple(result3)
    print(output)
def run_test_195(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if (server.DBMS_NAME[0:3] != 'IDS') and (server.DBMS_NAME[0:2] != "AS"):
        drop = 'DROP TABLE test_195'
        try:
            result = ibm_db.exec_immediate(conn, drop)
        except:
            pass
        create = 'CREATE TABLE test_195 (id INTEGER, data XML)'
        result = ibm_db.exec_immediate(conn, create)
        insert = "INSERT INTO test_195 values (0, '<TEST><def><xml/></def></TEST>')"
        ibm_db.exec_immediate(conn, insert)
        sql = "SELECT data FROM test_195"
        stmt = ibm_db.prepare(conn, sql)
        ibm_db.execute(stmt)
        result = ibm_db.fetch_assoc(stmt)
        while result:
            print("Output:", result)
            result = ibm_db.fetch_assoc(stmt)
        ibm_db.close(conn)
    else:
        print("Native XML datatype is not supported.")
def collect(self, conn):
    sql = self.sql
    try:
        stmt = ibm_db.prepare(conn, sql)
        if ibm_db.execute(stmt):
            result = ibm_db.fetch_assoc(stmt)
            while result != False:
                if self.add_inst_name:
                    if 'INST' not in result.keys():
                        result['INST'] = self.server_info['inst_name'].upper()
                    else:
                        result['INST'] = result['INST'].upper()
                if self.add_db_name:
                    if 'DB' not in result.keys():
                        result['DB'] = self.server_info['db_name'].upper()
                    else:
                        result['DB'] = result['DB'].upper()
                self.publish(result)
                result = ibm_db.fetch_assoc(stmt)
    except Exception:
        self.log.error(traceback.format_exc())
    return True
def collect(self, conn):
    try:
        # reorgchk tables
        (stmt, param1, param2) = ibm_db.callproc(conn, self.procname, tuple(self.params))
        if stmt:
            result = ibm_db.fetch_assoc(stmt)
            while result != False:
                if self.add_inst_name:
                    if 'INST' not in result.keys():
                        result['INST'] = self.server_info['inst_name'].upper()
                    else:
                        result['INST'] = result['INST'].upper()
                if self.add_db_name:
                    if 'DB' not in result.keys():
                        result['DB'] = self.server_info['db_name'].upper()
                    else:
                        result['DB'] = result['DB'].upper()
                self.publish(result)
                result = ibm_db.fetch_assoc(stmt)
    except Exception:
        self.log.error(traceback.format_exc())
    return True
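# The two collect() variants above repeat the same fetch loop. A minimal
# helper sketch of that pattern (the function name is illustrative, not from
# the original source): ibm_db.fetch_assoc() returns a dict per row and False
# once the result set is exhausted, so the loop runs until False.
import ibm_db

def fetch_all_assoc(stmt):
    rows = []
    row = ibm_db.fetch_assoc(stmt)
    while row:
        rows.append(row)
        row = ibm_db.fetch_assoc(stmt)
    return rows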
def zins():
    sql = "SELECT MAX(ROUTEID) AS MAX FROM ROUTEDB"
    print("Generating key for RouteDB")
    stmt = ibm_db.exec_immediate(conn, sql)
    prevID = ibm_db.fetch_assoc(stmt)
    ID = str(prevID['MAX'] + 1)
    routename = '47B'
    sql = "SELECT CID FROM CLASS WHERE MODEID = 1"
    stmt = ibm_db.exec_immediate(conn, sql)
    a = True
    classes = []
    while a != False:
        a = ibm_db.fetch_assoc(stmt)
        if a != False:
            classes.append(a)
    a = ''
    for i in classes:
        a = a + ',CID_' + str(i['CID']) + " INTEGER"
    a = a.replace(",", "", 1)
    sql = ("CREATE TABLE ROUTE_" + routename + "(STOPID INTEGER NOT NULL PRIMARY KEY," +
           a + ",FOREIGN KEY(STOPID) REFERENCES STOPS)")
    ibm_db.exec_immediate(conn, sql)
    print("Route_" + routename + " table created successfully..")
    # Additional code: insert values into the route_<id> table
    for stop in (1, 2, 4, 5):
        sqli = "INSERT INTO ROUTE_" + routename + " VALUES(" + str(stop) + ",1,1)"
        ibm_db.exec_immediate(conn, sqli)
        print("value inserted...")
    sql = "SELECT STOPID FROM ROUTE_" + routename
    stmt = ibm_db.exec_immediate(conn, sql)
    a = True
    stops = []
    while a != False:
        a = ibm_db.fetch_assoc(stmt)
        if a != False:
            stops.append(a)
    a = ''
    for i in stops:
        a = a + ',STOP_' + str(i['STOPID']) + " TIME"
    a = a.replace(",", "", 1)
    sqlF = ("CREATE TABLE F_" + routename + "(TRIPID INTEGER,CID INTEGER," + a +
            ",COUNT INTEGER, FOREIGN KEY(CID) REFERENCES CLASS)")
    sqlR = ("CREATE TABLE R_" + routename + "(TRIPID INTEGER,CID INTEGER," + a +
            ",COUNT INTEGER, FOREIGN KEY(CID) REFERENCES CLASS)")
    ibm_db.exec_immediate(conn, sqlF)
    print("Forward journey table created..")
    ibm_db.exec_immediate(conn, sqlR)
    print("Return journey table created..")
    # cleanup
    sqlterm = "INSERT INTO ROUTEDB VALUES(" + ID + ",'" + routename + "',1)"
    ibm_db.exec_immediate(conn, sqlterm)
    print("All formalities completed successfully. Route successfully registered with database...")
def appHistoricalGraph(p_containerid, p_timeSpan):
    global DATABASE_TABLE_NAME
    # Connect to the database
    l_connection = dB_init()
    if l_connection is None:
        logging.error("Database Connection Failed on Database Query")
        return
    # Evaluate the number of days to query the db
    p_timeSpan = p_timeSpan - 1
    l_refill_history = dict()
    l_consumption_history = dict()
    l_temp_dict = dict()
    l_sdat = datetime.datetime.now().date()
    l_edat = l_sdat - datetime.timedelta(days=p_timeSpan)
    l_sdate = l_sdat.strftime('%Y-%m-%d')
    l_edate = l_edat.strftime('%Y-%m-%d')
    # Seed the history dictionaries with one entry per day in the time span
    for i in range(p_timeSpan, -1, -1):
        l_edat_loop = l_sdat - datetime.timedelta(days=i)
        l_edate_loop = l_edat_loop.strftime('%Y-%m-%d')
        l_refill_history[l_edate_loop] = [p_containerid, 0, 0, 0]
        l_consumption_history[l_edate_loop] = [p_containerid, 0, 0, 0]
    # Parse the data from the database and update the dictionaries
    l_twodate_query = ("SELECT * FROM " + DB_SCHEMA + "." + DATABASE_TABLE_NAME +
                       " WHERE DATES BETWEEN DATE('" + l_edate + "') AND DATE('" +
                       l_sdate + "') AND SCALE_ID =" + p_containerid)
    try:
        l_db_statement = ibm_db.exec_immediate(l_connection, l_twodate_query)
        l_temp_dict = ibm_db.fetch_assoc(l_db_statement)
    except Exception as e:
        logging.error("appHistoricalGraph_twodatequery exec/fetch_ERROR : " + str(e))
        return
    while l_temp_dict:
        if l_temp_dict["SCALE_ID"] == p_containerid:
            l_date = l_temp_dict["DATES"].strftime('%Y-%m-%d')
            if l_temp_dict["STATUS"] == 0:
                l_refill_history[l_date] = [l_temp_dict["SCALE_ID"], l_temp_dict["TIME"],
                                            "%.2f" % l_temp_dict["QUANTITY"],
                                            l_temp_dict["STATUS"]]
            else:
                l_consumption_history[l_date] = [l_temp_dict["SCALE_ID"], l_temp_dict["TIME"],
                                                 "%.2f" % l_temp_dict["QUANTITY"],
                                                 l_temp_dict["STATUS"]]
        try:
            l_temp_dict = ibm_db.fetch_assoc(l_db_statement)
        except Exception as e:
            logging.error("appHistoricalGraph_twodatequery fetch_ERROR : " + str(e))
            break  # avoid looping forever on a failed fetch
    pubnub.publish(channel="kitchenApp-refillHistory", message=l_refill_history)
    pubnub.publish(channel="kitchenApp-consumptionHistory", message=l_consumption_history)
    # Delete the history
    del l_refill_history, l_consumption_history
    # Close the database connection
    ibm_db.free_stmt(l_db_statement)
    ibm_db.close(l_connection)
def print_results(conn, sql):
    # sql = "SELECT " + column + " FROM " + table + ";"
    print("SQL: ", sql)
    stmt = ibm_db.exec_immediate(conn, sql)
    dictionary = ibm_db.fetch_assoc(stmt)
    while dictionary != False:
        print('===================')
        print("ISBN: ", dictionary["ISBN"])
        print("Title: ", dictionary["TITLE"])
        print("Author: ", dictionary["AUTHOR"])
        dictionary = ibm_db.fetch_assoc(stmt)
def mdr_account_info_from_query():
    """
    MDR account data loaded from Ragnar's query
    :return:
    """
    status = connect_mdr_db()
    print(48 * "-")
    print("# Generating MDR SL Account storage")
    print(48 * "-")
    if status[0] == 0:
        conn = status[1]
        sql = """with CloudTeamNames (SL_Account_Name, PROJECT_ID, DOU_ID, PROJECT_NAME, DOUSTATUS,
                 DOU_VERSION, DOU_NOTES_DOC_ID, MAJOR, MINOR, ACCOUNT_GROUP_ID,
                 PERFORMING_ICA_NUM, RECEIVING_ICA_NUM, ACCOUNT_ID, BILLING_METHOD_NAME, NOTES_PROJECT_ID, DOU_Name, PROJECT_YEAR,
                 STRATEGIC_OFFERING_NAME, Y_to_Y_Source_Project_ID, TUAM_BILLABLE, RowNum) as (
                 SELECT distinct billing_code as SL_Account_Name, dou.PROJECT_ID, dou.DOU_ID, dou.PROJECT_NAME,
                 DOUSTATUS, DOU_VERSION, DOU_NOTES_DOC_ID, MAJOR, MINOR, ACCOUNT_GROUP_ID,
                 PERFORMING_ICA_NUM, RECEIVING_ICA_NUM, ACCOUNT_ID, BILLING_METHOD_NAME,
                 NOTES_PROJECT_ID, dou.PROJECT_NAME, dou.PROJECT_YEAR, STRATEGIC_OFFERING_NAME, prj.Y_to_Y_Source_Project_ID,
                 case when TUAM_BILLABLE_Flag=1 then 'Yes' else 'No' end,
                 ROW_NUMBER() OVER(partition by prj.Y_to_Y_Source_Project_ID, ds.billing_code order by dou.project_year desc, ds.billing_code)
                 FROM dstrpt.Listof_Pipeline_Prjdous dou
                 inner join DST.DOU_SERVICE ds on dou.dou_id=ds.dou_id
                 inner join dst.project prj on prj.Project_ID=dou.Project_id
                 where not(ds.billing_code is null) and ds.billing_code<>''
                 order by dou.project_year desc, dou.project_id, ds.billing_code)
                 select * from cloudTeamNames
                 where rownum=1 and SL_Account_Name like 'SL%'"""
        stmt = ibm_db.exec_immediate(conn, sql)
        dictionary = ibm_db.fetch_assoc(stmt)
        while dictionary:
            mdr = dict()
            mdr['CLOUD_TEAM_NAME'] = dictionary['SL_ACCOUNT_NAME']
            mdr['PROJECT_ID'] = dictionary['PROJECT_ID']
            mdr['DOU_ID'] = dictionary['DOU_ID']
            mdr['DOUSTATUS'] = dictionary['DOUSTATUS']
            mdr['DOU_VERSION'] = dictionary['DOU_VERSION']
            mdr['DOU_NOTES_DOC_ID'] = dictionary['DOU_NOTES_DOC_ID']
            mdr['MAJOR'] = dictionary['MAJOR']
            mdr['MINOR'] = dictionary['MINOR']
            mdr['ACCOUNT_GROUP_ID'] = dictionary['ACCOUNT_GROUP_ID']
            mdr['PERFORMING_ICA_NUM'] = dictionary['PERFORMING_ICA_NUM']
            mdr['RECEIVING_ICA_NUM'] = dictionary['RECEIVING_ICA_NUM']
            mdr['ACCOUNT_ID'] = dictionary['ACCOUNT_ID']
            mdr['BILLING_METHOD_NAME'] = dictionary['BILLING_METHOD_NAME']
            mdr['NOTES_PROJECT_ID'] = dictionary['NOTES_PROJECT_ID']
            mdr['PROJECT_NAME'] = dictionary['PROJECT_NAME']
            mdr['PROJECT_YEAR'] = dictionary['PROJECT_YEAR']
            mdr['STRATEGIC_OFFERING_NAME'] = dictionary['STRATEGIC_OFFERING_NAME']
            mdr['MDR or DOU source'] = dictionary['Y_TO_Y_SOURCE_PROJECT_ID']
            mdr['TUAM_BILLABLE'] = dictionary['TUAM_BILLABLE']
            mdr_account_data.append(mdr)
            dictionary = ibm_db.fetch_assoc(stmt)
def WelcomeToMyapp():
    string = "new"
    if db2conn:
        stmt = ibm_db.exec_immediate(db2conn, "SELECT * FROM DASH5369.CSIE LIMIT 1;")
        # fetch the result
        result = ibm_db.fetch_assoc(stmt)
        string = ''
        while result != False:
            string += result['TIME_STAMP'] + ' ' + result['SIGNAL_SENT'] + '\n'
            result = ibm_db.fetch_assoc(stmt)
    return string
def getActualResultList(stmtHdl, fetchNum):
    resultList = []
    if fetchNum is None:
        resultDict = ibm_db.fetch_assoc(stmtHdl)
        if not resultDict:
            return resultList
        while resultDict:
            resultList.append(resultDict)
            resultDict = ibm_db.fetch_assoc(stmtHdl)
        return resultList
    else:
        for n in range(fetchNum):
            resultDict = ibm_db.fetch_assoc(stmtHdl)
            resultList.append(resultDict)
        return resultList
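# A hedged usage sketch for getActualResultList() above; the DSN, credentials,
# and table are illustrative assumptions, not from the original source.
# fetchNum=None drains the whole result set, while an integer performs exactly
# that many fetches (so the fixed-count branch can append False entries once
# rows run out).
import ibm_db

conn = ibm_db.connect("SAMPLE", "user", "password")  # hypothetical credentials
stmt = ibm_db.exec_immediate(conn, "SELECT * FROM staff")
all_rows = getActualResultList(stmt, None)   # every row as a dict
stmt = ibm_db.exec_immediate(conn, "SELECT * FROM staff")
first_five = getActualResultList(stmt, 5)    # at most five rows
ibm_db.close(conn)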
def getTableList():
    sqlstmt = "select count(*) from syscat.tables where tabschema = current user"
    print(sqlstmt)
    stmt = ibm_db.exec_immediate(db2conn, sqlstmt)
    result = ibm_db.fetch_assoc(stmt)
    return result
def dbinfo():
    if db2conn:
        # we have a DB2 connection, so obtain system information via ENV_SYS_INFO:
        stmt = ibm_db.exec_immediate(db2conn, "select * from sysibmadm.env_sys_info")
        # fetch the result
        result = ibm_db.fetch_assoc(stmt)
        return render_template('dbinfo.html', db2info=result)
def run_test_159(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == 'IDS':
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    result = ibm_db.exec_immediate(conn, "select name,job from staff")
    i = 1
    row = ibm_db.fetch_assoc(result)
    while row:
        print("%3d %10s %10s" % (i, row['NAME'], row['JOB']))
        i += 1
        row = ibm_db.fetch_assoc(result)
def run_test_034(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == "IDS":
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    result = ibm_db.exec_immediate(conn, "select * from staff")
    row = ibm_db.fetch_assoc(result)
    if row:
        print("%5d %-10s %5d %-7s %5d %15s %10s" % (
            row["ID"], row["NAME"], row["DEPT"], row["JOB"],
            row["YEARS"], row["SALARY"], row["COMM"]))
    ibm_db.close(conn)
def getTableCount():
    sqlstmt = "select count(*) from dialogdata"
    print(sqlstmt)
    # execute SQL query and fetch single result
    stmt = ibm_db.exec_immediate(db2conn, sqlstmt)
    result = ibm_db.fetch_assoc(stmt)
    return result
def run_test_159a(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == "IDS":
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    result = ibm_db.exec_immediate(conn, "select prstdate,prendate from project")
    i = 1
    row = ibm_db.fetch_assoc(result)
    while row:
        print("%3d %10s %10s" % (i, row["PRSTDATE"], row["PRENDATE"]))
        i += 1
        row = ibm_db.fetch_assoc(result)
def queryDict(self, _sql):
    stmt = ibm_db.exec_immediate(self.connection, _sql)
    dictionary = []
    item = ibm_db.fetch_assoc(stmt)
    while item != False:
        dictionary.append(item)
        item = ibm_db.fetch_assoc(stmt)  # keep fetching dict rows, not tuples
    return dictionary
def run_test_133(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    if not conn:
        print("Connection failed.")
        return 0
    ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
    print("Starting test ...")
    res = ''
    sql = "INSERT INTO animals (id, breed, name, weight) VALUES (?, ?, ?, ?)"
    try:
        stmt = ibm_db.prepare(conn, sql)
        res = ibm_db.execute(stmt, (128, 'hacker of human and technological nature',
                                    'Wez the ruler of all things PECL', 88.3))
        stmt = ibm_db.prepare(conn, "SELECT breed, name FROM animals WHERE id = ?")
        res = ibm_db.execute(stmt, (128,))
        row = ibm_db.fetch_assoc(stmt)
        for i in row:
            print(i)
        ibm_db.rollback(conn)
        print("Done")
    except:
        print("SQLSTATE: %s" % ibm_db.stmt_error(stmt))
        print("Message: %s" % ibm_db.stmt_errormsg(stmt))
    try:
        stmt = ibm_db.prepare(conn, "SELECT breed, name FROM animals WHERE id = ?")
        res = ibm_db.execute(stmt, (128,))
        row = ibm_db.fetch_assoc(stmt)
        if row:
            for i in row:
                print(i)
        print(res)
        print("SQLSTATE: %s" % ibm_db.stmt_error(stmt))
        print("Message: %s" % ibm_db.stmt_errormsg(stmt))
    except:
        print("An Exception is not expected")
        print("SQLSTATE: %s" % ibm_db.stmt_error(stmt))
        print("Message: %s" % ibm_db.stmt_errormsg(stmt))
    ibm_db.rollback(conn)
    print("Done")
def get_nodes(conn, tablename):
    # NOTE: interpolating tablename into the SQL text is open to SQL injection
    sql = "SELECT DISTINCT * FROM DTABLES WHERE TNAME = '" + tablename + "';"
    print("\t", sql)
    stmt = ibm_db.exec_immediate(conn, sql)
    dictionary = ibm_db.fetch_assoc(stmt)
    nodes = []
    while dictionary != False:
        url = dictionary["NODEURL"].rstrip()
        host = url[url.rfind('/') + 1:]
        print(host)
        user = dictionary["NODEUSER"]
        passwd = dictionary["NODEPASSWD"]
        driver = dictionary["NODEDRIVER"]
        node = (url, host, user, passwd, driver)
        nodes.append(node)
        dictionary = ibm_db.fetch_assoc(stmt)
    return nodes
def read_dbInsparams(con, mess, dbInsParams):
    sqlQ = 'select name,value from SYSIBMADM.DBMCFG'
    stmt = ibm_db.exec_immediate(con, sqlQ)
    if not stmt:
        mess[0] = sqlQ + "->" + ibm_db.stmt_errormsg(stmt)
        module.fail_json(msg=mess, changed=False)
        raise Exception(mess[0])
    dictionary = ibm_db.fetch_assoc(stmt)
    while dictionary != False:
        name = dictionary["NAME"]
        value = dictionary["VALUE"]
        dbInsParams.append(dict(name=name, value=value))
        dictionary = ibm_db.fetch_assoc(stmt)
    return True
def node_id(db, nodeid):
    try:
        sql = "SELECT nodeurl from dtables where nodeid = " + nodeid + ";"
        stmt = ibm_db.exec_immediate(db, sql)
        dictionary = ibm_db.fetch_assoc(stmt)
        while dictionary != False:
            url = dictionary["nodeurl"]
            return url
    except Exception:
        return None
def fetchRecordForName(username):
    sqlstmt = "select * from dialogdata where fname='" + username + "'"
    # Some debugging
    print(sqlstmt)
    # execute SQL query and fetch single result
    stmt = ibm_db.exec_immediate(db2conn, sqlstmt)
    result = ibm_db.fetch_assoc(stmt)
    return result
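# fetchRecordForName() above splices user input straight into the SQL text,
# which is open to SQL injection; get_nodes() and node_id() earlier share the
# same problem. A minimal parameterized sketch of the same lookup, assuming
# the db2conn connection used above (the "Safe" suffix is ours, purely for
# illustration):
def fetchRecordForNameSafe(username):
    stmt = ibm_db.prepare(db2conn, "select * from dialogdata where fname = ?")
    ibm_db.execute(stmt, (username,))
    return ibm_db.fetch_assoc(stmt)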
def listInsights(self):
    if self.sqlConn:
        stmt = ibm_db.exec_immediate(self.sqlConn, "select * from insights order by id desc")
        print(stmt)
        result = ibm_db.fetch_assoc(stmt)
        print(result)
        return result
    else:
        print("ERROR: Connection not found")
def run_test_066(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)

    # NOTE: This is a workaround; fetch_object() is not implemented yet,
    # so each row is fetched with fetch_assoc() and copied onto a dummy class.
    class Row:
        pass

    for table in ('animals', 'animal_pics', 'anime_cat'):
        if server.DBMS_NAME[0:3] == 'IDS':
            result = ibm_db.tables(conn, None, config.user.lower(), table.lower())
        else:
            result = ibm_db.tables(conn, None, config.user.upper(), table.upper())
        data = ibm_db.fetch_assoc(result)
        while data:
            row = Row()
            if server.DBMS_NAME[0:3] == 'IDS':
                row.table_schem = data['table_schem']
                row.table_name = data['table_name']
                row.table_type = data['table_type']
                row.remarks = data['remarks']
                print("Schema: %s" % row.table_schem)
                print("Name: %s" % row.table_name)
                print("Type: %s" % row.table_type)
                print("Remarks: %s\n" % row.remarks)
            else:
                row.TABLE_SCHEM = data['TABLE_SCHEM']
                row.TABLE_NAME = data['TABLE_NAME']
                row.TABLE_TYPE = data['TABLE_TYPE']
                row.REMARKS = data['REMARKS']
                print("Schema: %s" % row.TABLE_SCHEM)
                print("Name: %s" % row.TABLE_NAME)
                print("Type: %s" % row.TABLE_TYPE)
                print("Remarks: %s\n" % row.REMARKS)
            data = ibm_db.fetch_assoc(result)
    ibm_db.free_result(result)
    ibm_db.close(conn)
def get_row(result):
    """Fetch one row of the query result as a dict."""
    row = ibm_db.fetch_assoc(result)
    return row
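# A short usage sketch for get_row() above; the DSN, credentials, and table
# are illustrative assumptions, not from the original source. fetch_assoc()
# returns False after the last row, so the loop terminates naturally.
import ibm_db

conn = ibm_db.connect("SAMPLE", "user", "password")  # hypothetical credentials
result = ibm_db.exec_immediate(conn, "SELECT * FROM staff")
row = get_row(result)
while row:
    print(row)
    row = get_row(result)
ibm_db.close(conn)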
def get_job_runs(terms):
    # Calculate how many jobs have run so far
    sql = ibm_db.exec_immediate(conn, 'select count(*) from tweet_counts')
    row_counts = int(ibm_db.fetch_assoc(sql)['1'])
    job_runs = row_counts // len(query_terms)  # integer division: whole runs only
    return job_runs
def run_test_154(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == 'IDS':
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    try:
        statement = 'DROP TABLE fetch_test'
        result = ibm_db.exec_immediate(conn, statement)
    except:
        pass
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == 'IDS':
        statement = 'CREATE TABLE fetch_test (col1 VARCHAR(20), col2 CLOB, col3 INTEGER)'
    else:
        statement = 'CREATE TABLE fetch_test (col1 VARCHAR(20), col2 CLOB(20), col3 INTEGER)'
    st0 = "INSERT INTO fetch_test VALUES ('column 0', 'Data in the clob 0', 0)"
    st1 = "INSERT INTO fetch_test VALUES ('column 1', 'Data in the clob 1', 1)"
    st2 = "INSERT INTO fetch_test VALUES ('column 2', 'Data in the clob 2', 2)"
    st3 = "INSERT INTO fetch_test VALUES ('column 3', 'Data in the clob 3', 3)"
    result = ibm_db.exec_immediate(conn, statement)
    result = ibm_db.exec_immediate(conn, st0)
    result = ibm_db.exec_immediate(conn, st1)
    result = ibm_db.exec_immediate(conn, st2)
    result = ibm_db.exec_immediate(conn, st3)
    statement = "SELECT col1, col2 FROM fetch_test"
    result = ibm_db.prepare(conn, statement)
    ibm_db.execute(result)
    row = ibm_db.fetch_tuple(result)
    while row:
        print("\"%s\" from VARCHAR is %d bytes long, \"%s\" from CLOB is %d bytes long." %
              (row[0], len(row[0]), row[1], len(row[1])))
        row = ibm_db.fetch_tuple(result)
    result = ibm_db.prepare(conn, statement)
    ibm_db.execute(result)
    row = ibm_db.fetch_assoc(result)
    while row:
        print("\"%s\" from VARCHAR is %d bytes long, \"%s\" from CLOB is %d bytes long." %
              (row['COL1'], len(row['COL1']), row['COL2'], len(row['COL2'])))
        row = ibm_db.fetch_assoc(result)
    result = ibm_db.prepare(conn, statement)
    ibm_db.execute(result)
    row = ibm_db.fetch_both(result)
    while row:
        print("\"%s\" from VARCHAR is %d bytes long, \"%s\" from CLOB is %d bytes long.\n" %
              (row['COL1'], len(row['COL1']), row[1], len(row[1])))
        row = ibm_db.fetch_both(result)
    ibm_db.close(conn)
def main():
    # get diagpath
    cfgs = subprocess.check_output("db2 get dbm cfg", shell=True).decode('ascii').splitlines()
    diagpath = [cfg.split('=')[1].strip() for cfg in cfgs
                if "Diagnostic data directory path" in cfg][0]

    # parse arguments
    description = "Setup IBMHIST monitoring program. View README for more information."
    parser = argparse.ArgumentParser(description=description)
    # database arguments
    parser.add_argument('database', help='name of database')
    parser.add_argument('-un', '--username', metavar='', default='',
                        help='username used to connect to database, default is that of current user')
    parser.add_argument('-pw', '--password', metavar='', default='',
                        help='password used to connect to database, default is that of current user')
    # flags
    parser.add_argument('-c', '--cleanup', metavar='', action='store_const', const=True, default=False,
                        help='will drop the IBMHIST schema and all of its objects, and unschedule its tasks')
    parser.add_argument('-uc', '--update_config_only', metavar='', action='store_const', const=True, default=False,
                        help='will update config values to passed in arguments or reset to defaults for those not explicitly specified')
    parser.add_argument('-ut', '--update_tasks_only', metavar='', action='store_const', const=True, default=False,
                        help='will unschedule all old tasks and reschedule to match with the task_details.json file')
    # path of bldrtn file
    parser.add_argument('-bp', '--bldrtn_path', metavar='',
                        default="C:\\Program Files\\IBM\\SQLLIB\\samples\\cpp\\bldrtn.bat" if os.name == 'nt'
                        else os.path.expanduser("~/sqllib/samples/cpp/bldrtn"),
                        help="specify path for bldrtn script if it is not automatically found, default: %(default)s")
    # collection/archival paths and max size
    parser.add_argument('-cp', '--coll_path', metavar='', default=os.path.abspath(diagpath),
                        help='directory path of data collection, default: %(default)s')
    parser.add_argument('-ap', '--arch_path', metavar='', default=os.path.abspath(diagpath),
                        help='directory path of data archival, default: %(default)s')
    parser.add_argument('-ms', '--max_size', metavar='', type=int, default=1073741824,
                        help='max size in bytes of collection and archival, default: %(default)s')
    # archival command and extension
    parser.add_argument('-acmd', '--arch_cmd', metavar='',
                        default='powershell -command "Compress-Archive" -Path _src_ -DestinationPath _dest_'
                        if os.name == 'nt' else 'tar -caf _dest_ _src_',
                        help='command used to archive hourly folders with "_src_" and "_dest_" placeholders, default: %(default)s')
    parser.add_argument('-aext', '--arch_ext', metavar='', default='.zip' if os.name == 'nt' else '.tar.gz',
                        help='extension of archived hourly folders beginning with ".", default: %(default)s')
    # collection level of tasks
    parser.add_argument('-lvl', '--coll_lvl', metavar='', type=int,
                        help='scope of tasks to schedule (1 will only schedule key tasks, 2 will schedule more), all tasks scheduled if unspecified')

    # parse and transform arguments
    args = parser.parse_args()
    args.database = args.database.upper()
    args.bldrtn_path = os.path.abspath(args.bldrtn_path)
    args.coll_path, args.arch_path, args.max_size = (os.path.abspath(args.coll_path),
                                                     os.path.abspath(args.arch_path),
                                                     str(args.max_size))

    # try to connect to database
    print("Connecting to database: %s" % args.database)
    conn = ibm_db.connect(args.database, args.username, args.password)

    # create tablespace SYSTOOLSPACE if it does not already exist
    stmt = ibm_db.exec_immediate(conn, "SELECT TBSPACE FROM SYSCAT.TABLESPACES WHERE TBSPACE = 'SYSTOOLSPACE'")
    if not ibm_db.fetch_assoc(stmt):
        print("Creating tablespace SYSTOOLSPACE ...")
        ibm_db.exec_immediate(conn, "CREATE TABLESPACE SYSTOOLSPACE IN IBMCATGROUP MANAGED BY AUTOMATIC STORAGE EXTENTSIZE 4")

    if args.cleanup:
        # unschedule old tasks
        unschedule_tasks(conn)
        # drop old IBMHIST schema and its objects
        drop_IBMHIST(conn)
    elif args.update_config_only:
        # test arguments and set configurations in IBMHIST.TAB_CONFIG
        config_IBMHIST(conn, args.coll_path, args.arch_path, args.max_size, args.arch_cmd, args.arch_ext)
    elif args.update_tasks_only:
        # determine if Windows/Unix, pureScale, HADR environment
        env = get_env(conn, args.database)
        # unschedule old tasks
        unschedule_tasks(conn)
        # schedule collection tasks from task_details.json file and archive task
        schedule_tasks(conn, args.database, args.coll_lvl, env)
    else:
        # determine if Windows/Unix, pureScale, HADR environment
        env = get_env(conn, args.database)
        # unschedule old tasks
        unschedule_tasks(conn)
        # drop old IBMHIST schema and its objects
        drop_IBMHIST(conn)
        # setup IBMHIST schema and register all table and procedure objects
        setup_IBMHIST(conn, args.bldrtn_path)
        # test arguments and set configurations in IBMHIST.TAB_CONFIG
        config_IBMHIST(conn, args.coll_path, args.arch_path, args.max_size, args.arch_cmd, args.arch_ext)
        # test basic functionality of IBMHIST.PROC_COLLECT and IBMHIST.PROC_ARCHIVE
        test_IBMHIST(conn, args.database)
        # schedule collection tasks from task_details.json file and archive task
        schedule_tasks(conn, args.database, args.coll_lvl, env)

    # close connection
    print("Closing connection ...")
    ibm_db.close(conn)
    print("Done")
def run_save_submission_results(params):
    try:
        cos_everest_submission_bucket = params.get("cos_everest_submission_bucket", None)
        if cos_everest_submission_bucket is None:
            raise Exception("Pass location of the bucket")
        mode = params.get("mode", None)
        if mode is None:
            raise Exception("Pass RUNTIME or TRAINING")
        status = params.get("status", None)
        if status is None:
            raise Exception("Pass status")
        model_id = params.get("model_id", None)
        if model_id is None:
            raise Exception("Pass model_id")
        standardized_txt_dir = params.get("standardized_txt_dir", None)
        if standardized_txt_dir is None:
            raise Exception("Pass standardized_txt_dir")
        db_conn = db2utils.get_connection()
        print("db_conn: {}".format(db_conn))
        if status:
            sql = f'''SELECT ID, USED_FOR FROM EVERESTSCHEMA.EVRE_LEARNING_EMAIL_MSGS where USED_FOR = '{mode}' and status = '{status}' order by ID '''
        else:
            sql = f'''SELECT ID, USED_FOR FROM EVERESTSCHEMA.EVRE_LEARNING_EMAIL_MSGS where USED_FOR = '{mode}' order by ID '''
        print("sql: {}".format(sql))
        stmt = ibm_db.exec_immediate(db_conn, sql)
        result = ibm_db.fetch_assoc(stmt)
        result_dict = {}
        while result:
            id = str(result["ID"])
            mode = result["USED_FOR"]
            param = {
                'cos_everest_submission_bucket': cos_everest_submission_bucket,
                'submission_id': id,
                'submissions_data_folder': 'submission_documents_data',
                'standardized_txt_dir': standardized_txt_dir,
                'mode': mode,
                'model_id': model_id
            }
            save_submission_results.main(param)
            # time.sleep(2)
            print(f'Apply NLP and save results:: {id, param}')
            result = ibm_db.fetch_assoc(stmt)
        result_dict = {}
        result_dict["status"] = "SUCCESS"
    except Exception as err:
        logging.exception(err)
        result_dict = {}
        result_dict["status"] = "FAILURE"
    return {"result": "This flow should get executed"}
def run_test_133(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    if not conn:
        print("Connection failed.")
        return 0
    ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
    print("Starting test ...")
    res = ''
    sql = "INSERT INTO animals (id, breed, name, weight) VALUES (?, ?, ?, ?)"
    try:
        stmt = ibm_db.prepare(conn, sql)
        res = ibm_db.execute(stmt, (128, 'hacker of human and technological nature',
                                    'Wez the ruler of all things PECL', 88.3))
        stmt = ibm_db.prepare(conn, "SELECT breed, name FROM animals WHERE id = ?")
        res = ibm_db.execute(stmt, (128,))
        row = ibm_db.fetch_assoc(stmt)
        for i in row:
            print(i)
        ibm_db.rollback(conn)
        print("Done")
    except:
        print("SQLSTATE: %s" % ibm_db.stmt_error(stmt))
        print("Message: %s" % ibm_db.stmt_errormsg(stmt))
    try:
        stmt = ibm_db.prepare(conn, "SELECT breed, name FROM animals WHERE id = ?")
        res = ibm_db.execute(stmt, (128,))
        row = ibm_db.fetch_assoc(stmt)
        if row:
            for i in row:
                print(i)
        print(res)
        print("SQLSTATE: %s" % ibm_db.stmt_error(stmt))
        print("Message: %s" % ibm_db.stmt_errormsg(stmt))
    except:
        print("An Exception is not expected")
        print("SQLSTATE: %s" % ibm_db.stmt_error(stmt))
        print("Message: %s" % ibm_db.stmt_errormsg(stmt))
    ibm_db.rollback(conn)
    print("Done")
#__END__
#__LUW_EXPECTED__
#Starting test ...
#
#SQLSTATE: 22001
#Message: [IBM][CLI Driver] CLI0109E String data right truncation. SQLSTATE=22001 SQLCODE=-99999
#True
#SQLSTATE: 02000
#Message: [IBM][CLI Driver][DB2/%s] SQL0100W No row was found for FETCH, UPDATE or DELETE; or the result of a query is an empty table. SQLSTATE=02000 SQLCODE=100
#Done
#__ZOS_EXPECTED__
#Starting test ...
#
#SQLSTATE: 22001
#Message: [IBM][CLI Driver] CLI0109E String data right truncation. SQLSTATE=22001 SQLCODE=-99999
#True
#SQLSTATE: 02000
#Message: [IBM][CLI Driver][DB2] SQL0100W No row was found for FETCH, UPDATE or DELETE; or the result of a query is an empty table. SQLSTATE=02000 SQLCODE=100
#Done
#__SYSTEMI_EXPECTED__
#Starting test ...
#
#SQLSTATE: 22001
#Message: [IBM][CLI Driver] CLI0109E String data right truncation. SQLSTATE=22001 SQLCODE=-99999
#True
#SQLSTATE: 02000
#Message: [IBM][CLI Driver][AS] SQL0100W No row was found for FETCH, UPDATE or DELETE; or the result of a query is an empty table. SQLSTATE=02000 SQLCODE=100
#Done
#__IDS_EXPECTED__
#Starting test ...
#
#SQLSTATE: 22001
#Message: [IBM][CLI Driver][IDS%s] Value exceeds string column length. SQLCODE=-1279
#True
#SQLSTATE: 02000
#Message: [IBM][CLI Driver][IDS%s] SQL0100W No row was found for FETCH, UPDATE or DELETE; or the result of a query is an empty table. SQLSTATE=02000 SQLCODE=100
#Done
def fecth_sql(conn, sql):
    logger.info("Running sql: %s", sql)
    stmt = ibm_db.exec_immediate(conn, sql)
    return ibm_db.fetch_assoc(stmt)
#!python
import ibm_db

myConnection = ibm_db.connect("SAMPLE", "fuzzy", "fuzzy")
if myConnection:
    mySQL = "select firstnme, lastname from fuzzy.employee"
    myStmt = ibm_db.exec_immediate(myConnection, mySQL)
    row = ibm_db.fetch_assoc(myStmt)
    while row:
        print(row)
        row = ibm_db.fetch_assoc(myStmt)
def run_test_156(self):
    conn = ibm_db.connect(config.database, config.user, config.password)
    server = ibm_db.server_info(conn)
    if server.DBMS_NAME[0:3] == 'IDS':
        op = {ibm_db.ATTR_CASE: ibm_db.CASE_UPPER}
        ibm_db.set_option(conn, op, 1)
    result = ibm_db.exec_immediate(conn, "select * from staff")
    row = ibm_db.fetch_assoc(result)
    while row:
        if row['YEARS'] is None:
            row['YEARS'] = ''
        if row['COMM'] is None:
            row['COMM'] = ''
        print(row['ID'], row['NAME'], row['JOB'], row['YEARS'],
              row['SALARY'], row['COMM'])
        row = ibm_db.fetch_assoc(result)
    result2 = ibm_db.exec_immediate(
        conn,
        "select * from department where substr(deptno,1,1) in ('A','B','C','D','E')")
    row2 = ibm_db.fetch_assoc(result2)
    while row2:
        if row2['MGRNO'] is None:
            row2['MGRNO'] = ''
        print(row2['DEPTNO'], row2['DEPTNAME'], row2['MGRNO'],
              row2['ADMRDEPT'], row2['LOCATION'])
        row2 = ibm_db.fetch_assoc(result2)
#__END__
#__LUW_EXPECTED__
#10 Sanders Mgr 7 18357.50
#20 Pernal Sales 8 18171.25 612.45
#30 Marenghi Mgr 5 17506.75
#40 OBrien Sales 6 18006.00 846.55
#50 Hanes Mgr 10 20659.80
#60 Quigley Sales 16808.30 650.25
#70 Rothman Sales 7 16502.83 1152.00
#80 James Clerk 13504.60 128.20
#90 Koonitz Sales 6 18001.75 1386.70
#100 Plotz Mgr 7 18352.80
#110 Ngan Clerk 5 12508.20 206.60
#120 Naughton Clerk 12954.75 180.00
#130 Yamaguchi Clerk 6 10505.90 75.60
#140 Fraye Mgr 6 21150.00
#150 Williams Sales 6 19456.50 637.65
#160 Molinare Mgr 7 22959.20
#170 Kermisch Clerk 4 12258.50 110.10
#180 Abrahams Clerk 3 12009.75 236.50
#190 Sneider Clerk 8 14252.75 126.50
#200 Scoutten Clerk 11508.60 84.20
#210 Lu Mgr 10 20010.00
#220 Smith Sales 7 17654.50 992.80
#230 Lundquist Clerk 3 13369.80 189.65
#240 Daniels Mgr 5 19260.25
#250 Wheeler Clerk 6 14460.00 513.30
#260 Jones Mgr 12 21234.00
#270 Lea Mgr 9 18555.50
#280 Wilson Sales 9 18674.50 811.50
#290 Quill Mgr 10 19818.00
#300 Davis Sales 5 15454.50 806.10
#310 Graham Sales 13 21000.00 200.30
#320 Gonzales Sales 4 16858.20 844.00
#330 Burke Clerk 1 10988.00 55.50
#340 Edwards Sales 7 17844.00 1285.00
#350 Gafney Clerk 5 13030.50 188.00
#A00 SPIFFY COMPUTER SERVICE DIV. 000010 A00 None
#B01 PLANNING 000020 A00 None
#C01 INFORMATION CENTER 000030 A00 None
#D01 DEVELOPMENT CENTER A00 None
#D11 MANUFACTURING SYSTEMS 000060 D01 None
#D21 ADMINISTRATION SYSTEMS 000070 D01 None
#E01 SUPPORT SERVICES 000050 A00 None
#E11 OPERATIONS 000090 E01 None
#E21 SOFTWARE SUPPORT 000100 E01 None
#__ZOS_EXPECTED__
#10 Sanders Mgr 7 18357.50
#20 Pernal Sales 8 18171.25 612.45
#30 Marenghi Mgr 5 17506.75
#40 OBrien Sales 6 18006.00 846.55
#50 Hanes Mgr 10 20659.80
#60 Quigley Sales 16808.30 650.25
#70 Rothman Sales 7 16502.83 1152.00
#80 James Clerk 13504.60 128.20
#90 Koonitz Sales 6 18001.75 1386.70
#100 Plotz Mgr 7 18352.80
#110 Ngan Clerk 5 12508.20 206.60
#120 Naughton Clerk 12954.75 180.00
#130 Yamaguchi Clerk 6 10505.90 75.60
#140 Fraye Mgr 6 21150.00
#150 Williams Sales 6 19456.50 637.65
#160 Molinare Mgr 7 22959.20
#170 Kermisch Clerk 4 12258.50 110.10
#180 Abrahams Clerk 3 12009.75 236.50
#190 Sneider Clerk 8 14252.75 126.50
#200 Scoutten Clerk 11508.60 84.20
#210 Lu Mgr 10 20010.00
#220 Smith Sales 7 17654.50 992.80
#230 Lundquist Clerk 3 13369.80 189.65
#240 Daniels Mgr 5 19260.25
#250 Wheeler Clerk 6 14460.00 513.30
#260 Jones Mgr 12 21234.00
#270 Lea Mgr 9 18555.50
#280 Wilson Sales 9 18674.50 811.50
#290 Quill Mgr 10 19818.00
#300 Davis Sales 5 15454.50 806.10
#310 Graham Sales 13 21000.00 200.30
#320 Gonzales Sales 4 16858.20 844.00
#330 Burke Clerk 1 10988.00 55.50
#340 Edwards Sales 7 17844.00 1285.00
#350 Gafney Clerk 5 13030.50 188.00
#A00 SPIFFY COMPUTER SERVICE DIV. 000010 A00 None
#B01 PLANNING 000020 A00 None
#C01 INFORMATION CENTER 000030 A00 None
#D01 DEVELOPMENT CENTER A00 None
#D11 MANUFACTURING SYSTEMS 000060 D01 None
#D21 ADMINISTRATION SYSTEMS 000070 D01 None
#E01 SUPPORT SERVICES 000050 A00 None
#E11 OPERATIONS 000090 E01 None
#E21 SOFTWARE SUPPORT 000100 E01 None
#__SYSTEMI_EXPECTED__
#10 Sanders Mgr 7 18357.50
#20 Pernal Sales 8 18171.25 612.45
#30 Marenghi Mgr 5 17506.75
#40 OBrien Sales 6 18006.00 846.55
#50 Hanes Mgr 10 20659.80
#60 Quigley Sales 16808.30 650.25
#70 Rothman Sales 7 16502.83 1152.00
#80 James Clerk 13504.60 128.20
#90 Koonitz Sales 6 18001.75 1386.70
#100 Plotz Mgr 7 18352.80
#110 Ngan Clerk 5 12508.20 206.60
#120 Naughton Clerk 12954.75 180.00
#130 Yamaguchi Clerk 6 10505.90 75.60
#140 Fraye Mgr 6 21150.00
#150 Williams Sales 6 19456.50 637.65
#160 Molinare Mgr 7 22959.20
#170 Kermisch Clerk 4 12258.50 110.10
#180 Abrahams Clerk 3 12009.75 236.50
#190 Sneider Clerk 8 14252.75 126.50
#200 Scoutten Clerk 11508.60 84.20
#210 Lu Mgr 10 20010.00
#220 Smith Sales 7 17654.50 992.80
#230 Lundquist Clerk 3 13369.80 189.65
#240 Daniels Mgr 5 19260.25
#250 Wheeler Clerk 6 14460.00 513.30
#260 Jones Mgr 12 21234.00
#270 Lea Mgr 9 18555.50
#280 Wilson Sales 9 18674.50 811.50
#290 Quill Mgr 10 19818.00
#300 Davis Sales 5 15454.50 806.10
#310 Graham Sales 13 21000.00 200.30
#320 Gonzales Sales 4 16858.20 844.00
#330 Burke Clerk 1 10988.00 55.50
#340 Edwards Sales 7 17844.00 1285.00
#350 Gafney Clerk 5 13030.50 188.00
#A00 SPIFFY COMPUTER SERVICE DIV. 000010 A00 None
#B01 PLANNING 000020 A00 None
#C01 INFORMATION CENTER 000030 A00 None
#D01 DEVELOPMENT CENTER A00 None
#D11 MANUFACTURING SYSTEMS 000060 D01 None
#D21 ADMINISTRATION SYSTEMS 000070 D01 None
#E01 SUPPORT SERVICES 000050 A00 None
#E11 OPERATIONS 000090 E01 None
#E21 SOFTWARE SUPPORT 000100 E01 None
#__IDS_EXPECTED__
#10 Sanders Mgr 7 18357.50
#20 Pernal Sales 8 18171.25 612.45
#30 Marenghi Mgr 5 17506.75
#40 OBrien Sales 6 18006.00 846.55
#50 Hanes Mgr 10 20659.80
#60 Quigley Sales 16808.30 650.25
#70 Rothman Sales 7 16502.83 1152.00
#80 James Clerk 13504.60 128.20
#90 Koonitz Sales 6 18001.75 1386.70
#100 Plotz Mgr 7 18352.80
#110 Ngan Clerk 5 12508.20 206.60
#120 Naughton Clerk 12954.75 180.00
#130 Yamaguchi Clerk 6 10505.90 75.60
#140 Fraye Mgr 6 21150.00
#150 Williams Sales 6 19456.50 637.65
#160 Molinare Mgr 7 22959.20
#170 Kermisch Clerk 4 12258.50 110.10
#180 Abrahams Clerk 3 12009.75 236.50
#190 Sneider Clerk 8 14252.75 126.50
#200 Scoutten Clerk 11508.60 84.20
#210 Lu Mgr 10 20010.00
#220 Smith Sales 7 17654.50 992.80
#230 Lundquist Clerk 3 13369.80 189.65
#240 Daniels Mgr 5 19260.25
#250 Wheeler Clerk 6 14460.00 513.30
#260 Jones Mgr 12 21234.00
#270 Lea Mgr 9 18555.50
#280 Wilson Sales 9 18674.50 811.50
#290 Quill Mgr 10 19818.00
#300 Davis Sales 5 15454.50 806.10
#310 Graham Sales 13 21000.00 200.30
#320 Gonzales Sales 4 16858.20 844.00
#330 Burke Clerk 1 10988.00 55.50
#340 Edwards Sales 7 17844.00 1285.00
#350 Gafney Clerk 5 13030.50 188.00
#A00 SPIFFY COMPUTER SERVICE DIV. 000010 A00 None
#B01 PLANNING 000020 A00 None
#C01 INFORMATION CENTER 000030 A00 None
#D01 DEVELOPMENT CENTER A00 None
#D11 MANUFACTURING SYSTEMS 000060 D01 None
#D21 ADMINISTRATION SYSTEMS 000070 D01 None
#E01 SUPPORT SERVICES 000050 A00 None
#E11 OPERATIONS 000090 E01 None
#E21 SOFTWARE SUPPORT 000100 E01 None
    print("\nERROR: Unable to obtain the information desired\n.")
    conn.closeConnection()
    exit(-1)

# Otherwise, Complete The Status Message
else:
    print("Done!\n")

# As Long As There Are Records (That Were Produced By The ibm_db.statistics API), ...
noData = False
loopCounter = 1
while noData is False:

    # Retrieve A Record And Store It In A Python Dictionary
    try:
        dataRecord = ibm_db.fetch_assoc(resultSet)
    except:
        pass

    # If The Data Could Not Be Retrieved Or If There Was No Data To Retrieve, Set The
    # "No Data" Flag And Exit The Loop
    if dataRecord is False:
        noData = True

    # Otherwise, Display The Information Retrieved
    else:

        # Display Record Header Information
        print("Record number " + str(loopCounter) + " details:")
        print("______________________________________________")
def main(params):
    logging.info('Calling fn_get_submission_results')
    try:
        cos_everest_submission_bucket = params.get("cos_everest_submission_bucket", None)
        if cos_everest_submission_bucket is None:
            raise Exception("Pass location of the bucket")
        submission_id = params.get("submission_id", None)
        if submission_id is None:
            raise Exception("Pass submission_id")
        submissions_data_folder = params.get("submissions_data_folder", None)
        if submissions_data_folder is None:
            raise Exception("Pass submissions_data_folder")
        standardized_txt_dir = params.get("standardized_txt_dir", None)
        if standardized_txt_dir is None:
            raise Exception("Pass standardized_txt_dir")
        mode = params.get("mode", None)
        if mode is None:
            raise Exception("Pass mode")
        model_id = params.get("model_id", None)
        if model_id is None:
            raise Exception("Pass model_id")

        object_storage_key = submissions_data_folder + "/" + mode + "/" + str(submission_id)
        output_object_storage_key = (submissions_data_folder + "/" + mode + "/" +
                                     str(submission_id) + "/" + "nlu_results" + "/" +
                                     "output.json")  # + "/" + standardized_txt_dir

        extensions = ['txt']
        regex = r"^" + object_storage_key + ".*txt$"
        file_keys = cosutils.get_bucket_contents(cos_everest_submission_bucket, regex)
        nlu_service = watson_nlu_utils.inst()
        results_dict = {}
        nlu_results_list = []
        for key in file_keys:
            print("Processing file:: {}", file_keys)
            if key.endswith(tuple(extensions)):
                txt_file_bytes = cosutils.get_item(cos_everest_submission_bucket, key)
                text = txt_file_bytes.decode("utf-8")
                print("text:: ", len(text.strip()))
                nlu_results = None
                if text is not None and len(text.strip()) != 0:
                    nlu_results = watson_nlu_utils.get_result(nlu_service, model_id, text)
                    nlu_results_list.append(nlu_results)

        # get final cleaned results
        nlu_results_dict = {}
        nlu_response = get_clean_results(nlu_results_list)
        nlu_results_dict["result"] = nlu_response
        res_bytes = str(nlu_results_dict).encode('utf-8')
        print("res_bytes::", res_bytes)

        # store in object storage
        return_val = cosutils.save_file(cos_everest_submission_bucket,
                                        output_object_storage_key, res_bytes)
        if return_val == "SUCCESS":
            print("File Uploaded to object storage successfully:: {} ", output_object_storage_key)

        validation_status = get_validation_status(nlu_response)
        print("validation_status", validation_status["status"])
        db_conn = db2utils.get_connection()
        sql = f'''SELECT ID, STATUS,
                         TO_CHAR(FIRST_UPDATED,'YYYY-MM-DD HH.MI.SS') as FIRST_UPDATED,
                         TO_CHAR(LAST_UPDATED,'YYYY-MM-DD HH.MI.SS') as LAST_UPDATED
                  FROM FINAL TABLE
                       (UPDATE EVERESTSCHEMA.EVRE_LEARNING_EMAIL_MSGS
                        SET STATUS = '{validation_status["status"]}'
                        where ID = {submission_id}) '''
        print("sql: {}".format(sql))
        stmt = ibm_db.exec_immediate(db_conn, sql)
        result = ibm_db.fetch_assoc(stmt)
        result_list = []
        if result:
            result_list.append(result)
        result_dict = {}
        result_dict["result"] = result_list
        result_dict["status"] = "SUCCESS"
        print(result_dict)
        return result_dict
    except Exception as err:
        logging.exception(err)
        result_dict = {}
        result_dict["error"] = err
        result_dict["status"] = "FAILURE"
        return result_dict
    return {"result": "Flow should not reach here"}
import ibm_db

conn = ibm_db.connect("DATABASE=sample;HOSTNAME=localhost;PORT=50000;PROTOCOL=TCPIP;UID=db2inst1;PWD=db2inst1;", "", "")
sql = "SELECT * FROM STAFF"
stmt = ibm_db.exec_immediate(conn, sql)
# result = ibm_db.fetch_both(stmt)
result = ibm_db.fetch_assoc(stmt)
while result:
    print(result)
    result = ibm_db.fetch_assoc(stmt)
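# The commented-out line in the script above hints at the three row formats
# ibm_db offers: fetch_tuple() gives positional access, fetch_assoc() gives
# column-name keys, and fetch_both() gives a dict supporting both. A small
# hedged sketch against the same STAFF query, reusing the connection from the
# script above:
stmt = ibm_db.exec_immediate(conn, "SELECT * FROM STAFF")
row = ibm_db.fetch_both(stmt)
while row:
    print(row[0], row["NAME"])  # positional and named access on one row
    row = ibm_db.fetch_both(stmt)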
def count_scale():
    start = request.args.get('start', default='2020-06-01')
    end = request.args.get('end', default='2020-06-01')
    start = '2020-06-01' if start == '' else start
    end = '2020-06-08' if end == '' else end
    start = datetime.datetime.strptime(start, "%Y-%m-%d")
    end = datetime.datetime.strptime(end, "%Y-%m-%d")
    scale = request.args.get('scale', '3')
    slot01, slot12, slot23, slot34, slot45, slot56, slot67 = 0, 0, 0, 0, 0, 0, 0
    scatter_attr = []
    rows = []
    # connect to DB2
    db2conn = ibm_db.connect(db2cred['ssldsn'], "", "")
    if db2conn:
        sql = "SELECT * FROM EARTHQUAKE WHERE MAGTYPE='ml' AND MAG>=?"
        stmt = ibm_db.prepare(db2conn, sql)
        ibm_db.bind_param(stmt, 1, scale)
        ibm_db.execute(stmt)
        result = ibm_db.fetch_assoc(stmt)
        while result != False:
            curr_date = result['TIME'][:10]
            curr_date = datetime.datetime.strptime(curr_date, "%Y-%m-%d")
            if start <= curr_date <= end:
                mag_scale = float(result['MAG'])
                if 0 <= mag_scale <= 1:
                    slot01 += 1
                if 1 <= mag_scale <= 2:
                    slot12 += 1
                if 2 <= mag_scale <= 3:
                    slot23 += 1
                if 3 <= mag_scale <= 4:
                    slot34 += 1
                if 4 <= mag_scale <= 5:
                    slot45 += 1
                if 5 <= mag_scale <= 6:
                    slot56 += 1
                if 6 <= mag_scale <= 7:
                    slot67 += 1
                scatter_attr.append({"MAG": float(result['MAG']),
                                     "DEPTH": float(result['DEPTH'])})
                rows.append(result.copy())
            result = ibm_db.fetch_assoc(stmt)
        ibm_db.close(db2conn)
    return render_template('count_scale.html', rows=rows, sa=scatter_attr,
                           ci={"0-1": slot01, "1-2": slot12, "2-3": slot23,
                               "3-4": slot34, "4-5": slot45, "5-6": slot56,
                               "6-7": slot67})
def show_cost():
    mode = request.args.get('mode1') if request.args.get('mode1') else request.args.get('mode2')
    mode = mode.split(' ')[-1]
    code = request.args.get('code') if request.args.get('code') else -1
    name = request.args.get('name') if request.args.get('name') else -1
    times = request.args.get('time') if request.args.get('time') else 1
    print('zzz', code, times)
    subsql = ''
    if code != -1:
        subsql += "CODE='{}'".format(code)
    if name != -1:
        subsql += " AND ENTITY='{}'".format(name)
    print('wwww', subsql, mode)
    rows1 = []
    rows2 = []
    start = time.time()
    for _ in range(int(times)):
        # connect to DB2
        if mode == 'RDB':
            db2conn = ibm_db.connect(db2cred['ssldsn'], "", "")
            if db2conn:
                sql = "SELECT * FROM PC WHERE {};".format(subsql)
                print(sql)
                stmt = ibm_db.exec_immediate(db2conn, sql)
                result = ibm_db.fetch_assoc(stmt)
                while result != False:
                    rows1.append(result.copy())
                    result = ibm_db.fetch_assoc(stmt)
                sql = "SELECT * FROM SP WHERE {};".format(subsql)
                print(sql)
                stmt = ibm_db.exec_immediate(db2conn, sql)
                result = ibm_db.fetch_assoc(stmt)
                while result != False:
                    rows2.append(result.copy())
                    result = ibm_db.fetch_assoc(stmt)
                ibm_db.close(db2conn)
        elif mode == 'Memcache':
            global cache_pc
            if code != -1:
                df1 = cache_pc[cache_pc['CODE'] == code]
                for _, r in df1.iterrows():
                    rows1.append(r.to_dict())
                df1 = cache_sp[cache_sp['CODE'] == code]
                for _, r in df1.iterrows():
                    rows2.append(r.to_dict())
            if name != -1:
                df2 = cache_pc[cache_pc['ENTITY'] == name]
                for _, r in df2.iterrows():
                    rows1.append(r.to_dict())
                df2 = cache_sp[cache_sp['ENTITY'] == name]
                for _, r in df2.iterrows():
                    rows2.append(r.to_dict())
    end = time.time()
    elapse = end - start
    return render_template('show_cost.html', app=appenv, rows1=rows1, rows2=rows2, e=elapse)
def main(params):
    logging.info('Calling fn_document_conversion_pdf.')
    try:
        # note: `x is None or ""` only ever catches None, because the `or ""`
        # operand is always falsy; `not x` covers both None and empty string
        cos_everest_submission_bucket = params.get("cos_everest_submission_bucket", None)
        if not cos_everest_submission_bucket:
            raise Exception("Pass location of the bucket")
        submission_id = params.get("submission_id", None)
        if not submission_id:
            raise Exception("Pass submission_id")
        submissions_data_folder = params.get("submissions_data_folder", None)
        if not submissions_data_folder:
            raise Exception("Pass submissions_data_folder")
        mode = params.get("mode", None)
        if not mode:
            raise Exception("Pass mode")

        object_storage_key = submissions_data_folder + "/" + mode + "/" + str(submission_id) + "/"
        regex = r"^" + object_storage_key + ".*$"
        file_keys = cosutils.get_bucket_contents(cos_everest_submission_bucket, regex)
        extensions = ['.docx', '.doc', '.pptx']
        for key in file_keys:
            if key.lower().endswith(tuple(extensions)):
                file_name = os.path.basename(key)
                file_name_without_ext, file_extension = os.path.splitext(file_name)
                url = OBJECT_STORAGE_PUBLIC_URL + "/" + object_storage_key + quote(file_name)
                PARAMS = {
                    "apikey": convertio_api_key,
                    "input": "url",
                    "file": url,
                    "outputformat": "pdf"
                }
                print(url)
                # submit the conversion request and parse the response
                r = requests.post(url=CONVERT_IO_URL, data=json.dumps(PARAMS), stream=True)
                return_val = json.loads(r.text)
                print("1......return_val::{}".format(return_val))
                status = return_val["status"]
                code = return_val["code"]
                if code == 200 and status == "ok":
                    id = return_val["data"]["id"]
                    print("converted document id::", id)
                    check_status_url = CONVERT_IO_URL + "/" + id + "/status"
                    print("check_status_url::", check_status_url)
                    # poll the conversion service until the document is finished;
                    # sleeping at the bottom of every iteration avoids the busy
                    # loop the original hit when the job was still in progress
                    while True:
                        r = requests.get(url=check_status_url)
                        return_val = json.loads(r.text)
                        print("2.......status::return_val::", return_val)
                        status = return_val["status"]
                        code = return_val["code"]
                        if code == 200 and status == "ok":
                            step = return_val["data"]["step"]
                            step_percent = return_val["data"]["step_percent"]
                            if step == "finish" and step_percent == 100:
                                id = return_val["data"]["id"]
                                print("Get content, store in object storage, update Db2 and exit")
                                # get content
                                get_result_url = CONVERT_IO_URL + "/" + id + "/dl/base64"
                                print("status::get_result_url::", get_result_url)
                                r = requests.get(url=get_result_url)
                                return_val = json.loads(r.text)
                                status = return_val["status"]
                                code = return_val["code"]
                                if code == 200 and status == "ok":
                                    content = return_val["data"]["content"]
                                    pdf_object_storage_key = object_storage_key + "final_pdf" + "/" + file_name_without_ext + ".pdf"
                                    print("cos_everest_submission_bucket: {}: pdf_object_storage_key: {} "
                                          .format(cos_everest_submission_bucket, pdf_object_storage_key))
                                    # Write the converted PDF to object storage
                                    return_val = cosutils.save_file(
                                        cos_everest_submission_bucket,
                                        pdf_object_storage_key,
                                        base64.b64decode(content))
                                    db_conn = db2utils.get_connection()
                                    print("db_conn: {}".format(db_conn))
                                    sql = f'''SELECT ID FROM FINAL TABLE
                                    (INSERT INTO EVERESTSCHEMA.EVRE_LEARNING_EMAIL_ATTACHMENTS
                                    (EVRE_EMAIL_MSG_ID, DOCUMENT_NAME, DOCUMENT_TYPE, CLASSIFICATION_TYPE, STATUS, USED_FOR, DESCRIPTION)
                                    VALUES ({submission_id}, '{pdf_object_storage_key}', '.pdf', 'N/A', 'N', 'RUNTIME', 'SPLIT_PDF') )'''
                                    print("sql: {}".format(sql))
                                    stmt = ibm_db.exec_immediate(db_conn, sql)
                                    result = ibm_db.fetch_both(stmt)
                                    attachment_id = None
                                    if result:
                                        attachment_id = result["ID"]
                                break
                        time.sleep(2)
        # End of for loop for PDF conversion
        db_conn = db2utils.get_connection()
        sql = f'''SELECT ID, STATUS,
        TO_CHAR(FIRST_UPDATED,'YYYY-MM-DD HH.MI.SS') as FIRST_UPDATED,
        TO_CHAR(LAST_UPDATED,'YYYY-MM-DD HH.MI.SS') as LAST_UPDATED
        FROM FINAL TABLE (UPDATE EVERESTSCHEMA.EVRE_LEARNING_EMAIL_MSGS
        SET STATUS = 'SPLIT_PDF' where ID = {submission_id})'''
        print("sql: {}".format(sql))
        stmt = ibm_db.exec_immediate(db_conn, sql)
        result = ibm_db.fetch_assoc(stmt)
        result_list = []
        if result:
            result_list.append(result)
        result_dict = {}
        result_dict["result"] = result_list
        result_dict["status"] = "SUCCESS"
        return result_dict
    except Exception as err:
        # ibm_db.conn_error/conn_errormsg are diagnostic functions, not exception
        # classes, so they cannot appear in an except clause
        logging.exception(err)
        result_dict = {}
        result_dict["error"] = str(err)
        result_dict["status"] = "FAILURE"
        return result_dict
    return {"result": "Flow should not reach here"}
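# A small validation helper, sketched here as a suggestion rather than code
# from the original action, that factors out the repeated missing-parameter
# checks used by these fn_* entry points:

def require_param(params, key):
    """Return params[key], raising if the value is missing or empty."""
    value = params.get(key)
    if value is None or value == "":
        raise Exception("Pass " + key)
    return value

# usage: submission_id = require_param(params, "submission_id")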
def test_IBMHIST(conn, database):
    print("Testing IBMHIST functionality ...")

    # test IBMHIST.PROC_COLLECT with a dummy SQL task
    print("   Testing IBMHIST.PROC_COLLECT with a dummy SQL task ...")

    # create dummy SQL task
    ibm_db.exec_immediate(
        conn,
        "INSERT INTO IBMHIST.TAB_TASKS VALUES ( 'DUMMY_SQL_TASK', 'SQL', 'SELECT ''DUMMY_DATA'' FROM SYSIBM.SYSDUMMY1', 'DUMMY_HEADER\n')"
    )

    # call IBMHIST.PROC_COLLECT on dummy SQL task
    ibm_db.exec_immediate(conn, "CALL IBMHIST.PROC_COLLECT('%s', 'DUMMY_SQL_TASK')" % (database))

    # check IBMHIST.TAB_ERRS for errors
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_ERRS")
    errs = ibm_db.fetch_assoc(stmt)
    assert not errs, "Errors were produced calling IBMHIST.PROC_COLLECT on a dummy SQL task: %s" % errs

    # check IBMHIST.TAB_DIRS for directory
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_DIRS WHERE STATUS = 'COLL'")
    coll_dir = ibm_db.fetch_assoc(stmt)
    assert coll_dir, "Collection directory was not found in IBMHIST.TAB_DIRS after calling IBMHIST.PROC_COLLECT"

    # ensure collection directory and collection file exist
    assert os.path.exists(coll_dir['PATH']), \
        "Collection directory was not found after calling IBMHIST.PROC_COLLECT: %s" % coll_dir['PATH']
    coll_file = [f for f in os.listdir(coll_dir['PATH']) if 'DUMMY_SQL_TASK' in f]
    assert coll_file, "Collection file was not found after calling IBMHIST.PROC_COLLECT: %s" % \
        os.path.join(coll_dir['PATH'], 'DUMMY_SQL_TASK_timestamp.del')

    # read collection file and ensure header and data are correct
    coll_file = os.path.join(coll_dir['PATH'], coll_file[0])
    with open(coll_file) as f:
        lines = f.readlines()
    assert "DUMMY_HEADER" in lines[0], \
        "Header not found in collection file %s after calling IBMHIST.PROC_COLLECT on a dummy SQL task" % coll_file
    assert "DUMMY_DATA" in lines[1], \
        "Data not found in collection file %s after calling IBMHIST.PROC_COLLECT on a dummy SQL task" % coll_file

    # test IBMHIST.PROC_COLLECT with a dummy SYS task
    print("   Testing IBMHIST.PROC_COLLECT with a dummy SYS task ...")

    # create dummy SYS task
    ibm_db.exec_immediate(
        conn,
        "INSERT INTO IBMHIST.TAB_TASKS VALUES ( 'DUMMY_SYS_TASK', 'SYS', 'echo DUMMY_DATA', 'DUMMY_HEADER\n')"
    )

    # call IBMHIST.PROC_COLLECT on dummy SYS task
    ibm_db.exec_immediate(conn, "CALL IBMHIST.PROC_COLLECT('%s', 'DUMMY_SYS_TASK')" % (database))

    # check IBMHIST.TAB_ERRS for errors
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_ERRS")
    errs = ibm_db.fetch_assoc(stmt)
    assert not errs, "Errors were produced calling IBMHIST.PROC_COLLECT on a dummy SYS task: %s" % errs

    # check IBMHIST.TAB_DIRS for directory
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_DIRS WHERE STATUS = 'COLL'")
    coll_dir = ibm_db.fetch_assoc(stmt)
    assert coll_dir, "Collection directory was not found in IBMHIST.TAB_DIRS after calling IBMHIST.PROC_COLLECT"

    # ensure collection directory and collection file exist
    assert os.path.exists(coll_dir['PATH']), \
        "Collection directory was not found after calling IBMHIST.PROC_COLLECT: %s" % coll_dir['PATH']
    coll_file = [f for f in os.listdir(coll_dir['PATH']) if 'DUMMY_SYS_TASK' in f]
    assert coll_file, "Collection file was not found after calling IBMHIST.PROC_COLLECT: %s" % \
        os.path.join(coll_dir['PATH'], 'DUMMY_SYS_TASK_timestamp.del')

    # read collection file and ensure header and data are correct
    coll_file = os.path.join(coll_dir['PATH'], coll_file[0])
    with open(coll_file) as f:
        lines = f.readlines()
    assert "DUMMY_HEADER" in lines[0], \
        "Header not found in collection file %s after calling IBMHIST.PROC_COLLECT on a dummy SYS task" % coll_file
    assert "DUMMY_DATA" in lines[1], \
        "Data not found in collection file %s after calling IBMHIST.PROC_COLLECT on a dummy SYS task" % coll_file

    # test IBMHIST.PROC_ARCHIVE to archive collection directories
    print("   Testing IBMHIST.PROC_ARCHIVE to archive collection directories ...")

    # set time to 1 hour behind in IBMHIST.TAB_DIRS so collection directories are archived
    ibm_db.exec_immediate(conn, "UPDATE IBMHIST.TAB_DIRS SET TIME = TIME - 1 HOURS")

    # call IBMHIST.PROC_ARCHIVE
    ibm_db.exec_immediate(conn, "CALL IBMHIST.PROC_ARCHIVE('%s')" % (database))

    # check IBMHIST.TAB_ERRS for errors
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_ERRS")
    errs = ibm_db.fetch_assoc(stmt)
    assert not errs, "Errors were produced calling IBMHIST.PROC_ARCHIVE to archive collection directories: %s" % errs

    # check IBMHIST.TAB_DIRS for directory
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_DIRS WHERE STATUS = 'ARCH'")
    arch_dir = ibm_db.fetch_assoc(stmt)
    assert arch_dir, "Archival directory was not found in IBMHIST.TAB_DIRS after calling IBMHIST.PROC_ARCHIVE"

    # ensure archival directory exists and collection directory no longer exists
    assert os.path.exists(arch_dir['PATH']), \
        "Archival directory was not found after calling IBMHIST.PROC_ARCHIVE: %s" % arch_dir['PATH']
    assert not os.path.exists(coll_dir['PATH']), \
        "Collection directory was not deleted after calling IBMHIST.PROC_ARCHIVE: %s" % coll_dir['PATH']

    # test IBMHIST.PROC_ARCHIVE to delete archival directories once a max size has been reached
    print("   Testing IBMHIST.PROC_ARCHIVE to delete archival directories once max size is reached ...")

    # set size to max int value in IBMHIST.TAB_DIRS so archival directories are deleted
    ibm_db.exec_immediate(conn, "UPDATE IBMHIST.TAB_DIRS SET SIZE = 9223372036854775807")

    # call IBMHIST.PROC_ARCHIVE
    ibm_db.exec_immediate(conn, "CALL IBMHIST.PROC_ARCHIVE('%s')" % (database))

    # check IBMHIST.TAB_ERRS for errors
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_ERRS")
    errs = ibm_db.fetch_assoc(stmt)
    assert not errs, "Errors were produced calling IBMHIST.PROC_ARCHIVE to delete archival directories: %s" % errs

    # check IBMHIST.TAB_DIRS for directory
    stmt = ibm_db.exec_immediate(conn, "SELECT * FROM IBMHIST.TAB_DIRS WHERE STATUS = 'ARCH'")
    del_dir = ibm_db.fetch_assoc(stmt)
    assert not del_dir, "Archival directory was not deleted from IBMHIST.TAB_DIRS after calling IBMHIST.PROC_ARCHIVE to delete archival directories"

    # ensure archival directory no longer exists
    assert not os.path.exists(arch_dir['PATH']), \
        "Archival directory was not deleted after calling IBMHIST.PROC_ARCHIVE to delete: %s" % arch_dir['PATH']
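# A minimal sketch of how this test might be driven; the database name and
# credentials are placeholders, not values from the original harness.
import ibm_db

if __name__ == '__main__':
    database = "SAMPLE"  # assumed cataloged database name
    conn = ibm_db.connect(database, "db2inst1", "passw0rd")  # hypothetical credentials
    try:
        test_IBMHIST(conn, database)
        print("All IBMHIST checks passed.")
    finally:
        ibm_db.close(conn)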
print(cmd)
subprocess.call(cmd)

if __name__ == '__main__':
    num = sys.argv[1]  # number of worker processes, taken from the command line
    num = int(num)
    #num = 5  # fixed process count, for debugging
    p = Pool(num)
    url = "DATABASE=databen;HOSTNAME=132.7.42.101;PORT=60000;PROTOCOL=TCPIP;UID=databen;PWD=databen"
    conn = ibm_db.connect(url, "", "")
    # fetch the names of the tables whose data needs processing
    sql = "select * from mdmm.analysis_conf_tab "
    stmt = ibm_db.exec_immediate(conn, sql)
    table_dict = {}
    while True:
        res = ibm_db.fetch_assoc(stmt)
        if not res:
            break
        table_dict[res.get("ORI_TABLE_CODE")] = (res.get("ETL_DATE"), res.get("DATE_OFFSET"),
                                                 res.get("ANA_ALG"), res.get("TO_ANA_TB_PK"),
                                                 res.get("SYS_CODE"))
    for i in table_dict:
        # compute the end date from the ETL date plus the configured offset
        in_date = table_dict.get(i)[0]
        dt = datetime.datetime.strptime(in_date, "%Y%m%d")
        #print(dt)
        out_dt = (dt + datetime.timedelta(days=int(table_dict.get(i)[1])))
        #print(out_dt)
        end_dt = str(out_dt)[0:10].replace("-", "")
        #print(end_dt)
        pk = table_dict.get(i)[3]
        sys_code = table_dict.get(i)[4]
        p.apply_async(sub, args=(sys_code, i[4:], in_date, end_dt, pk))
    # wait for all queued tasks to finish before the main process exits
    p.close()
    p.join()
dsn = ("DRIVER={{IBM DB2 ODBC DRIVER}};" "DATABASE={0};" "HOSTNAME={1};" "PORT={2};" "PROTOCOL=TCPIP;" "UID={3};" "PWD={4};").format(dsn_database, dsn_hostname, dsn_port, dsn_uid, dsn_pwd) conn = ibm_db.connect(dsn, "", "") insert = 'SELECT * FROM XXXX.XXXX' stmt = ibm_db.exec_immediate(conn, insert) records = {} count = 0 dictionary = ibm_db.fetch_assoc(stmt) records[count] = dictionary while dictionary != False: count = count + 1 dictionary = ibm_db.fetch_assoc(stmt) records[count] = dictionary filename = 'co_200.csv' # Constants for IBM COS values COS_ENDPOINT = "" COS_API_KEY_ID = "" COS_AUTH_ENDPOINT = "" COS_RESOURCE_CRN = "" COS_BUCKET_LOCATION = "us-standard" bucket_name = "XXXX" '''Cloud Object Storage Methods'''
def main(params):
    logging.info('Calling fn_split_pdf.')
    try:
        cos_everest_submission_bucket = params.get("cos_everest_submission_bucket", None)
        if not cos_everest_submission_bucket:
            raise Exception("Pass location of the bucket")
        final_pdf_folder = params.get("final_pdf_folder", None)
        if not final_pdf_folder:
            raise Exception("Pass pdf folder to split files")
        submissions_data_folder = params.get("submissions_data_folder", None)
        if not submissions_data_folder:
            raise Exception("Pass submissions_data_folder")
        submission_id = params.get("submission_id", None)
        if not submission_id:
            raise Exception("Pass submission_id")
        mode = params.get("mode", None)
        if not mode:
            raise Exception("Pass mode")

        object_storage_key = submissions_data_folder + "/" + \
            mode + "/" + str(submission_id) + "/"  # + "/" + final_pdf_folder
        extensions = ['pdf']
        # the (?i) inline flag must lead the pattern; placing it mid-pattern
        # is an error on newer Python versions
        regex = r"(?i)^" + object_storage_key + r".*(pdf).*$"
        file_keys = cosutils.get_bucket_contents(cos_everest_submission_bucket, regex)
        print(file_keys)
        for key in file_keys:
            if key.lower().endswith(tuple(extensions)):
                file_name = os.path.basename(key)
                file_name_without_ext, file_extension = os.path.splitext(file_name)
                pdf_file_bytes = cosutils.get_item(cos_everest_submission_bucket, key)
                db_conn = db2utils.get_connection()
                print("db_conn: {}".format(db_conn))
                sql = f'''SELECT ID FROM EVERESTSCHEMA.EVRE_LEARNING_EMAIL_ATTACHMENTS
                where EVRE_EMAIL_MSG_ID={submission_id} and DOCUMENT_NAME='{key}' '''
                print("sql: {}".format(sql))
                stmt = ibm_db.exec_immediate(db_conn, sql)
                result = ibm_db.fetch_both(stmt)
                pdf_id = -1
                if result:
                    pdf_id = result["ID"]
                # read the pdf and write each page out as its own document
                pdf = PdfFileReader(BytesIO(pdf_file_bytes))
                num_of_pages = pdf.getNumPages()
                print("num_of_pages:: {}".format(num_of_pages))
                for page in range(num_of_pages):
                    pdf_writer = PdfFileWriter()
                    pdf_writer.addPage(pdf.getPage(page))
                    split_pdf_dir = "final_pdf_split"
                    output_filename_key = '{}{}/{}_page_{}.pdf'.format(
                        object_storage_key, split_pdf_dir, file_name_without_ext, page + 1)
                    tmp = BytesIO()
                    pdf_writer.write(tmp)
                    tmp.seek(0)
                    output_page_bytes = tmp.read()
                    # print("Bytes:: {} ", output_page_bytes)
                    return_val = cosutils.save_file(cos_everest_submission_bucket,
                                                    output_filename_key, output_page_bytes)
                    # compare strings with ==, not the identity operator "is"
                    if return_val == "SUCCESS":
                        print("File Uploaded to object storage successfully:: {}".format(output_filename_key))
                        db_conn = db2utils.get_connection()
                        print("db_conn: {}".format(db_conn))
                        sql = f'''SELECT ID FROM FINAL TABLE
                        (INSERT INTO EVERESTSCHEMA.EVRE_LEARNING_SPLIT_CONTENT
                        (EVRE_EMAIL_MSG_ID, EVRE_LEARNING_EMAIL_ATTACHMENTS_ID, DOCUMENT_NAME, DOCUMENT_TYPE,
                         CLASSIFICATION_TYPE, STATUS, USED_FOR, DESCRIPTION)
                        VALUES ({submission_id}, {pdf_id}, '{output_filename_key}', '.pdf', 'N/A', 'N',
                                'RUNTIME', 'STANDARDIZE_TO_TXT') )'''
                        print("sql: {}".format(sql))
                        stmt = ibm_db.exec_immediate(db_conn, sql)
                        result = ibm_db.fetch_both(stmt)
                        attachment_id = None
                        if result:
                            attachment_id = result["ID"]
        # end of for loop
        db_conn = db2utils.get_connection()
        sql = f'''SELECT ID, STATUS,
        TO_CHAR(FIRST_UPDATED,'YYYY-MM-DD HH.MI.SS') as FIRST_UPDATED,
        TO_CHAR(LAST_UPDATED,'YYYY-MM-DD HH.MI.SS') as LAST_UPDATED
        FROM FINAL TABLE (UPDATE EVERESTSCHEMA.EVRE_LEARNING_EMAIL_MSGS
        SET STATUS = 'STANDARDIZE_TO_TXT' where ID = {submission_id})'''
        print("sql: {}".format(sql))
        stmt = ibm_db.exec_immediate(db_conn, sql)
        result = ibm_db.fetch_assoc(stmt)
        result_list = []
        if result:
            result_list.append(result)
        result_dict = {}
        result_dict["result"] = result_list
        result_dict["status"] = "SUCCESS"
        return result_dict
    except Exception as err:
        logging.exception(err)
        result_dict = {}
        result_dict["error"] = str(err)
        result_dict["status"] = "FAILURE"
        return result_dict
    return {"result": "Flow should not reach here"}
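# An alternative sketch to threading regex flags through cosutils: fetch the
# keys under the prefix and filter them locally with a compiled,
# case-insensitive pattern. The helper name is hypothetical, not from the
# original action.
import re

def filter_pdf_keys(keys, prefix):
    pdf_re = re.compile(re.escape(prefix) + r".*\.pdf$", re.IGNORECASE)
    return [k for k in keys if pdf_re.match(k)]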
def main(params):
    logging.info('Calling fn_extract_email_msgs.')
    try:
        cos_everest_submission_bucket = params.get("cos_everest_submission_bucket", None)
        if not cos_everest_submission_bucket:
            raise Exception("Pass location of the bucket")
        submission_id = params.get("submission_id", None)
        if not submission_id:
            raise Exception("Pass submission_id")
        mode = params.get("mode", None)
        if not mode:
            raise Exception("Pass mode")

        # Create a directory on the local drive
        object_storage_key_prefix = OBJECT_STORAGE_EMAIL_ATTACHMENTS_ROOT_FOLDER + "/" + mode
        db_conn = db2utils.get_connection()
        print("db_conn: {}".format(db_conn))
        sql = f'''SELECT ID, DOCUMENT_NAME, ENCODED_ID, HEX(ENCODED_ID) as MSG_DOCUMENT_ID
        FROM EVERESTSCHEMA.EVRE_LEARNING_EMAIL_MSGS where ID={submission_id}'''
        stmt = ibm_db.exec_immediate(db_conn, sql)
        result = ibm_db.fetch_both(stmt)
        msg_object_storage_key = None
        msg_document_id = None
        msg_id = None
        if result:
            msg_id = result["ID"]
            msg_object_storage_key = result["DOCUMENT_NAME"]
            msg_encoded_id = result["ENCODED_ID"]
            msg_document_id = result["MSG_DOCUMENT_ID"]
        else:
            raise Exception("No email message document found")

        email_message_bytes = cosutils.get_item(cos_everest_submission_bucket, msg_object_storage_key)
        # extract attachments
        msg = extract_msg.Message(email_message_bytes, attachmentClass=EmailAttachmentClass)
        # save message body
        # self.__crlf = '\r\n'
        msg_from = msg.sender
        msg_to = msg.to
        msg_cc = msg.cc
        msg_subject = msg.subject
        msg_date = msg.date
        msg_body = msg.body
        msg_file_content = """
        {msg_from}
        {msg_to}
        {msg_cc}
        {msg_subject}
        {msg_date}
        {msg_body}
        """.format(msg_from=msg_from, msg_to=msg_to, msg_cc=msg_cc,
                   msg_subject=msg_subject, msg_date=msg_date, msg_body=msg_body)
        print(msg_file_content)
        object_storage_key = object_storage_key_prefix + "/" + str(msg_id) + "/" + (msg_document_id + "_message.txt")
        return_val = cosutils.save_file(cos_everest_submission_bucket, object_storage_key, msg_file_content)
        # compare strings with ==, not the identity operator "is"
        if return_val == "SUCCESS":
            print("File Uploaded to object storage successfully")
            # create entries in DB2
            db_conn = db2utils.get_connection()
            print("db_conn: {}".format(db_conn))
            sql = f'''SELECT ID FROM FINAL TABLE
            (INSERT INTO EVERESTSCHEMA.EVRE_LEARNING_EMAIL_ATTACHMENTS
            (EVRE_EMAIL_MSG_ID, DOCUMENT_NAME, DOCUMENT_TYPE, CLASSIFICATION_TYPE, STATUS, USED_FOR)
            VALUES ({msg_id}, '{object_storage_key}', '.txt', 'N/A', 'CONVERT_TO_PDF', '{mode}') )'''
            # print ("sql: {}".format(sql))
            stmt = ibm_db.exec_immediate(db_conn, sql)
            result = ibm_db.fetch_both(stmt)
            attachment_id = None
            if result:
                attachment_id = result["ID"]
                print(f'attachment_id: {attachment_id}')
        else:
            raise Exception("File upload to object storage failed")

        attachments = msg.attachments
        count_attachments = len(attachments)
        print("count_attachments: {}".format(count_attachments))
        if count_attachments == 0:
            print('No attachments found for msg:: {}'.format(msg_object_storage_key))
        else:
            attachment_dir = None
            for i in range(count_attachments):
                attachment = attachments[i]
                attachment_id = attachment.save(
                    object_storage_bucket_name=cos_everest_submission_bucket,
                    object_storage_key_prefix=object_storage_key_prefix,
                    save_to_object_storage=True,
                    msg_id=msg_id,
                    msg_encoded_id=msg_encoded_id,
                    msg_document_id=msg_document_id,
                    mode=mode)

        sql = f'''SELECT ID, STATUS,
        TO_CHAR(FIRST_UPDATED,'YYYY-MM-DD HH.MI.SS') as FIRST_UPDATED,
        TO_CHAR(LAST_UPDATED,'YYYY-MM-DD HH.MI.SS') as LAST_UPDATED
        FROM FINAL TABLE (UPDATE EVERESTSCHEMA.EVRE_LEARNING_EMAIL_MSGS
        SET STATUS = 'CONVERT_TO_PDF' where ID = {msg_id})'''
        print("sql: {}".format(sql))
        stmt = ibm_db.exec_immediate(db_conn, sql)
        result = ibm_db.fetch_assoc(stmt)
        result_list = []
        if result:
            id = str(result["ID"])
            result_list.append(result)
        result_dict = {}
        result_dict["result"] = result_list
        result_dict["status"] = "SUCCESS"
        return result_dict
    except Exception as err:
        logging.exception(err)
        result_dict = {}
        result_dict["error"] = str(err)
        result_dict["status"] = "FAILURE"
        return result_dict
    return {"result": "Flow should not reach here"}
# conn = ibm_db_dbi.connect('driver={IBM DB2 ODBC DRIVER };port=60000;protocol=tcpip;','ccweb','ccweb55','192.168.61.55','webdb2',{})

# create the table
# create = 'create table test1 (name1 char(8) not null primary key,depid smallint,pay bigint)'
# result = ibm_db.exec_immediate(conn,create)
# if result:
#     print('Table created successfully!')
# else:
#     print('Table creation failed!')

# insert data; the string value needs quotes inside the SQL text
insert = "insert into test1(name1,depid,pay) values('%s',%d,%d)" % (now, num1, num2)
# result = ibm_db.exec_immediate(conn,insert)
result = insert_data(conn, insert)
if result:
    print('Data inserted successfully!')
else:
    print('Data insert failed!')

# query
select = 'select * from test1'
# result = ibm_db.exec_immediate(conn,select)
result = query_data(conn, select)
row = ibm_db.fetch_assoc(result)
print('----------------------')
while row:
    print(row.get('NAME1') + ' ' + str(row.get('DEPID')) + ' ' + str(row.get('PAY')))
    row = ibm_db.fetch_assoc(result)

close_db2(conn)
print("Login successful!\n") while (user is not None): print("Welcome to SocialNet, {0}!".format(user[1])) print_menu() print(user) print("\nSocialNet Command Line Interface:\n") while (True): command = input("\t-> ") args = command.split() if (args[0].lower() in available_commands): if (args[0].lower() == "view"): uid_query = "SELECT * FROM User WHERE username = \'{0}\'".format( ' '.join(args[1:])) uid_stmt = db2.exec_immediate(connection, uid_query) uid_res = db2.fetch_assoc(uid_stmt) if (uid_res != False): friend_id = uid_res['OWNID'] else: print( "That user doesn't have an account on SocialNet..." ) break owner_query = "SELECT privacy, description FROM PageOwner WHERE ownID = {0}".format( friend_id) owner_stmt = db2.exec_immediate(connection, owner_query) owner_res = db2.fetch_assoc(owner_stmt) if (owner_res != False): owner_prv = owner_res['PRIVACY'] else:
def get(self):
    self.set_header('Content-Type', 'application/json')
    # fetch data from db and return a json
    result = {}
    # hotel: id, name, latitude, longitude
    # attraction: id, name, latitude, longitude
    # hawker center: id, latitude, longitude
    bnb_dict = {}
    attr_dict = {}
    hawker_center_dict = {}
    conn = ibm_db.connect(
        "DATABASE=BLUDB;HOSTNAME=dashdb-entry-yp-dal09-09.services.dal.bluemix.net;\
PORT=50000;PROTOCOL=TCPIP;UID=dash9787;\
PWD=X_c03EeYTe#u;", "", "")

    sql_airbnb = "SELECT ROOMID,NAME,LATITUDE,LONGITUDE,PRICE,RATING,IMGURL,ROOMURL FROM AIRBNB"
    stmt = ibm_db.exec_immediate(conn, sql_airbnb)
    while True:
        dict_airbnb = ibm_db.fetch_assoc(stmt)
        if dict_airbnb is False:
            break
        bnb_dict[int(dict_airbnb['ROOMID'].strip())] = {
            'id': int(dict_airbnb['ROOMID'].strip()),
            'name': dict_airbnb['NAME'].strip(),
            'price': float(dict_airbnb['PRICE'].strip()),
            'rating': float(dict_airbnb['RATING'].strip()),
            'lat': float(dict_airbnb['LATITUDE'].strip()),
            'lng': float(dict_airbnb['LONGITUDE'].strip()),
            'img': dict_airbnb['IMGURL'],
            'roomURL': dict_airbnb['ROOMURL'],
        }

    sql_attr = "SELECT ATTRACTIONID,NAME,LATITUDE,LONGITUDE,POPULARITY, RATING, CATEGORY,TICKET_PRICE FROM TOURISM_ATTRACTIONS"
    stmt = ibm_db.exec_immediate(conn, sql_attr)
    while True:
        dict_attr = ibm_db.fetch_assoc(stmt)
        if dict_attr is False:
            break
        attr_dict[int(dict_attr['ATTRACTIONID'].strip())] = {
            'ATTRACTIONID': int(dict_attr['ATTRACTIONID'].strip()),
            'NAME': dict_attr['NAME'].strip(),
            'TICKET_PRICE': float(dict_attr['TICKET_PRICE'].strip()),
            'LATITUDE': float(dict_attr['LATITUDE'].strip()),
            'LONGITUDE': float(dict_attr['LONGITUDE'].strip()),
            'CATEGORY': dict_attr['CATEGORY'],
            'POPULARITY': dict_attr['POPULARITY'],
            'RATING': dict_attr['RATING'],
        }

    sql_food = "SELECT FOODID,NAME,LATITUDE,LONGITUDE FROM FOOD"
    stmt = ibm_db.exec_immediate(conn, sql_food)
    while True:
        dict_food = ibm_db.fetch_assoc(stmt)
        if dict_food is False:
            break
        hawker_center_dict[dict_food['FOODID']] = {
            'id': dict_food['FOODID'],
            'name': dict_food['NAME'],
            'lat': float(dict_food['LATITUDE'].strip()),
            'lng': float(dict_food['LONGITUDE'].strip()),
        }

    dict_all = {
        'hotels': bnb_dict,
        'attractions': attr_dict,
        'hawker_centers': hawker_center_dict
    }
    self.write(json_encode(dict_all))
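# The connection string above embeds live credentials in source. A common
# alternative, sketched here with assumed variable names, is to read them from
# environment variables at startup:
import os
import ibm_db

def connect_from_env():
    dsn = ("DATABASE={db};HOSTNAME={host};PORT={port};"
           "PROTOCOL=TCPIP;UID={uid};PWD={pwd};").format(
        db=os.environ["DB2_DATABASE"],
        host=os.environ["DB2_HOSTNAME"],
        port=os.environ.get("DB2_PORT", "50000"),
        uid=os.environ["DB2_UID"],
        pwd=os.environ["DB2_PWD"])
    return ibm_db.connect(dsn, "", "")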
def main(args):
    global conn
    repoCount = 0
    processedRepos = 0
    logtext = "cloudfunction ("
    errortext = ""
    ssldsn = args["__bx_creds"]["dashDB"]["ssldsn"]
    #ssldsn = args["ssldsn"]
    if globals().get("conn") is None:
        conn = ibm_db.connect(ssldsn, "", "")
    # go over all system users
    allTenants = ibm_db.exec_immediate(conn, allTenantsStatement)
    if (allTenants):
        # prepare statement for logging
        logStmt = ibm_db.prepare(conn, insertLogEntry)
        # fetch first user
        tenant = ibm_db.fetch_assoc(allTenants)
        while tenant != False:
            # go over all repos managed by that user and fetch traffic data
            # first, login to GitHub as that user
            gh = github.GitHub(username=tenant["GHUSER"], access_token=tenant["GHTOKEN"])
            userRepoCount = 0
            # prepare and execute statement to fetch related repositories
            reposStmt = ibm_db.prepare(conn, allReposStatement)
            if (ibm_db.execute(reposStmt, (tenant["TID"],))):
                repo = ibm_db.fetch_assoc(reposStmt)
                while repo != False:
                    repoCount = repoCount + 1
                    # fetch view and clone traffic
                    try:
                        viewStats = gh.repos(repo["USERNAME"], repo["RNAME"]).traffic.views.get()
                        cloneStats = gh.repos(repo["USERNAME"], repo["RNAME"]).traffic.clones.get()
                        if viewStats['views']:
                            mergeViewData(viewStats, repo["RID"])
                        if cloneStats['clones']:
                            mergeCloneData(cloneStats, repo["RID"])
                        userRepoCount = userRepoCount + 1
                        # For debugging:
                        # print(repo["USERNAME"] + " " + repo["RNAME"])
                        # update global repo counter
                        processedRepos = processedRepos + 1
                        # fetch next repository
                        repo = ibm_db.fetch_assoc(reposStmt)
                    except Exception:
                        # remember the failing repo and move on to the next one
                        errortext = errortext + str(repo["RID"]) + " "
                        # fetch next repository
                        repo = ibm_db.fetch_assoc(reposStmt)
            # insert log entry
            ts = time.gmtime()
            logtext = logtext + str(processedRepos) + "/" + str(repoCount) + ")"
            if errortext != "":
                logtext = logtext + ", repo errors: " + errortext
            res = ibm_db.execute(logStmt, (tenant["TID"], time.strftime("%Y-%m-%d %H:%M:%S", ts), userRepoCount, logtext))
            # fetch next system user
            tenant = ibm_db.fetch_assoc(allTenants)
    return {"repoCount": repoCount}
def daynight():
    sql = "SELECT * FROM earthquake"
    day_start = '06:00:00'
    day_end = '20:00:00'
    stmt = ibm_db.exec_immediate(db2conn, sql)
    result = ibm_db.fetch_assoc(stmt)
    my_list_day = []
    my_list_night = []
    count_night = 0
    count_day = 0
    high_mag_day = 0
    high_mag_night = 0
    while result:
        # the TIME column looks like '2018-01-01T12:34:56.789Z'; keep HH:MM:SS
        time = result['TIME']
        time = time.split('.')
        time = time[0].split('T')
        time = str(time[1])
        print(time)
        time = time.split(':')
        time = int(time[0]) * 3600 + int(time[1]) * 60 + int(time[2])
        # shift UTC toward local solar time: 240 seconds per degree of longitude
        lon = int(result['LONGITUDE'])
        time = time + lon * 240
        hr = time // 3600
        time = time - (hr * 3600)
        minute = time // 60
        time = time - (minute * 60)
        sec = time
        # wrap into the 0-23 range; Python's % also handles negative hours
        hr = hr % 24
        # zero-pad so the lexicographic comparison against day_start/day_end works
        time = ':'.join([str(hr).zfill(2), str(minute).zfill(2), str(sec).zfill(2)])
        print('new time', time)
        if result['MAG']:
            if time <= day_start or time >= day_end:
                my_list_night.append([result['TIME'], result['LATITUDE'], result['LONGITUDE'],
                                      result['DEPTH'], result['MAG'], result['PLACE']])
                count_night += 1
                if result['MAG'] > 4:
                    high_mag_night += 1
            else:
                my_list_day.append([result['TIME'], result['LATITUDE'], result['LONGITUDE'],
                                    result['DEPTH'], result['MAG'], result['PLACE']])
                count_day += 1
                if result['MAG'] > 4:
                    high_mag_day += 1
        result = ibm_db.fetch_assoc(stmt)
    return render_template('resultdaynight.html', data_day=my_list_day, data_night=my_list_night,
                           night=count_night, day=count_day, high_day=high_mag_day,
                           high_night=high_mag_night)
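# The same local-solar-time conversion, sketched with datetime arithmetic;
# purely illustrative, the route above keeps its manual math.
from datetime import datetime, timedelta

def local_solar_hms(utc_iso, longitude_deg):
    """'2018-01-01T12:34:56.789Z' plus longitude -> zero-padded 'HH:MM:SS'."""
    hms = utc_iso.split('.')[0].split('T')[1]
    t = datetime.strptime(hms, "%H:%M:%S")
    # 360 degrees over 86400 seconds gives 240 seconds per degree
    t = t + timedelta(seconds=int(longitude_deg) * 240)
    return t.strftime("%H:%M:%S")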
    except Exception:
        pass

    # If The SQL Statement Could Not Be Executed, Display An Error Message And Exit
    if returnCode is False:
        print("\nERROR: Unable to execute the SQL statement specified.")
        conn.closeConnection()
        exit(-1)
    # Otherwise, Complete The Status Message
    else:
        print("Done!\n")

    # Retrieve The Data Produced By The SQL Statement And Store It In A Python Dictionary
    dataRecord = False
    try:
        dataRecord = ibm_db.fetch_assoc(preparedStmt)
    except Exception:
        pass

    # If The Data Could Not Be Retrieved, Display An Error Message And Exit
    # (test the fetch result itself, not the earlier execute return code)
    if dataRecord is False:
        print("\nERROR: Unable to retrieve the data produced by the SQL statement.")
        conn.closeConnection()
        exit(-1)
    # If The Data Could Be Retrieved, Display It
    else:
        print("Query results:\n")
        print("FIRSTNME    LASTNAME")
        print("__________  ________")
        print("{:10}  {:<24}\n".format(dataRecord['FIRSTNME'], dataRecord['LASTNAME']))
def donate(id):
    conn = connect.connection()
    noDonation = ""
    tem_dcheque = ""
    if "mail" in session:
        mail = session["mail"]
        prevProjects = set()
        sql_stmt = "select * from projekt where kennung = %s" % id
        prevProjectsResult = ibm_db.exec_immediate(conn, sql_stmt)
        if prevProjectsResult is not None:
            row = ibm_db.fetch_tuple(prevProjectsResult)
            prevProjects.add(row)
            while row:
                row = ibm_db.fetch_tuple(prevProjectsResult)
                if row:
                    prevProjects.add(row)
        # has this user already donated to the project?
        sql_stmt = "select * from spenden where spender = '" + mail + "' and projekt = '" + id + "'"
        donationcheque = ibm_db.exec_immediate(conn, sql_stmt)
        donation_cheque = ibm_db.fetch_assoc(donationcheque)
        if donation_cheque is False:
            tem_dcheque = "possible"
        if request.method == "POST":
            donate = request.form["donate"]
            deciDonate = Decimal(donate)
            anonymousid = request.form["anonymousid"]
            balanceCheque = set()
            sql_stmt4 = "select * from konto where inhaber = '" + mail + "'"
            balanceChequeResult = ibm_db.exec_immediate(conn, sql_stmt4)
            if balanceChequeResult is not None:
                row1 = ibm_db.fetch_tuple(balanceChequeResult)
                balanceCheque.add(row1)
                currentBalance = Decimal(row1[1])
                print(currentBalance, type(currentBalance))
            if currentBalance >= deciDonate:
                print(deciDonate, type(deciDonate))
                # record the donation, either privately or publicly
                if anonymousid == 'yes':
                    sql_stmt = "insert into spenden (spender,projekt,spendenbetrag,sichtbarkeit) values ('" + mail + "','" + id + "' , '" + donate + "','privat')"
                    insertResult = ibm_db.exec_immediate(conn, sql_stmt)
                else:
                    sql_stmt1 = "insert into spenden (spender,projekt,spendenbetrag,sichtbarkeit) values ('" + mail + "','" + id + "' , '" + donate + "','oeffentlich')"
                    insertResult = ibm_db.exec_immediate(conn, sql_stmt1)
                # close the project once the funding limit is reached
                spendenInfo = set()
                sql_stmt2 = ("select kennung,titel,sum(spendenbetrag),ersteller,finanzierungslimit from projekt left join spenden on projekt.kennung = spenden.projekt where kennung = '%s' group by kennung,titel,ersteller,finanzierungslimit ") % id
                spendenInfoResult = ibm_db.exec_immediate(conn, sql_stmt2)
                if spendenInfoResult is not None:
                    row1 = ibm_db.fetch_tuple(spendenInfoResult)
                    spendenInfo.add(row1)
                    ref_sum = row1[2]
                    ref_limit = row1[4]
                    tem_sum = Decimal(ref_sum)
                    tem_limit = Decimal(ref_limit)
                    if tem_sum >= tem_limit:
                        sql_stmt3 = "update projekt set status = 'geschlossen' where kennung = %s" % id
                        projectStatusResult = ibm_db.exec_immediate(conn, sql_stmt3)
                # deduct the donation from the donor's account balance
                balanceReduce = set()
                sql_stmt4 = "select * from konto where inhaber = '" + mail + "'"
                balanceReduceResult = ibm_db.exec_immediate(conn, sql_stmt4)
                if balanceReduceResult is not None:
                    row1 = ibm_db.fetch_tuple(balanceReduceResult)
                    balanceReduce.add(row1)
                    prev_balance = row1[1]
                    temp_bal = Decimal(prev_balance)
                    newBalance = temp_bal - deciDonate
                    print(newBalance, type(newBalance))
                    tempNewBal = str(newBalance)
                    print(tempNewBal, type(tempNewBal))
                    sql_stmt3 = "update konto set guthaben = '" + tempNewBal + "' where inhaber = '" + mail + "'"
                    projectStatusResult = ibm_db.exec_immediate(conn, sql_stmt3)
                return redirect(url_for("projectdetails", id=id))
            else:
                noDonation = "yes"
        return render_template('donate.html', prevProjects=prevProjects, noDonation=noDonation, tem_dcheque=tem_dcheque)
    else:
        return redirect(url_for("login"))
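# The balance check, donation insert, and balance update above run as separate
# auto-committed statements, so a failure in between can leave the accounts
# inconsistent. A sketch, not from the original app, of running several
# statements atomically with ibm_db's transaction controls:
import ibm_db

def run_atomically(conn, statements):
    """Execute the given SQL statements in one transaction; roll back on failure."""
    ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
    try:
        for sql in statements:
            ibm_db.exec_immediate(conn, sql)
        ibm_db.commit(conn)
    except Exception:
        ibm_db.rollback(conn)
        raise
    finally:
        ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_ON)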
def select_data(table_name, area_name):
    sql = "SELECT * FROM %s WHERE STOCK_NAME='%s'" % (table_name, area_name)
    stmt = ibm_db.exec_immediate(conn, sql)
    row = ibm_db.fetch_assoc(stmt)
    return row['MAX_LENGTH'], row['MAX_WIDTH'], row['CURRENT_RATIO']
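# A variant sketch that binds the value as a parameter. The table name cannot
# be a parameter marker in SQL, so it is still interpolated and must come from
# a trusted source; only the function name here is new.
def select_data_bound(table_name, area_name):
    sql = "SELECT MAX_LENGTH, MAX_WIDTH, CURRENT_RATIO FROM %s WHERE STOCK_NAME = ?" % table_name
    stmt = ibm_db.prepare(conn, sql)
    ibm_db.execute(stmt, (area_name,))
    row = ibm_db.fetch_assoc(stmt)
    return row['MAX_LENGTH'], row['MAX_WIDTH'], row['CURRENT_RATIO']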
    gmaps = googlemaps.Client(key='AIzaSyDA-IfaBH36LojL7Xwugxq0wcj1sLyfrf8')
    data = gmaps.geocode(address)
    result = {
        "lat": data[0]['geometry']['location']['lat'],
        "lng": data[0]['geometry']['location']['lng']
    }
    return result

print "****MAKING SQL SELECT QUERY****"
# CHANGE TO EXCLUDE ALREADY PROCESSED RECORDS IN THE SQL QUERY!!
sql_select_q = "select delivery_id,formatted_address from customer_analytics.gj_active_dealers where delivery_type = 'CONSUMER' and channel_l1 = 'Spark' and channel_l2 = 'Spark Retail Stores'"
sql_select_stmt = ibm_db.exec_immediate(sql_conn, sql_select_q)
dealer_dict = ibm_db.fetch_assoc(sql_select_stmt)
while dealer_dict != False:
    print "****NEW DEALER****"
    print "The delivery_id is : ", dealer_dict["DELIVERY_ID"]
    print "The formatted_address is : ", dealer_dict["FORMATTED_ADDRESS"]
    loc = geocode(dealer_dict["FORMATTED_ADDRESS"])
    print "The Lat is : ", loc['lat']
    print "The Lng is : ", loc['lng']
    ibm_db.bind_param(sql_insert_stmt, 1, dealer_dict["DELIVERY_ID"])
    ibm_db.bind_param(sql_insert_stmt, 2, dealer_dict["FORMATTED_ADDRESS"])
    ibm_db.bind_param(sql_insert_stmt, 3, loc['lat'])
    ibm_db.bind_param(sql_insert_stmt, 4, loc['lng'])
    ibm_db.execute(sql_insert_stmt)
    # advance to the next dealer row; without this fetch the loop never terminates
    dealer_dict = ibm_db.fetch_assoc(sql_select_stmt)
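# sql_insert_stmt is prepared elsewhere in the original script; a hypothetical
# sketch of what that preparation could look like. The target table and column
# names are assumptions, not from the source.
sql_insert_q = ("insert into customer_analytics.gj_dealer_geocode "
                "(delivery_id, formatted_address, lat, lng) values (?,?,?,?)")
sql_insert_stmt = ibm_db.prepare(sql_conn, sql_insert_q)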