def insert_record(self):
    """Prompt for the five Employee columns on stdin and insert one row.

    Side effects: reads stdin, writes to the database, prints a status line.
    """
    # create object of class Connection
    con1 = Connection()
    con = con1.conn()
    # create cursor means create object of cursor
    cur = con.cursor()
    # parameterized insert into the Employee table (values stay out of the SQL text)
    q = "insert into Employee values(%s,%s,%s,%s,%s)"
    # input from user
    eid = int(input("Enter Employee Id : "))
    n = input("Enter Employee Name ")  # varchar type in table = string
    a = int(input("Enter Employee Age : "))
    m = input("Enter Employee Mobile No. :")
    s = int(input("Enter Employee Salary :"))
    # create tuple of parameters in column order
    val = (eid, n, a, m, s)
    # Insert Query run
    try:
        cur.execute(q, val)
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        print("Query Error")
    else:
        # save in database use commit()
        con.commit()
        print("Record Insert Successfully")
    finally:
        # close cursor and connection on every path, not only on success
        cur.close()
        con.close()
def insert_record(self):
    """Prompt for the eleven employee columns on stdin and insert one row.

    Side effects: reads stdin, writes to the database, prints a status line.
    """
    # create object of class connection
    con1 = Connection()
    con = con1.conn()
    # create cursor means create object of cursor
    cur = con.cursor()
    # parameterized insert into the employee table (11 columns)
    q = "insert into employee values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    # input from user, one prompt per column
    empid = int(input("Enter Employee id=>"))
    dno = int(input("Enter department number=>"))
    deptn = input("Enter department name=>")
    desig = input("Enter designation=>")
    emn = input("Enter Employee name=>")
    a = int(input("Enter employee age=>"))
    c = input("Enter Empolyee city=>")
    email = input("Enter Email=>")
    p = input("Enter password=>")
    m = input("Enter mobile number=>")
    bs = float(input("Enter basic salary=>"))
    # create tuple of parameters in column order
    val = (empid, dno, deptn, desig, emn, a, c, email, p, m, bs)
    # insert Query run
    try:
        cur.execute(q, val)
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        print("Query Error")
    else:
        # save in database use commit()
        con.commit()
        print("Record Insert successfully")
    finally:
        # close cursor and connection on every path, not only on success
        cur.close()
        con.close()
def update_Salary(self):
    """Apply a percentage increment to one employee's salary.

    Prompts for the employee id and the increment percent, prints the
    current and the new salary, then updates the row.
    """
    con1 = Connection()
    con = con1.conn()
    eid = int(input("Enter employee id no. to be updated : "))
    s = int(input("Enter Increment salary of Employee : "))  # percent increment
    # update query (the old comment mislabelled this as a delete query)
    query = "UPDATE Employee SET Salary=%s WHERE Empid=%s"
    cur = con.cursor()
    # fetch the current salary first
    q = "SELECT Salary FROM Employee where Empid=%s"
    cur.execute(q, (eid,))  # pass parameters as a tuple per DB-API
    result = cur.fetchall()
    if not result:
        # previously result[0] raised an unhandled IndexError for unknown ids
        print("Record not found")
        cur.close()
        con.close()
        return
    s1 = result[0][0]
    print("Current Salary ", s1)
    s1 = s1 + s1 * s / 100  # raise by s percent
    print("After Increment , New Salary =", s1)
    val = (s1, eid)
    # run update query
    try:
        cur.execute(query, val)
        con.commit()  # to changes permanently in table
        print(" Record Update successfully")
    except Exception:  # narrowed from a bare except
        print("Record not found")
    finally:
        cur.close()
        con.close()  # was never closed in the original
def update_salary(self):
    """Increment one employee's basic salary by a user-supplied percentage.

    Mirrors update_Salary above: reads the current salary, raises it by
    the entered percent, and writes it back.
    """
    con1 = Connection()
    con = con1.conn()
    empid = int(input("Enter employee id"))
    # percentage increment. BUG FIX: the original read this into
    # 'basic_salary' but the formula below referenced an undefined 's',
    # raising NameError on every call.
    pct = float(input("Enter salary"))
    # BUG FIX: the original reused the name 'q' for both statements, so the
    # UPDATE text was clobbered by the SELECT and the "update" re-ran the
    # SELECT with two parameters. Keep them in separate variables.
    update_q = "UPDATE employee SET basic_salary=%s where empid=%s"
    cur = con.cursor()
    select_q = "select salary from employee where empid=%s"
    cur.execute(select_q, (empid,))  # params as a tuple per DB-API
    result = cur.fetchall()
    if not result:
        # avoid an unhandled IndexError on unknown ids
        print("Record not found")
        cur.close()
        con.close()
        return
    s1 = result[0][0]
    print("current salary", s1)
    s1 = s1 + s1 * pct / 100  # raise by pct percent
    print("after salary", s1)
    val = (s1, empid)
    try:
        cur.execute(update_q, val)
        con.commit()
        print("Record update successfully")
    except Exception:  # narrowed from a bare except
        print("Record not found")
    finally:
        cur.close()
        con.close()
class Patient(object):
    '''
    has a tiny percentage of the footprint (and loading time) of the
    main patient class
    '''
    # adult tooth column names: upper arch (ur8..ul8) then lower (lr8..ll8)
    TOOTH_FIELDS = ("ur8", "ur7", "ur6", "ur5", 'ur4', 'ur3', 'ur2', 'ur1',
                    'ul1', 'ul2', 'ul3', 'ul4', 'ul5', 'ul6', 'ul7', 'ul8',
                    "lr8", "lr7", "lr6", "lr5", 'lr4', 'lr3', 'lr2', 'lr1',
                    'll1', 'll2', 'll3', 'll4', 'll5', 'll6', 'll7', 'll8')
    # deciduous (baby) tooth name at the same index as TOOTH_FIELDS;
    # '***' marks positions with no deciduous counterpart
    DECIDUOUS = ('***', '***', '***', 'ulE', 'ulD', 'ulC', 'ulB', 'ulA',
                 'urA', 'urB', 'urC', 'urD', 'urE', '***', '***', '***',
                 '***', '***', '***', 'lrE', 'lrD', 'lrC', 'lrB', 'lrA',
                 'llA', 'llB', 'llC', 'llD', 'llE', '***', '***', '***')
    # class-level connection shared by all instances
    connection = Connection()

    def __init__(self, sno):
        '''
        initiate the class with default variables, then load from database
        '''
        if sno <= 0:
            raise PatientNotFoundException
        self.serialno = sno
        db = self.connection.connection
        cursor = db.cursor()
        cursor.execute(self.query, (sno, ))
        row = cursor.fetchone()
        if not row:
            raise PatientNotFoundException
        # NOTE(review): the SELECT built by self.query lists the columns as
        # dent1, dent0, dent3, dent2 — this unpack matches that exact order
        self.dent1, self.dent0, self.dent3, self.dent2 = row[:4]
        # columns 4.. of the row are the per-tooth "<field>st" values
        for i, field in enumerate(self.TOOTH_FIELDS):
            self.__dict__[field] = row[i + 4]

    @property
    def query(self):
        # builds:
        #   SELECT dent1, dent0, dent3, dent2, ur8st, ur7st, ... ll8st
        #   from patients where serialno = %s
        query = 'SELECT dent1, dent0, dent3, dent2, '
        for field in self.TOOTH_FIELDS:
            query += "%sst, " % field
        return '%s from patients where serialno = %%s' % query.rstrip(", ")

    #@property
    def chartgrid(self):
        # Map each tooth position to either its adult name or the deciduous
        # name, driven by the four dent* values.
        grid = ""
        chart_dict = {}
        for quad in (self.dent1, self.dent0, self.dent3, self.dent2):
            # each dent value expands to a run of per-tooth flag characters;
            # presumably 8 chars per quadrant (32 total) — TODO confirm
            # against from_signed_byte
            grid += from_signed_byte(quad)
        for i, tooth in enumerate(self.TOOTH_FIELDS):
            if grid[i] == "0":
                # '0' flag -> show the adult tooth itself
                chart_dict[tooth] = tooth
            else:
                chart_dict[tooth] = self.DECIDUOUS[i]
        return chart_dict
def view_record(self):
    """Fetch every row of the employee table and print the result set."""
    connection = Connection().conn()
    cursor = connection.cursor()
    # run select query
    cursor.execute("SELECT * FROM employee")
    print(cursor.fetchall())
    cursor.close()
    connection.close()
def __init__(self,
             host='localhost',
             port=61613,
             user='',
             passcode='',
             ver=1.0,
             stdin=sys.stdin,
             stdout=sys.stdout):
    """Interactive STOMP shell: connect to the broker and start listening.

    61613 is the conventional STOMP port. The object is both the command
    loop (Cmd) and the frame listener (ConnectionListener).
    """
    Cmd.__init__(self, 'Tab', stdin, stdout)
    ConnectionListener.__init__(self)
    # open the broker connection; wait_on_receipt presumably blocks sends
    # until the broker acknowledges — TODO confirm against stomp.py docs
    self.conn = Connection([(host, port)],
                           user,
                           passcode,
                           wait_on_receipt=True,
                           version=ver)
    # register ourselves as listener before start() so no frames are missed
    self.conn.set_listener('', self)
    self.conn.start()
    self.transaction_id = None  # id of the transaction in progress, if any
    self.version = ver
    self.__subscriptions = {}  # active subscriptions keyed by destination
    self.__subscription_id = 1  # next subscription id to hand out
    self.prompt = '> '
def __init__(self):
    """Wire up the networked whiteboard: connection, thread base classes,
    mouse hooks, and the drag/line state used while drawing."""
    self.conn = Connection()
    Thread.__init__(self)
    WhiteBoard.__init__(self)
    self._init_mouse_event()
    # daemon thread: don't keep the process alive for this worker
    self.setDaemon(True)
    # no drag in progress yet
    self.isMouseDown = False
    self.x_pos = self.y_pos = self.last_time = None
    # endpoints of the line currently being drawn
    self.line_x1 = self.line_y1 = None
    self.line_x2 = self.line_y2 = None
def View_Record(self):
    """Fetch every row of the Employee table and print the result set."""
    # connect through the project's Connection helper
    connection = Connection().conn()
    cursor = connection.cursor()
    # fetchall() returns the whole result set (fetchone() would give one row)
    cursor.execute("SELECT * FROM Employee")
    print(cursor.fetchall())
    cursor.close()
    connection.close()
def delete_record(self):
    """Prompt for an employee id and delete that row from employee."""
    con1 = Connection()
    con = con1.conn()
    empid = int(input("Enter employee id no. to be delete=>"))
    # parameterized delete keeps the id out of the SQL text
    q = "delete from employee where empid=%s"
    cur = con.cursor()
    try:
        cur.execute(q, (empid,))  # params as a tuple per DB-API
        con.commit()
        print("record delete successfully")
    except Exception:  # narrowed from a bare except
        print("record not found")
    finally:
        # close on every path, not only when the try/except falls through
        cur.close()
        con.close()
def Search_Record(self):
    """Look up one employee by id and print the matching rows."""
    eid = int(input("Enter employee id no. to be Searched : "))
    connection = Connection().conn()
    cursor = connection.cursor()
    # parameterized lookup; fetchall() prints every matching row
    cursor.execute("SELECT * FROM Employee where Empid=%s", eid)
    print(cursor.fetchall())
    cursor.close()
    connection.close()
def delete(self):
    """Prompt for an employee id and delete that row from Employee."""
    con1 = Connection()
    con = con1.conn()
    eid = int(input("Enter Employee id no. to be deleted : "))
    # parameterized delete query
    query = "delete FROM Employee WHERE Empid=%s"
    cur = con.cursor()
    try:
        cur.execute(query, (eid,))  # params as a tuple per DB-API
        # BUG FIX: was self.con.commit() — 'con' is a local, so the old code
        # always raised AttributeError, the bare except printed "Record not
        # found", and the delete was never committed.
        con.commit()  # to changes permanently in table
        print("Delete Record successfully")
    except Exception:  # narrowed from a bare except
        print("Record not found")
    finally:
        cur.close()
        con.close()
def search_record(self):
    """Prompt for an employee id and print all matching rows."""
    empid = int(input("Enter Employee id"))
    con1 = Connection()
    con = con1.conn()
    query = "SELECT * FROM employee where empid=%s"
    cur = con.cursor()
    try:
        cur.execute(query, (empid,))  # params as a tuple per DB-API
        # (removed the con.commit() that used to follow — a SELECT
        # changes nothing, so there is nothing to commit)
    except Exception:
        print("Query Error")
    else:
        result = cur.fetchall()
        print(result)
    finally:
        # close on every path, including the error branch
        cur.close()
        con.close()
def __init__(self, path, default, quiet):
    '''Constructs the bot object. Takes path, default, and quiet arguments
    from the command line input and sets the bot accordingly. Initializes
    logging, creates instances of necessary classes. Loads plugins, begins
    the connection.'''
    self._config_path = path  # path to the configuration file
    self._default = default   # presumably "accept defaults" flag — from CLI
    self._quiet = quiet       # presumably verbosity suppression — from CLI
    self.logger = logging.getLogger("GorillaBot")
    # configuration source; supplies the settings dict used below
    self._configuration = Configure(self._config_path, self._default,
                                    self._quiet)
    settings = self._configuration.get_configuration()
    # IRC connection built from the stored settings
    self.GorillaConnection = Connection(
        self, settings["host"], settings["port"], settings["nick"],
        settings["ident"], settings["realname"], settings["chans"],
        settings["botop"], settings["fullop"])
    # command dispatcher bound to this bot and its connection
    self.GorillaCommander = CommandManager(self, self.GorillaConnection)
    # start the network connection last, once everything above is wired up
    self.GorillaConnection._connect()
await queue.join()  # NOTE(review): fragment — the enclosing async def is outside this chunk
# cancel all workers
log.debug('cancelling workers')
for w in workers:
    w.cancel()
# wait until all worker tasks are cancelled
# NOTE(review): gather on cancelled tasks can raise CancelledError here —
# consider return_exceptions=True; confirm intended behavior
await asyncio.gather(*workers)


if __name__ == '__main__':
    util.patchAsyncio()
    ib = IB()
    barSize = '30 secs'
    wts = 'TRADES'
    # object where data is stored
    store = ArcticStore(f'{wts}_{barSize}')
    # the bool is for cont_only
    holder = ContractHolder(ib, 'contracts.csv', store, wts, barSize, False,
                            aggression=1)
    asyncio.get_event_loop().set_debug(True)
    # util.logToConsole(DEBUG)
    # Connection runs main(holder) against IB, with a watchdog restarting on failure
    Connection(ib, partial(main, holder), watchdog=True)
    log.debug('script finished, about to disconnect')
    ib.disconnect()
    log.debug('disconnected')
def __init__(self, host='localhost', port=61613, user='', passcode=''):
    """Connect to a STOMP broker (61613 is the conventional STOMP port)
    and register this object as the frame listener."""
    self.conn = Connection([(host, port)], user, passcode)
    # register ourselves as listener before start() so no frames are missed
    self.conn.set_listener('', self)
    self.conn.start()
    # command table for the interactive loop — presumably name -> handler;
    # TODO confirm the shape returned by get_commands()
    self.__commands = get_commands()
    # id of the STOMP transaction in progress, if any
    self.transaction_id = None
class queriesClass:
    """class for queries about limits etc.

    Builds a suggested shopping list for a (possibly new) user from the
    dietary/limit preferences stored in the db_shopagent database. State is
    kept on the instance and mutated across the mutually-recursive methods
    below (returnDfOfOneLimitQuery / returnFewLimitsQuery / learningForPatial
    / noLimits / calculateList).
    """
    db = Connection()          # shared DB connection wrapper
    merged1 = None             # users x receipts x products merge under work
    user_id = None             # current user id (also reused to hold A.size in places)
    userPreferences = None     # DataFrame of the user's limit ids
    count = 0                  # 0 = single-limit path, 1 = few-limits path
    list_length = 25           # number of products to suggest
    dfList = None              # final suggested product-id list
    toPrint = None
    alreadyTry = False         # guards against infinite fallback recursion
    userNum = 0
    usersUsed = None           # receipts already consumed by earlier passes

    def __init__(self):
        self.db.connect()

    # here we get the user / new user id from the sign-up system
    def return_suggested_products(self, userID):
        """Entry point: return (and print to csv) the suggested product list."""
        # for now we are not counting the average list length, because we
        # still don't have enough receipts (the function returns ~10), so we
        # take the first 25 products; when the data grows we can use:
        ##############self.countAvarageListSize()
        # NOTE(review): userID is concatenated straight into SQL — injection
        # risk; should be parameterized
        query = "SELECT [limit_id] FROM [db_shopagent].[dbo].[user_ref_limits] WHERE [user_id]=" + userID
        self.userPreferences = pd.DataFrame(
            pd.read_sql(query, self.db.connection))  # get user limits
        self.user_id = userID  # save userID for later if we will need it
        # if the user does not exist / has no limits
        if (self.userPreferences.empty):
            self.noLimits()
        # if the user has only one limit
        elif (len(self.userPreferences.index) == 1):
            self.count = 0
            # self.checkIfPartialOrNot(self.userPreferences.iloc[0]['limit_id'])
            self.returnDfOfOneLimitQuery(self.userPreferences)
        # if the user has several limits
        else:
            # print(userPreferences.iloc[0]['limit_id'])
            self.returnFewLimitsQuery(self.userPreferences)
        # even if no user matches the given id we still return a suggestion
        self.printToCsv(self.dfList)
        return (self.dfList)

    # if the user has no limits we take all existing limits from the db,
    # because we can recommend everything for this user
    def noLimits(self):
        query = "SELECT [limit_id] FROM [db_shopagent].[dbo].[user_ref_limits]"
        userPreferences = pd.DataFrame(pd.read_sql(
            query, self.db.connection))  # get all limits
        self.alreadyTry = False
        self.returnFewLimitsQuery(userPreferences)

    # check whether the user's preference is partial (only part of the
    # family has this limit)
    def checkIfPartialOrNot(self, limitId):
        # variable user_id saves the info of the user
        queryForPart = "SELECT [Partial] FROM [db_shopagent].[dbo].[user_ref_limits] WHERE [limit_id]=" + str(
            limitId) + " And [user_id]=" + str(self.user_id)
        val = pd.DataFrame(pd.read_sql(queryForPart,
                                       self.db.connection))  # get user limits
        # if the match is not partial we can continue to find the list
        if (val.iloc[0]['Partial'] == 0):
            self.returnDfOfOneLimitQuery(limitId)
        # what should we do if the user has only one limit and it's partial?
        else:
            # NOTE(review): learningForPatial requires a limitNum argument —
            # this call would raise TypeError; confirm intended usage
            self.learningForPatial()

    # average list size; for now the database is not big enough, so we give
    # a recommendation of 25 products instead
    def countAvarageListSize(self):
        countReceipt = pd.DataFrame(
            pd.read_sql("SELECT COUNT (*) FROM [db_shopagent].[dbo].[receipt]",
                        self.db.connection))
        countProducts = pd.DataFrame(
            pd.read_sql("SELECT COUNT (*) FROM [db_shopagent].[dbo].[receipt]",
                        self.db.connection))
        a = int(countReceipt.iloc[0])
        b = int(countProducts.iloc[0])
        self.list_length = round(a / b) * 10

    # used when we get an empty suggestion list: learn from any user with
    # the limit (partial or full), merge, and recalculate the best options
    def learningForPatial(self, limitNum):
        # when the user has few limits we get an int, and when there is only
        # one limit we get a DataFrame, so we must check which one we got
        few_values = {}
        if (self.count == 0):
            a = limitNum.iloc[0]['limit_id']
            query = "SELECT [user_id] FROM [db_shopagent].[dbo].[user_ref_limits] WHERE [limit_id]=" + str(
                a)  #+" AND [Partial]=0"
            A = pd.DataFrame(pd.read_sql(
                query, self.db.connection))  # users with "full" limit dataframe
        else:
            A = pd.DataFrame(columns=['user_id'])
            if (limitNum.size > 1):
                index = 0
                for index, row in limitNum.iterrows():
                    query = "SELECT [user_id] FROM [db_shopagent].[dbo].[user_ref_limits] WHERE [limit_id]=" + str(
                        row['limit_id'])  #+" AND [Partial]=0"
                    few_values[index] = pd.DataFrame(
                        pd.read_sql(query, self.db.connection)
                    )  # users with "full" limit dataframe
                index = 0
                while (index < len(few_values)):
                    A = pd.concat([A, few_values[index]])
                    index = index + 1
        self.user_id = A.size
        # query="SELECT [user_id] FROM [db_shopagent].[dbo].[user_ref_limits] WHERE [limit_id]="+str(a)+" AND [Partial]=1"
        query_shoppingList = "SELECT [user_id],[receipt_id] FROM [db_shopagent].[dbo].[receipt_ref_user]"
        query_items = "SELECT [receipt_id],[product_id],[quantity] FROM [db_shopagent].[dbo].[receipet_ref_product]"
        queryForFew = "SELECT [limit_id] FROM [db_shopagent].[dbo].[user_ref_limits]"
        userPreferences = pd.DataFrame(
            pd.read_sql(queryForFew, self.db.connection))  # get all limits
        # data frame of all users with chosen limits
        A = pd.DataFrame(pd.read_sql(query,
                                     self.db.connection))  # users by limits
        B = pd.DataFrame(pd.read_sql(query_shoppingList,
                                     self.db.connection))  # receipts by users
        C = pd.DataFrame(pd.read_sql(query_items,
                                     self.db.connection))  # products by receipt
        # NOTE(review): DataFrame.append is not in-place — these two append
        # calls discard their result; confirm intent
        B.append(self.usersUsed)
        B = B.drop_duplicates(keep=False)
        self.usersUsed = B
        self.usersUsed = pd.concat([self.usersUsed, B])
        A.append(userPreferences)
        if (A.empty or B.empty):
            self.merged1 = None
            self.noLimits()
        else:
            # only users and receipts
            merged = pd.merge(A, B, on='user_id')
            # users, receipts and products
            self.merged1 = pd.merge(merged, C, on='receipt_id')
            # the user and receipt ids are no longer needed
            self.merged1.__delitem__('user_id')
            self.merged1.__delitem__('receipt_id')
            # how many distinct users / products we have
            n_users = merged.user_id.unique().shape[0]
            n_product = self.merged1.product_id.unique().shape[0]
            # group items by counting how many times they were bought
            self.merged1.groupby('product_id')
            self.alreadyTry = True
            self.calculateList(limitNum)
        # if after all the manipulation we still have no suggested list,
        # fall back to the no-limits suggestion
        if (self.dfList == None):
            self.noLimits()

    # similar to learningForPatial, but here we choose only users whose
    # limit is "full" (not partial)
    def returnDfOfOneLimitQuery(self, limitNum):
        # when the user has few limits we get an int, and when there is only
        # one limit we get a DataFrame, so we must check which one we got
        if (self.count == 0):
            a = limitNum.iloc[0]['limit_id']
        else:
            a = limitNum
        query = "SELECT [user_id] FROM [db_shopagent].[dbo].[user_ref_limits] WHERE [limit_id]=" + str(
            a) + " AND [Partial]=0"
        query_shoppingList = "SELECT [user_id],[receipt_id] FROM [db_shopagent].[dbo].[receipt_ref_user]"
        query_items = "SELECT [receipt_id],[product_id],[quantity] FROM [db_shopagent].[dbo].[receipet_ref_product]"
        queryOnlyOne = """select [user_id] from [db_shopagent].[dbo].[user_ref_limits] where [user_id] in (select user_id from [db_shopagent].[dbo].[user_ref_limits] group by [user_id],[limit_id] having count(*) =1 and [limit_id]=""" + str(
            limitNum) + ") AND [Partial]=0"
        A = pd.DataFrame(pd.read_sql(
            query, self.db.connection))  # users with "full" limit dataframe
        B = pd.DataFrame(pd.read_sql(query_shoppingList,
                                     self.db.connection))  # receipts by users
        C = pd.DataFrame(pd.read_sql(query_items,
                                     self.db.connection))  # products by receipt
        B.append(self.usersUsed)
        B = B.drop_duplicates(keep=False)
        self.usersUsed = B
        # empty frames are acceptable here: the few-limits caller checks all
        # limits and handles emptiness itself
        self.user_id = A.size
        # only users and receipts
        merged = pd.merge(A, B, on='user_id')
        # users, receipts and products
        self.merged1 = pd.merge(merged, C, on='receipt_id')
        # the user and receipt ids are no longer needed
        self.merged1.__delitem__('user_id')
        self.merged1.__delitem__('receipt_id')
        # how many distinct users / products we have
        n_users = merged.user_id.unique().shape[0]
        n_product = self.merged1.product_id.unique().shape[0]
        # group items by counting how many times they were bought
        # self.merged1.groupby('product_id')['quantity'].sum()
        if (self.count == 0):
            self.calculateList(limitNum)

    def calculateList(self, limitsArray):
        """Reduce self.merged1 to the top-N product ids by median quantity."""
        # (a block of commented-out median/percentage experiments was kept
        # by the author "in case of errors"; condensed here)
        ###################################################################################################
        self.usersUsed = None
        mm = pd.DataFrame(columns=['product_id', 'quantity'])
        # we don't want one user to dominate the list (e.g. 150 milk bottles
        # bought by a single user), so we use the median
        if (self.merged1.empty):
            self.learningForPatial(limitsArray)
        mm = self.merged1.groupby(
            'product_id')['quantity'].median().reset_index()
        # if fewer products exist than the target list size, return fewer
        if (self.list_length > mm.size):
            self.list_length = mm.size
        # take the products with the largest median
        mm = mm.nlargest(self.list_length, 'quantity')
        self.dfList = mm['product_id'].tolist()
        print('Ans:' + str(self.dfList))
        if (self.dfList == None):
            self.learningForPatial(limitsArray)
        # self.printToCsv(self.dfList)

    # when we get several limits for one user
    def returnFewLimitsQuery(self, limitsArray):
        df = pd.DataFrame(columns=['product_id', 'quantity'])
        df1 = pd.DataFrame(columns=['product_id', 'quantity'])
        df2 = pd.DataFrame(columns=['product_id', 'quantity'])
        df3 = pd.DataFrame(columns=['product_id', 'quantity'])
        isinA = {}
        # flag used in the other methods so returnDfOfOneLimitQuery knows to
        # return here instead of calculating on its own
        self.count = 1
        dataframe_collection = {}
        if (self.alreadyTry == False):
            for index, row in limitsArray.iterrows():
                self.returnDfOfOneLimitQuery(row['limit_id'])
                # collect the per-limit dataframes, then send the combination
                # to calculateList
                dataframe_collection[index] = self.merged1
                self.merged1 = None
            # new_df = pd.merge(A_df, B_df, how='left', left_on=['A_c1','c2'], right_on = ['B_c1','c2'])
            index = 0
            while (index < len(dataframe_collection)):
                sign = [df, dataframe_collection[index]]
                df = pd.concat(sign)
                print("dsf")
                if (index == 0):
                    new_df = df
                elif (dataframe_collection[index].empty):
                    new_df.append(dataframe_collection[index])
                else:
                    new_df = pd.merge(new_df,
                                      dataframe_collection[index],
                                      on=['product_id'])
                index = index + 1
            if (new_df.empty):
                new_df['quantity'] = ""
            else:
                # clean up merge artifacts (Total / quantity_x / quantity_y)
                if 'Total' in new_df:
                    new_df = new_df.drop('Total', 1)
                if 'quantity_x' in new_df:
                    new_df = new_df.ix[:, ~new_df.columns.duplicated()]
                    new_df['quantity'] = new_df['quantity_x']
                else:
                    new_df['quantity'] = ""
                if 'quantity_x' in new_df:
                    new_df = new_df.drop('quantity_x', 1)
                if 'quantity_y' in new_df:
                    new_df = new_df.drop('quantity_y', 1)
            # df2['Total'] = df.groupby('product_id')['quantity'].sum()
            # df2 = df2[pd.notnull(df2['Total'])]  # remove all-NaN rows
            # df2['quantity'] = df2['Total']; df2 = df2.drop('Total', 1)
            # df1 = df2.groupby('product_id').count()
            self.merged1 = new_df
            self.alreadyTry = True
            self.calculateList(limitsArray)
        else:
            if (self.count == 0):
                self.learningForPatial(limitsArray)
            else:
                self.dfList = None
                return

    # in the future, if we use a server instead of recalculating every time,
    # we could cache a suggested-list template per limit and refresh it
    # only at the end of the day, for example
    def printToCsv(self, dfta):
        """Resolve product names for the suggested ids and dump to csv."""
        df = pd.DataFrame(columns=['product_id', 'product_name'])
        for row in dfta:
            A = pd.DataFrame(
                pd.read_sql(
                    "SELECT [product_id],[product_name] FROM [db_shopagent].[dbo].[products] WHERE [product_id]="
                    + str(row), self.db.connection))  # products by id
            sign = [df, A]
            df = pd.concat(sign)
        df.to_csv("cvName", encoding='utf-8')
from crop_faces import Crop_faces
from connect import Connection

# face cropper: crop output directory, metadata json, and a frozen
# TensorFlow graph (.pb) — presumably the face-detection model; confirm
crop_faces = Crop_faces('./crops/', './crops_data.json',
                        './models/model_faces.pb')
# connection identified by camera name 'cam1'
connect = Connection('cam1')
from connect import Connection

conn = Connection()
conn.connect_to_fantasy_commander()

# batter projections from the 'steamer' projection system
query = """
SELECT name, year, team, hr, war
FROM fangraphs_batter_standard
WHERE team = 'steamer'
"""
conn.cursor.execute(query)
for row in conn.cursor:
    # BUG FIX: 'print row' is Python-2-only syntax and is a SyntaxError on
    # Python 3, which the rest of this codebase targets (f-strings elsewhere)
    print(row)
for w in workers: w.cancel() # wait until all worker tasks are cancelled await asyncio.gather(*workers) if __name__ == '__main__': util.patchAsyncio() ib = IB() barSize = '30 secs' wts = 'TRADES' # object where data is stored store = ArcticStore(f'{wts}_{barSize}') holder = ContractHolder(ib, 'contracts.csv', store, wts, barSize, True, aggression=0.5) asyncio.get_event_loop().set_debug(True) # util.logToConsole(DEBUG) Connection(ib, partial(main, holder), watchdog=False) log.debug('script finished, about to disconnect') ib.disconnect() log.debug('disconnected')