def isHaveName(name):
    mysql = connect_db.connect_db()
    if mysql.isHaveName(name):
        mysql.close()
        return 1
    mysql.close()
    return 0
def test_recommend(user_id):
    mysql = connect_db.connect_db()
    recommend = recommend_movies.comput_recommend(user_id)
    test_movies = mysql.getTestMovies(user_id)
    mysql.close()
    test = []
    stdout_backup = sys.stdout
    num = len(test_movies)
    N = 0   # recommended movies that also appear in the held-out test set
    NN = 0  # of those, movies the user rated 3 or higher
    for row in recommend:
        if int(row[0]) > 0:
            test.append([int(row[0]), row[1], 0])
    #print(test)
    for i in range(len(test)):
        for row in test_movies:
            if row[0] == test[i][0]:
                test[i][2] = row[1]
                N += 1
                if row[1] >= 3:
                    NN += 1
                break
    #for row in test:
    #    print(row)
    log_file = open("log.log", "a")
    sys.stdout = log_file
    if num == 0:
        print(user_id, 0, 0)
    elif N == 0:
        print(user_id, N / num, 0)
    else:
        print(user_id, N / num, NN / N)
    sys.stdout = stdout_backup
    log_file.close()
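# Interpretation note (editorial assumption, not from the source): in the log
# line above, N/num is the share of held-out test movies the recommender
# actually surfaced (recall-like), while NN/N is the share of those hits the
# user rated 3 or higher (precision-like).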
def getUserName(user_id):
    mysql = connect_db.connect_db()
    sign = mysql.getUserName(user_id)
    mysql.close()
    if sign == '':
        return 0
    return sign
def main(args):
    config = parse_json_config(args.config)
    connectdb = connect_db(config=config)
    globusdb = globus_db(config=config, connect_db=connectdb)
    globusdb.add_group(args.projectfile, username="******",
                       group_parent=args.parent)
def getMovieDetail(movie_id):
    mysql = connect_db.connect_db()
    movie_detail = system_object.MoviesDetail()
    movie_detail.base.Mid = movie_id
    if mysql.getMovieDetail(movie_detail) == -1:
        mysql.close()
        return 0
    #_spider.movies_detail_view(movie_detail)
    mysql.close()
    return movie_detail
def getUserHaveWatch(user_id):
    mysql = connect_db.connect_db()
    user = system_object.User(user_id)
    user.name = mysql.getUserName(user_id)
    have_watch = mysql.getUserMovies(user_id)
    for i in range(len(have_watch)):
        mysql.getUserMovieDetail(have_watch[i])
    user.movies = have_watch
    mysql.close()
    return user
def get_db():
    """Opens a new database connection if there is none yet for the
    current application context.
    """
    if not hasattr(g, 'azure_db'):
        g.azure_db = connect_db()
        g.azure_db.autocommit = True
        g.azure_db.set_attr(pyodbc.SQL_ATTR_TXN_ISOLATION,
                            pyodbc.SQL_TXN_SERIALIZABLE)
    return g.azure_db
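# Companion sketch, assumed rather than taken from the source: the usual
# Flask pairing for a get_db() helper closes the connection when the
# application context tears down. The `app` object is an assumption here.
@app.teardown_appcontext
def close_db(exception):
    """Close the pyodbc connection stored on g, if one was opened."""
    db = getattr(g, 'azure_db', None)
    if db is not None:
        db.close()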
def main(args):
    print(datetime.datetime.now())
    config = parse_json_config(args.config)
    connectdb = connect_db(config=config)
    globusdb = globus_db(config=config, connect_db=connectdb)
    # Get all groups
    globus_groups = globusdb.get_all_groups(get_summary=True)
    # Create new groups if necessary
    new_groups = [grp for grp in globus_groups
                  if grp["name"] not in connectdb.groups.keys()]
    for ng in new_groups:
        log.info("Creating group %s", ng["name"])
        connectdb.add_group(ng)
    if len(connectdb.groups.keys()) != len(globus_groups):
        # Remove groups that were deleted on the Globus side
        ggroups = [grp["name"] for grp in globus_groups]
        deleted_groups = [grp for grp in connectdb.groups.keys()
                          if (grp not in ggroups and
                              "globus_uuid" in connectdb.groups[grp].keys())]
        for dg in deleted_groups:
            log.info("Removing group %s", dg)
            connectdb.groups.pop(dg, None)
    # Update group information, mainly the number of users
    for ggrp in globus_groups:
        log.debug("Updating group %s", ggrp["name"])
        connectdb.update_group(ggrp)
    # Get all users
    globus_members = globusdb.get_all_users(get_user_groups=True)
    # Create new users if necessary
    new_users = [user for user in globus_members
                 if (user["username"] not in connectdb.users.keys() and
                     user["groups"] != ["connect"])]
    for nu in new_users:
        log.info("Creating user %s", nu["username"])
        connectdb.add_user(nu)
    if len(connectdb.users.keys()) != len(globus_members):
        # Disable login for local users that no longer exist on Globus
        gusernames = [user["username"] for user in globus_members]
        removed_users = [username for username in connectdb.users.keys()
                         if username not in gusernames]
        for rm in removed_users:
            connectdb.set_user_nologin(rm)
    # Update user information
    for gusr in globus_members:
        log.debug("Updating user %s", gusr["username"])
        connectdb.update_user(gusr)
    connectdb.write_db()
def main(args):
    config = parse_json_config(args.config)
    connectdb = connect_db(config=config)
    new_users = {}
    for username, user_info in connectdb.users.items():
        # Assign a random schedd and clear the project for every user
        user_info["condor_schedd"] = random.randint(1, 5)
        user_info["connect_project"] = None
        # Skip users whose only group is the base "connect" group
        if user_info["groups"] == ["connect"]:
            continue
        new_users[username] = user_info
    connectdb.users = new_users
    connectdb.write_db()
def maintainInitMovies():
    mysql = connect_db.connect_db()
    movies = mysql.gettop100Movies()
    if movies == -1:
        mysql.close()
        return -1
    # Join the movie ids into a comma-separated string
    mo = ','.join(str(i) for i in movies)
    s = mysql.updateInitMovies(mo, -1)
    mysql.close()
    return s
def maintain_similarity(user_id):
    mysql = connect_db.connect_db()
    num = 943
    #num = mysql.getNumUser()
    if num == -1:
        mysql.close()
        return -1
    main_user = mysql.getUserMovies(user_id)
    lens_main = len(main_user)
    for i in range(user_id + 1, num + 1):
        temp = mysql.getUserMovies(i)
        lens_temp = len(temp)
        # Merge the two rating lists (both sorted by Mid) to collect the
        # scores of movies rated by both users
        k = 0
        j = 0
        same = [[], []]
        while k < lens_main and j < lens_temp:
            if main_user[k].Mid < temp[j].Mid:
                k += 1
            elif main_user[k].Mid > temp[j].Mid:
                j += 1
            else:
                same[0].append(float(main_user[k].user_score))
                same[1].append(float(temp[j].user_score))
                k += 1
                j += 1
        if len(same[0]) == 0:
            mysql.updateSimilarity(user_id, i, -1, 0, 0, 0)
            continue
        # Pearson correlation with +1 smoothing on each sum, damped by the
        # number of co-rated movies (full weight from 20 shared ratings up)
        sum1 = sum(same[0]) / len(same[0])
        sum2 = sum(same[1]) / len(same[1])
        sim1 = 0
        sim2 = 0
        sim3 = 0
        for k in range(0, len(same[0])):
            sim1 += (same[0][k] - sum1) * (same[1][k] - sum2)
            sim2 += (same[0][k] - sum1) ** 2
            sim3 += (same[1][k] - sum2) ** 2
        sim1 += 1
        sim2 += 1
        sim3 += 1
        just = min(float(len(same[0])) / 20, 1)
        sim = float(sim1) / (math.sqrt(sim2) * math.sqrt(sim3)) * just
        if mysql.updateSimilarity(user_id, i, sim, sim1, sim2, sim3) == -1:
            mysql.close()
            return -1
    mysql.close()
    return 1
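# Hedged standalone sketch of the similarity formula used above: Pearson
# correlation with +1 smoothing on each sum and a damping factor that grows
# with the number of co-rated movies, saturating at 20. All names here are
# illustrative, not from the source.
import math

def pearson_with_damping(scores_a, scores_b):
    """Smoothed, damped Pearson correlation of two equal-length rating lists."""
    mean_a = sum(scores_a) / len(scores_a)
    mean_b = sum(scores_b) / len(scores_b)
    cov = sum((a - mean_a) * (b - mean_b)
              for a, b in zip(scores_a, scores_b)) + 1
    var_a = sum((a - mean_a) ** 2 for a in scores_a) + 1
    var_b = sum((b - mean_b) ** 2 for b in scores_b) + 1
    damping = min(len(scores_a) / 20.0, 1.0)
    return cov / (math.sqrt(var_a) * math.sqrt(var_b)) * damping

# e.g. pearson_with_damping([4.0, 3.0, 5.0], [5.0, 2.0, 4.0])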
def create_qiwi_api():
    conn, c = connect_db()
    table = """
        create table tokenQIWI(
            idToken integer not null primary key,
            token varchar(32),
            parent integer not null,
            status integer
        )
    """
    c.execute(table)
    conn.commit()
    conn.close()
def insertMovieScore(user_id, movie_id, movie_score):
    mysql = connect_db.connect_db()
    sign = mysql.isMovieWatch(user_id, movie_id)
    if sign == -1:
        mysql.close()
        return -1
    elif sign == 0:
        # Not rated yet: insert a new score
        if mysql.insertMovieScore(user_id, movie_id, movie_score) == -1:
            mysql.close()
            return -1
    else:
        # Already rated: update the existing score
        if mysql.updateUserMovieScore(user_id, movie_id, movie_score) == -1:
            mysql.close()
            return -1
    mysql.close()
    return 1
def test_score(user_id):
    mysql = connect_db.connect_db()
    recommend = recommend_movies.comput_recommend(user_id)
    test_movies = mysql.getTestMovies(user_id)
    have_watch = mysql.getUserMovies(user_id)
    test = []
    stdout_backup = sys.stdout
    log_file = open(str(user_id) + "_recommend", "a")
    sys.stdout = log_file
    # Keep recommendations with a valid id that the user has not watched yet
    for row in recommend:
        i = 1
        for r in have_watch:
            if int(row[0]) < 0 or int(row[0]) == r.Mid:
                i = 0
                break
        if i:
            test.append([int(row[0]), row[1], 0, 0])
    similarity_users = mysql.getSimiUsers(user_id, 0.3)
    for i in range(len(test)):
        sign = 1
        for row in test_movies:
            if row[0] == test[i][0]:
                test[i][2] = row[1]
                sign = 0
                break
        # For movies missing from the test set, fall back to the mean score
        # given by similar users
        average = []
        for j in similarity_users:
            if sign == 0:
                break
            temp = mysql.getUserMovieScore(j, test[i][0])
            if temp != -1:
                average.append(temp)
        if sign:
            test[i][3] = float(sum(average)) / len(average) if average else 0
        print(test[i][0], test[i][1], test[i][2], test[i][3])
    sys.stdout = stdout_backup
    log_file.close()
    mysql.close()
def category_insert(cat_id, cat_name):
    db = None
    try:
        db = connect_db.connect_db('thunder')
        # Define the insert SQL; placeholders instead of string formatting
        # avoid SQL injection
        with db.cursor() as cursor:
            sql = 'INSERT INTO category VALUES (%s, %s)'
            cursor.execute(sql, (cat_id, cat_name))
        db.commit()  # commit
    except Exception as e:
        print(e)
    finally:
        if db is not None:
            db.close()
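# Illustrative usage (id and name are hypothetical values); requires the
# 'thunder' database and its category table to exist.
if __name__ == '__main__':
    category_insert(1, 'Action')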
def maintainAverageScore(movie_id):
    mysql = connect_db.connect_db()
    watch_time, num_score = mysql.getMovieWatch(movie_id)
    if watch_time == -1:
        mysql.close()
        return -1
    elif watch_time == 0:
        # No ratings yet: store an average of 0
        if mysql.updateMovieWatch(movie_id, watch_time, 0) == -1:
            mysql.close()
            return -1
        mysql.close()
        return 1
    average = num_score / watch_time
    if mysql.updateMovieWatch(movie_id, watch_time, average) == -1:
        mysql.close()
        return -1
    mysql.close()
    return 1
    for i in data:
        try:
            if i.h4.string == 'Country:':
                movies_detail.country = i.a.string
            elif i.h4.string == 'Language:':
                movies_detail.language = i.a.string
            elif i.h4.string == 'Runtime:':
                movies_detail.runtime = float(i.time.string.split()[0])
        except:
            # some detail blocks are missing these fields; skip them
            pass
    movies_detail.base.Name = temp
    return 1


if __name__ == '__main__':
    mysql = connect_db.connect_db()
    stdout_backup = sys.stdout
    i = 1683
    pbar = tqdm.tqdm(total=1682)
    pbar.update(1683)
    while (i < 1683):
        #pbar.update(i)
        log_file = open("log.log", "a")
        movies_detail = system_object.MoviesDetail()
        movies_detail.base.Mid = i
        movies_detail.base.Name = mysql.getMoviesName(
            movies_detail.base.Mid)  # e.g. 'Toy Story (1995)'
        sys.stdout = log_file
        k = 0
        while k < 10:
            if spider(movies_detail) != 0:
        line = line.strip('\n')
        line = line.split("|")
        informationList.append(line)
    #print(informationList)
    sql_command = """INSERT INTO Rental(cid, mid, date_and_time, status)
                     VALUES (?, ?, ?, ?)"""
    cursor.executemany(sql_command, informationList)


def dropTables(conn):
    conn.execute("DROP TABLE IF EXISTS Rental")
    conn.execute("DROP TABLE IF EXISTS Customer")
    conn.execute("DROP TABLE IF EXISTS RentalPlan")
    conn.execute("DROP TABLE IF EXISTS Movie")


if __name__ == "__main__":
    conn = connect_db()
    dropTables(conn)
    loadRentalPlan("RentalPlan.txt", conn)
    loadCustomer("Customer.txt", conn)
    loadMovie("Movie.txt", conn)
    loadRental("Rental.txt", conn)
    conn.commit()
    conn.close()
def gen_input_args(g_tool, g_tool_name, outputdir, logd, directory_hierarchy,
                   ftest, fcdb, toa, btime, etime, dt, CONFIG_DICT):
    """
    Generate "input_args", a dictionary that holds all the variables
    needed for your commands.
    """
    for inputdir, pf, seq, cdt, tmp, num in directory_hierarchy:
        input_args = dict(inputdir=inputdir, pf=pf, seq=seq, cdt=cdt, num=num)
        # gen paths for input files:
        # (when organizing, input files could also be output files)
        # xtcf, grof, proxtcf, progrof, tprf, edrf, ndxf
        input_args.update(gen_input_files(inputdir, pf))

        # if g_tool is from the organize module, no new dir needs to be
        # created (comparing as strings does not work -- confirmed)
        if not g_tool.__module__ == 'g_analyze.organize':
            # anadir should be a subfolder under outputdir
            anadir = os.path.join(outputdir, 'r_' + g_tool_name)
            input_args['anadir'] = anadir
            if not os.path.exists(anadir) and not ftest:
                os.mkdir(anadir)

        # this part will be improved later, particularly when using a database
        if fcdb:
            import connect_db as cdb
            ss = cdb.connect_db(CONFIG_DICT['database'])
            query = ss.query(cdb.Cutoff_rg_alltrj).filter_by(sqid=seq)
            time_for_b = query.value(cdt)
            input_args['b'] = time_for_b
        else:
            input_args['b'] = 0  # default

        # particular to make_ndx
        if toa == 'g_make_ndx':
            ndx_id = CONFIG_DICT['ndx_input']   # ndx_input_dict
            ndx_fd = CONFIG_DICT['ndx_format']  # ndx_format_dict
            input_args['ndx_input'] = ' '.join(
                [ndx_id[ndx_fd[f].format(**locals())] for f in ndx_fd])

        if toa == 'g_select':
            input_args['g_select_select'] = (
                "'" + CONFIG_DICT['g_select']["seq"]
                + CONFIG_DICT['g_select'][cdt] + "'")

        # particular to sequence_spacing; later, toa may need to be checked
        # for other analyses as well
        if toa == 'sequence_spacing':
            from mysys import read_mysys
            mysys = read_mysys.read()
            input_args['peptide_length'] = mysys[seq].len

        # when analyzing ff_comparison
        if toa == 'trjorder':
            from mysys import read_mysys
            mysys = read_mysys.read()
            try:
                input_args['NA'] = mysys[cdt].natom
            except KeyError:
                print("ASSUME NUMBER OF ATOMS PER SOLVENT MOLECULE IS THAT "
                      "OF WATER: {0}".format(mysys['w'].natom))
                input_args['NA'] = mysys['w'].natom

        # dirty, fix later!
        input_args['bin'] = os.path.join(os.environ['HOME'],
                                         "exec/gromacs-4.5.5/exec/bin/")
        if cdt in ['h', 'f']:  # Heptanol
            input_args['bin'] = os.path.join(os.environ['HOME'],
                                             "exec/gromacs-4.5.5/exec/bin/")

        input_args['pwd'] = os.getenv('PWD')
        input_args['b'] = btime  # beginning time; overrides the fcdb value above
        input_args['e'] = etime  # ending time
        input_args['dt'] = dt

        # generate the command that is going to be executed
        cmd = g_tool(input_args)
        logf = os.path.join(logd, '{0}.log'.format(pf)) if logd else None
        yield (cmd, logf)
#!/usr/bin/env python
import pyorient
import pika
import sys
import json
#import pysiddhi
from time import sleep

from connect_db import connect_db
from lib_sheldon import close_brackets, subtract_bed

# set up data storage
client = connect_db()

#siddhiManager = SiddhiManager()
#siddhiApp = "define stream patientStream (zipcode int, patient_status_code int);" + \
#    "@info(name = 'incoming')" + \
#    "from patientStream"

# Set the connection parameters to connect to rabbit-server1 on port 5672
# on the / virtual host using the username "guest" and password "guest"
username = '******'
password = '******'
hostname = 'vcbumg2.cs.uky.edu'  # VM testing and deployment
virtualhost = '1'
#hostname = '128.163.202.50'  # local testing
def launch_web_api():
    # launch database client
    client = connect_db()

    # launch web application
    app = Flask(__name__)
    reset_app(client)

    # testing APIs
    @app.route('/test')
    def get_status():
        # query
        start_time = time.time()
        hostname = socket.gethostname()
        current_time = time.time()
        exec_time = time.time() - start_time
        status = 'online'
        # package
        response = dict()
        response['hostname'] = hostname
        response['current_time'] = current_time
        response['exec_time'] = exec_time
        response['status'] = status
        # encode and respond
        return json.dumps(response)

    @app.route('/patient_dumpdata')
    def patient_dump():
        return dump_db(client, "Patient")

    @app.route('/hospital_dumpdata')
    def hospital_dump():
        return dump_db(client, "Hospital")

    @app.route('/kydist_count')
    def kydist_dump():
        return dump_row_count(client, "kyzipdistance")

    @app.route('/reset_beds')
    def reset_beds():
        filename = "hospitals_totalbed.txt"
        return load_hospital(client, filename)

    # MF1 API
    @app.route('/api/getteam')
    def getteam():
        team = dict()
        team['team_name'] = "505Team"
        team['Team_members_sids'] = ["12535791", "10456246"]
        team['app_status_code'] = "1"
        # encode and respond
        return json.dumps(team)

    # MF2
    @app.route('/api/reset')
    def reset():
        return reset_app(client)

    # OF2
    @app.route('/api/getpatient/<string:mrn>')
    def getpatient(mrn):
        location_code = getlocationcode(mrn)
        patient = dict()
        patient['mrn'] = str(mrn)
        patient['location_code'] = str(location_code)
        # encode and respond
        return json.dumps(patient)

    # OF3
    @app.route('/api/gethospital/<string:id>')
    def gethospital(id):
        beds = getbeds(id, client)
        response = dict()
        response['total_beds'] = beds["total_beds"]
        response['avalable_beds'] = beds["beds"]
        response['zipcode'] = beds["zip"]
        return json.dumps(response)

    #-----------------------------------------#
    # RTR1
    @app.route('/api/zipalertlist')
    def zipalertlist():
        return getzipalertlist()

    # RTR2
    @app.route('/api/alertlist')
    def alertlist():
        return getalertlist()

    # RTR3
    @app.route('/api/testcount')
    def testcount():
        return gettestcount()

    return app
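# Assumed entry point, not shown in the source: build the app and serve it
# with Flask's development server. Host and port are illustrative values.
if __name__ == '__main__':
    api_app = launch_web_api()
    api_app.run(host='0.0.0.0', port=5000)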
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime

from autumn.model import Model
from connect_db import connect_db

# Initialize the DB connection
connect_db()


class Frame(Model):
    """Frame"""

    class Meta:
        defaults = {"create_time": datetime.datetime.now, "up_num": 0}
def init_db(path_to_db):
    with closing(connect_db(path_to_db)) as db:
        ftm_path = path.join('.', 'data', 'schema.sql')
        with open(ftm_path, mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
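# A minimal sketch of the connect_db() helper assumed above -- it is not shown
# in the source. executescript() implies the sqlite3 module, so a plausible
# implementation wraps sqlite3.connect(); the row_factory line is an optional
# convenience, not something the source confirms.
import sqlite3

def connect_db(path_to_db):
    db = sqlite3.connect(path_to_db)
    db.row_factory = sqlite3.Row  # rows accessible by column name
    return db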
def insert_user(name, password):
    mysql = connect_db.connect_db()
    result = mysql.insert_user(name, password)
    mysql.close()
    return result
def accessCheck(name, password):
    mysql = connect_db.connect_db()
    sign = mysql.accessCheck(name, password)
    mysql.close()
    return sign
Created on Tue Mar 24 10:46:50 2020

@author: evantesei
"""

import pandas as pd
import matplotlib.pyplot as plt

import connect_db as connect_db

# goal is to compare three sets of SoC data:
# 1. SoC query using TAS used in Joe's forecast -- merchant level,
#    industry average from 2019 data
# 2. SoC query using Enhance Analytics -- merchant level, industry average
#    from 2020 data, eliminating outlier merchants
# 3. the query from #1, but limited to merchants with valid Enhance
#    Analytics data, then compare

# establish connection
con = connect_db.connect_db()

soc_tos = """
select
    merchant_ari,
    industry,
    sum(tos) as sales,
    sum(loan_vol) as vol_tos,
    sum(loan_count) as count,
    vol_tos / count as aov,
    vol_tos / sales as soc_tos
FROM (
    select
        lv.merchant_ari,
        industry,
import asyncio
import websockets
import ssl
import json
import random

from connect_db import connect_db

DB = connect_db()
DB.see_dbs()

#G = '{"_event":"bulk-subscribe","tzID":8,"message":"pid-1057391:%%pid-1061443:%%pid-1057392:%%pid-1061453:%%pid-1061794:%%pid-169:%%pid-166:%%pid-172:%%pid-24441:%%pid-178:%%pid-171:%%pid-14958:%%pid-8830:%%pid-8849:%%pid-1:%%pid-13994:%%pid-23705:%%pid-2:%%pid-3:%%pid-4:%%pid-5:%%pid-7:%%pid-20:%%pid-27:%%pid-179:%%pid-8873:%%pid-8839:%%pid-8833:%%pid-44336:%%pid-8827:%%event-412518:%%event-412521:%%event-412472:%%event-411533:%%event-411537:%%event-411534:%%event-411535:%%isOpenExch-1:%%isOpenExch-2:%%isOpenPair-8873:%%isOpenPair-8839:%%isOpenPair-44336:%%isOpenPair-8827:%%domain-1:"}'
G = '{"_event":"bulk-subscribe","tzID":8,"message":"pid-8830:%%pid-8849:%%pid-2186:%%pid-8833:"}'
#G = '{"_event":"bulk-subscribe","tzID":8,"message":"pid-8830:"}'
#G = '{"_event":"bulk-subscribe","tzID":8,"message":"pid-8833:"}'
G1 = "{\"_event\":\"UID\",\"UID\":0}"  # 28 pad
G2 = ["{\"_event\":\"heartbeat\",\"data\":\"h\"}"]


async def hello():
    async with websockets.connect(
            'wss://stream202.forexpros.com/echo/288/y5irwbm8/websocket',
            ssl=ssl.SSLContext(protocol=ssl.PROTOCOL_TLS),
            ping_interval=None) as websocket:
        response = await websocket.recv()
        print("< {}".format(response))
        await websocket.send(json.dumps(G))
        await websocket.send(json.dumps(G1))
        OP = 0
        IOP = random.choice([10, 29, 15])
        while True:
            try:
                response = await websocket.recv()