def __str__(self):
    """Return a human-readable one-line summary of the event.

    Format: "<title>, at <location> on <date range>[ (<extras>)]".
    The date portion adapts to whether start/end are full datetimes
    (with times) or plain dates, and whether both fall on the same day.
    """
    start = self.start
    end = self.end
    if isinstance(self.start, datetime):
        # Normalize to the event's base timezone before formatting.
        start = self.start.astimezone(RawEvent.BASE_TZ)
        end = self.getEnd(base=start)
    if start and end:
        sDate = utils.getDate(start)
        eDate = utils.getDate(end)
        if not isinstance(start, datetime):
            # Date-only start: no time component available.
            evDate = start.strftime('%B %d %Y')
        else:
            evDate = start.strftime('%B %d %Y from %H:%M')
        if sDate == eDate:
            # Same-day event: only append the end time (if we have one).
            if isinstance(end, datetime):
                evDate += end.strftime(' to %H:%M')
        else:
            # Multi-day event: append the end date, plus time when known.
            if not isinstance(end, datetime):
                evDate += end.strftime(' to %B %d %Y')
            else:
                evDate += end.strftime(' to %B %d %Y at %H:%M')
    elif start:
        # No end available: format the start alone.
        if not isinstance(start, datetime):
            evDate = start.strftime('%B %d %Y')
        else:
            evDate = start.strftime('%B %d %Y at %H:%M')
    else:
        evDate = '<unknown date>'
    extra = f' ({self.extras})' if self.extras else ''
    return f'{self.title}, at {self.location or "<somewhere>"} on {evDate}{extra}'
def decodeValue(self, value, dbf):
    """Decode ``value`` into a ``datetime.date``, or ``None`` when blank.

    Trailing/leading spaces and NUL padding are stripped before parsing.
    """
    stripped = value.strip(' \0')
    if not stripped:
        return None
    return utils.getDate(stripped)
def updateMyStock(account, myStock):
    """Upsert today's stock holdings for *account* into the 'stock' collection.

    Each entry in *myStock* is stamped (in place) with today's date and the
    account, then upserted keyed on (date, account, code).
    """
    collection = db['stock']
    today, _ = getDate()
    for entry in myStock:
        entry['date'] = today
        entry['account'] = account
        selector = {'date': today, 'account': account, 'code': entry['code']}
        collection.update_one(selector, {'$set': entry}, upsert=True)
def generate_user_data(self):
    """Assemble the full registration record as a flat list.

    Combines form input, the chosen security questions, and machine/session
    metadata (token, timestamps, host info, avatar path) in the fixed order
    the downstream consumer expects.
    """
    fields = self.collect_input()
    sec_q1 = str(self.ques1.currentText())
    sec_q2 = str(self.ques2.currentText())
    user_title = str(self.titleField.text()) or "Guess"
    # Session/machine metadata, gathered in the same order as before.
    token = getToken()
    timelog = getTime()
    sys_info = get_user_location()
    product_id = sys_info['Product ID']
    ip, city_ip, country_ip = get_local_pc_info()
    unix = getUnix()
    datelog = getDate()
    # Fall back to the stock avatar when no custom image exists on disk.
    if os.path.exists(self.rawAvatarPth):
        avatar = self.rawAvatarPth
    else:
        avatar = get_avatar_image('default')
    return [
        fields[0], fields[1], fields[3], fields[4], user_title,
        fields[5], fields[6], fields[7], fields[8], fields[9],
        fields[10], fields[11], token, timelog, product_id,
        ip, city_ip, country_ip, unix, sec_q1, fields[12],
        sec_q2, fields[13], datelog, sys_info['os'],
        sys_info['pcUser'], sys_info['python'], avatar,
    ]
def readDataFromFile(type, fromDate, toDate):
    """Read and concatenate the daily data files between two dates.

    Arguments:
        type: directory name holding one file per day (also acts as a
              category prefix); note it shadows the builtin -- kept for
              interface compatibility.
        fromDate, toDate: dates as "yyyymmdd" strings; both files must
              already exist or an empty list is returned.

    Returns a list of stripped lines from every existing daily file in
    [fromDate, toDate]; missing days in between are skipped.
    """
    year = int(fromDate[0:4])
    month = int(fromDate[4:6])
    day = int(fromDate[6:8])
    data = []
    if len(fromDate) != 8:
        # TODO: support other date formats
        return data
    if not (os.path.exists(type + "/" + fromDate) and
            os.path.exists(type + "/" + toDate)):
        return data
    while True:
        # utils.getDate normalizes day overflow (e.g. day 32 -> next month).
        currentDateGroup = utils.getDate(year, month, day)
        currentDate = currentDateGroup[0]
        year, month, day = currentDateGroup[2:]
        fileName = type + "/" + currentDate
        if not os.path.exists(fileName):
            day += 1
            continue
        # Bug fix: the original left the file handle open if reading raised;
        # `with` guarantees it is closed.
        with open(fileName) as fileObject:
            for line in fileObject:
                data.append(line.strip())
        if currentDate == toDate:
            break
        day += 1
    return data
def parse(self, response):
    """Scrapy callback: scrape the worldometers country table into a dated
    CSV file, then log and upload it."""
    page = response.url.split("/")[-2]
    filename = DATA_PATH + getDate() + '-%s-worldometers.csv' % page
    with open(filename, 'w') as f:
        # Header: table columns 1..24, prefixed with '#' and suffixed
        # with 'Test/1M pop'.
        categories = modifyNames(
            response.css('#main_table_countries_today th::text')
            [1:25].getall())
        categories.insert(0, '#')
        categories.append('Test/1M pop')
        for i in range(len(categories) - 1):
            f.write(categories[i] + ',')
        f.write(categories[len(categories) - 1] + '\n')
        # Rows 9..222 hold the per-country figures -- presumably skipping
        # header/summary rows; TODO confirm offsets against the live page.
        country = 9
        while country < 223:
            curr = modifyData(
                response.xpath(
                    '//*[@id="main_table_countries_today"]/tbody[1]/tr[' +
                    str(country) + ']').css('tr ::text').getall())[:-4]
            if len(curr) == 15:
                # Writes curr[0..12] then curr[13]; the final field is dropped.
                for a in range(len(curr) - 2):
                    f.write(curr[a] + ',')
                f.write(curr[-2] + '\n')
            elif len(curr) == 14:
                # NOTE(review): this writes curr[12] twice (the loop already
                # emitted it, and curr[-2] is curr[12] again) -- confirm
                # whether range(len(curr) - 2) or curr[-1] was intended.
                for a in range(len(curr) - 1):
                    f.write(curr[a] + ',')
                f.write(curr[-2] + '\n')
            country += 1
        # Redundant inside `with` (the context manager closes f), but harmless.
        f.close()
    goLog(
        'Corona_Spider: Successfully crawed data from the target website.')
    uploadData(filename, 5000)
def readDataFromFile(type, fromDate, toDate) :
    """Collect the lines of every daily file between fromDate and toDate.

    Arguments:
        type: directory containing one data file per "yyyymmdd" day
              (name kept for interface compatibility despite shadowing
              the builtin).
        fromDate, toDate: inclusive "yyyymmdd" bounds; both endpoint
              files must exist, otherwise an empty list is returned.

    Returns the stripped lines of all existing files in the range,
    skipping missing days.
    """
    year = int(fromDate[0 : 4])
    month = int(fromDate[4 : 6])
    day = int(fromDate[6 : 8])
    data = []
    if len(fromDate) != 8 :
        # TODO: handle other date-string formats
        return data
    if not(os.path.exists(type + "/" + fromDate) and os.path.exists(type + "/" + toDate)) :
        return data
    while True :
        # utils.getDate normalizes an overflowing day into the next month.
        currentDateGroup = utils.getDate(year, month, day)
        currentDate = currentDateGroup[0]
        [year, month, day] = currentDateGroup[2:]
        fileName = type + "/" + currentDate
        if not os.path.exists(fileName) :
            day = day + 1
            continue
        # Bug fix: use a context manager so the handle is closed even if
        # reading raises (the original leaked it on exception).
        with open(fileName) as fileObject :
            for line in fileObject :
                data.append(line.strip())
        if currentDate == toDate :
            break
        day = day + 1
    return data
def updateProfit(account, profit):
    """Upsert today's profit snapshot for *account* into the 'profit' collection.

    Bug fix: the original did ``update = profit`` and then mutated it,
    silently adding 'date'/'account' keys to the caller's dict. The dict
    is now copied first, so the caller's object is left untouched.
    """
    collection = db['profit']
    date, _ = getDate()
    update = dict(profit)  # shallow copy: don't mutate the caller's dict
    update['date'] = date
    update['account'] = account
    collection.update_one({'date':date, 'account':account},
                          {'$set':update}, upsert=True)
def decodeValue(self, value):
    """Decode raw bytes ``value`` into a ``datetime.date`` (None when blank)."""
    text = value.decode(utils.ENCODING)
    return utils.getDate(text) if text.strip() else None
def compileCommon(filepath, token, force=False):
    """Generic compile pass: expand @tm:path@, @date@ and @name@ tokens.

    (Original docstring, translated from Chinese: "generic compile method;
    compiles @tm:file_path@ into a 6-digit timestamp".)

    Arguments:
    - `filepath`: path of the source file, or the raw content itself when
      ``force`` is true.
    - `token`: optional config section name consulted before the global config.
    - `force`: treat ``filepath`` as literal content instead of a path.

    Returns the expanded content, or False when the file is missing or has
    an unsupported extension.
    """
    if force:
        content = filepath
    else:
        if not os.path.exists(filepath):
            return False
        ftype = filepath.split('.')[-1]
        if not ftype in ['html', 'htm', 'css', 'js', 'tpl', 'jsp']:
            return False
        content = utils.readfile(filepath)
    TM_TOKEN = '@tm:(.*?)@'
    DATE_TOKEN = '@date@'
    COMMON_TOKEN = '@(.*?)@'
    # Replacements run back-to-front so earlier match offsets stay valid.
    iters = re.finditer(TM_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0:i.start(0)] + getFileTimeStamp(
            i.group(1), filepath) + content[i.end(0):]
    iters = re.finditer(DATE_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0:i.start(0)] + utils.getDate() + content[i.end(0):]
    iters = re.finditer(COMMON_TOKEN, content)
    for i in reversed(list(iters)):
        config = conf.getConfig()
        name = i.group(1)
        # Section config (when `token` is given) wins over the global config.
        value = (token and config[token].get(name)) or config.get(name)
        if value:
            if value.find('{num}') != -1:
                num = (token and config[token].get('num')) or config.get('num') or '10'
                # NOTE(review): `num` may be the string '10' here, making
                # `num + 1` raise TypeError -- confirm the config value type.
                num = range(num + 1)
                substr100 = content[i.end(0):i.end(0) + 100]
                istimestamp = substr100.find('t=')
                if istimestamp != -1:
                    #has timestamp
                    try:
                        tm = int(substr100[istimestamp + 2:istimestamp + 3])
                    except ValueError:
                        continue
                    if tm >= len(num):
                        tm = tm - len(num)
                    value = value.replace('{num}', str(tm))
                else:
                    # Round-robin across hosts via the module-level counter.
                    global range_item
                    value = value.replace('{num}', str(num[range_item]))
                    range_item = range_item + 1
                    if range_item >= len(num):
                        range_item = 0
            content = content[0:i.start(0)] + value + content[i.end(0):]
    return content
def getSignal(page=1, codeName=None, so='1year', is_paging=False):
    """Fetch trade signals from the 'signal' collection.

    With ``is_paging`` true, returns ``(paging, cursor)`` over signals newer
    than the span cutoff, newest first, optionally filtered by ``codeName``.
    Otherwise ignores ``so`` and returns a cursor over today's signals.
    """
    date, initialDate = getDate(so=so)
    collection = db['signal']
    if not is_paging:
        # Non-paging mode: re-resolve to the default span, today's rows only.
        date, initialDate = getDate()
        return collection.find({'date': date})
    per_page = page_default['per_page']
    offset = (page - 1) * per_page
    query = {}
    if codeName is not None:
        query['codeName'] = codeName
    query['date'] = {'$gt': initialDate}
    data_list = collection.find(query, sort=[('date', -1)]).limit(per_page).skip(offset)
    paging = paginate(page, per_page, data_list.count())
    return paging, data_list
def getHistoryXJ(dateYear, dateMonth, dateDay, dataFolder) :
    # Download one day's SSC draw history from xjflcp.com into dataFolder,
    # skipping days already on disk. Returns the (normalized) date parts.
    currentDate = utils.getDate(dateYear, dateMonth, dateDay)
    fromDate = currentDate[0]
    [dateYear, dateMonth, dateDay] = currentDate[2:]
    # End of the window is the next day; presumably utils.getDate normalizes
    # a day+1 overflow into the next month -- TODO confirm.
    endDate = utils.getDate(dateYear, dateMonth, dateDay + 1)[0]
    if not os.path.exists(dataFolder) :
        os.makedirs(dataFolder)
    if os.path.exists(dataFolder + "/" + fromDate) :
        # Already downloaded: skip the fetch entirely.
        print "skip " + dataFolder + "/" + fromDate
    else :
        url = "http://www.xjflcp.com/trend/analyseSSC.do?operator=goldSscTrend&type=draw&drawBegin="\
            + fromDate + "&drawEnd=" + endDate
        s = getBeautifulSoup(url)
        writeDataToFileXJ(s, fromDate, dataFolder)
    return [dateYear, dateMonth, dateDay]
def compileCommon(filepath, token, force=False):
    """Generic compile pass expanding @tm:path@, @date@ and @name@ tokens.

    (Original docstring translated from Chinese: "generic compile method;
    compiles @tm:file_path@ into a 6-digit timestamp".)

    Arguments:
    - `filepath`: source-file path, or raw content when ``force`` is true.
    - `token`: optional config section consulted before the global config.
    - `force`: treat ``filepath`` as literal content, not a path.

    Returns the expanded content, or False for missing files / unsupported
    extensions.
    """
    if force:
        content = filepath
    else:
        if not os.path.exists(filepath):
            return False
        ftype = filepath.split(".")[-1]
        if not ftype in ["html", "htm", "css", "js", "tpl", "jsp"]:
            return False
        content = utils.readfile(filepath)
    TM_TOKEN = "@tm:(.*?)@"
    DATE_TOKEN = "@date@"
    COMMON_TOKEN = "@(.*?)@"
    # Iterate matches in reverse so splicing doesn't shift earlier offsets.
    iters = re.finditer(TM_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0 : i.start(0)] + getFileTimeStamp(i.group(1), filepath) + content[i.end(0) :]
    iters = re.finditer(DATE_TOKEN, content)
    for i in reversed(list(iters)):
        content = content[0 : i.start(0)] + utils.getDate() + content[i.end(0) :]
    iters = re.finditer(COMMON_TOKEN, content)
    for i in reversed(list(iters)):
        config = conf.getConfig()
        name = i.group(1)
        # Section-level value (when `token` given) takes priority.
        value = (token and config[token].get(name)) or config.get(name)
        if value:
            if value.find("{num}") != -1:
                num = (token and config[token].get("num")) or config.get("num") or "10"
                # NOTE(review): `num` may be the string "10" here, so
                # `num + 1` would raise TypeError -- confirm config types.
                num = range(num + 1)
                substr100 = content[i.end(0) : i.end(0) + 100]
                istimestamp = substr100.find("t=")
                if istimestamp != -1:
                    # has timestamp
                    try:
                        tm = int(substr100[istimestamp + 2 : istimestamp + 3])
                    except ValueError:
                        continue
                    if tm >= len(num):
                        tm = tm - len(num)
                    value = value.replace("{num}", str(tm))
                else:
                    # Round-robin through hosts via the module-level counter.
                    global range_item
                    value = value.replace("{num}", str(num[range_item]))
                    range_item = range_item + 1
                    if range_item >= len(num):
                        range_item = 0
            content = content[0 : i.start(0)] + value + content[i.end(0) :]
    return content
def updateChart(chart, so='5year'):
    # Upsert chart rows newer than the span cutoff into the 'chart' collection.
    collection = db['chart']
    date, initialDate = getDate(so=so)
    initialDate = int(initialDate)
    for data in chart:
        # Assumes `chart` is sorted by date descending -- TODO confirm with
        # callers; the loop stops at the first row older than the cutoff.
        if int(data['date']) < initialDate:
            break
        else:
            collection.update_one({'code':data['code'], 'date':data['date']},
                                  {'$set':data}, upsert=True)
def processFiles(folder, dfs):
    """Load every NASDAQ*.txt file under *folder* and append each frame to *dfs*.

    Files have no index column, so columns are assigned positionally.
    """
    for path in glob.glob(folder + "/NASDAQ*.txt"):  #[0:10]:
        y, m, d = u.getDate(path)
        # No index_col: the files carry no index column, pandas adds [0,1,2,...].
        frame = pd.read_csv(path)
        frame.columns = ["sym", "date", "o", "h", "l", "c", "vol"]
        # Symbols are unique within each file.
        dfs.append(frame)
def printClock(xy, img: Image, today: datetime):
    """Render the clock strip (time, weekday, date) onto *img* at *xy*."""
    x, y = xy[0], xy[1]
    canvas = ImageDraw.Draw(img)
    # Blank the strip before redrawing.
    canvas.rectangle((x, y, rbtv_config.screen_width, 65), fill=0)
    canvas.text((x, y - 12), utils.getTime(today), font = rbtv_config.fontHuge, fill = 255)
    canvas.text((x + 200, y + 3), utils.getWeekday(today), font = rbtv_config.fontSmall, fill = 255)
    canvas.text((x + 200, y + 32), utils.getDate(today), font = rbtv_config.fontSmall, fill = 255)
    # NOTE(review): no fill/outline argument, so this call draws nothing
    # visible -- confirm whether it is intentional.
    canvas.rectangle((0, 0, 600, 140))
def encodeValue(self, value):
    """Return *value* encoded as a "yyyymmdd" date string.

    ``value`` must be acceptable to the `utils.getDate` call.
    """
    date = utils.getDate(value)
    return date.strftime("%Y%m%d")
def getAccountInfo(account, page=1, is_paging=False):
    """Look up account records.

    Paging mode returns ``(paging, cursor)`` over the account's history,
    newest first; otherwise returns today's single record (or None).
    """
    collection = db['account']
    if not is_paging:
        today, _ = getDate()
        return collection.find_one({'date': today, 'account': account})
    per_page = page_default['per_page']
    skip = (page - 1) * per_page
    data_list = collection.find({'account': account}, sort=[('date', -1)]).limit(per_page).skip(skip)
    paging = paginate(page, per_page, data_list.count())
    return paging, data_list
def getHistory(dateYear, dateMonth, dateDay, dataFolder, lotId) :
    # Download one day's lottery draw data for lotId from chart.cp.360.cn
    # into dataFolder. Returns the (normalized) date parts.
    currentDate = utils.getDate(dateYear, dateMonth, dateDay)
    fromDate = currentDate[1]
    [dateYear, dateMonth, dateDay] = currentDate[2:]
    if os.path.exists(dataFolder + "/" + fromDate) :
        # NOTE(review): this only logs the skip -- the download below still
        # runs and overwrites the file; confirm whether a `return` is missing.
        print "skip " + dataFolder + "/" + fromDate
    url = "http://chart.cp.360.cn/kaijiang/kaijiang?lotId="+lotId+"&spanType=2&span="+fromDate+"_" + fromDate
    s = getBeautifulSoup(url)
    if not os.path.exists(dataFolder) :
        os.makedirs(dataFolder)
    writeDataToFile(s, currentDate[0], dataFolder)
    return [dateYear, dateMonth, dateDay]
def encodeValue(self, value):
    """Encode *value* as ``b"yyyymmdd"``.

    ``value`` must be acceptable to the `utils.getDate` call; falsy values
    produce a field-length run of blanks instead.
    """
    if not value:
        # Space-pad to the declared field length.
        return b" " * self.length
    return utils.getDate(value).strftime("%Y%m%d").encode(utils.ENCODING)
def __init__(
    self,
    fields=None,
    headerLength=0,
    recordLength=0,
    recordCount=0,
    signature=0x03,
    lastUpdate=None,
    ignoreErrors=False,
):
    """Initialize instance.

    Arguments:
        fields:
            a list of field definitions;
        recordLength:
            size of the records;
        headerLength:
            size of the header;
        recordCount:
            number of records stored in DBF;
        signature:
            version number (aka signature). using 0x03 as a default
            meaning "File without DBT". for more information about this
            field visit
            ``http://www.clicketyclick.dk/databases/xbase/format/dbf.html#DBF_NOTE_1_TARGET``
        lastUpdate:
            date of the DBF's update. this could be a string ('yymmdd' or
            'yyyymmdd'), timestamp (int or float), datetime/date value,
            a sequence (assuming (yyyy, mm, dd, ...)) or an object having
            callable ``ticks`` field.
        ignoreErrors:
            error processing mode for DBF fields (boolean)
    """
    self.signature = signature
    if fields is None:
        self.fields = []
    else:
        # Copy so later mutations don't alias the caller's list.
        self.fields = list(fields)
    # getDate normalizes the many accepted lastUpdate representations.
    self.lastUpdate = getDate(lastUpdate)
    self.recordLength = recordLength
    self.headerLength = headerLength
    self.recordCount = recordCount
    self.ignoreErrors = ignoreErrors
    # XXX: I'm not sure this is safe to
    # initialize `self.changed` in this way
    self.changed = bool(self.fields)
def getBestBets(self):
    """Query the "best bets" endpoint for tonight's 20:00-23:00 window.

    Returns a list of BestBet objects on success, or False on any non-200
    HTTP response.
    """
    date = utils.getDate()
    start = date.strftime("%m/%d/%Y+20:00:00")
    end = date.strftime("%m/%d/%Y+23:00:00")
    # Device/session identity plus the requested time window.
    payload = {"optimum_id": self.optimum_ID,
               "deviceId": self.device_ID,
               "deviceType": self.device_type,
               "os": self.device_os,
               "AuthToken": self.AuthToken,
               "hubId": self.hub_ID,
               "startTime": start,
               "endTime": end,
               "wifiRSSI": "NA"}
    conn = requests.get(base_url.best_bets, params=payload)
    # TODO parse XML data
    if conn.status_code == 200:
        xml_dom = minidom.parseString(conn.text)
        blocks = xml_dom.getElementsByTagName("block")
        best_bets = []
        best_bets_objs = []
        for block in blocks:
            this_block = {}
            # Only "core" blocks carry the programme classifiers we need.
            if block.getAttribute("class") == "core":
                attributes = block.getElementsByTagName("classifier")
                for attribute in attributes:
                    this_block[attribute.getAttribute("type")] = attribute.childNodes[0].data
                best_bets.append(this_block)
        for best_bet in best_bets:
            new_best_bet_obj = BestBet(best_bet["PositiveContent"],
                                       best_bet["ProgramTitle"],
                                       best_bet["ProgramId"],
                                       best_bet["PrgSvcId"],
                                       best_bet["FullPrgSvcName"],
                                       best_bet["EventID"],
                                       best_bet["AirDateTime"],
                                       best_bet["Duration"],
                                       best_bet["Language"],
                                       best_bet["Channelnumber"],
                                       best_bet["ProgramType"],
                                       best_bet["tvrating"],
                                       best_bet["mpaa"],
                                       best_bet["Callsign"],
                                       None)
            best_bets_objs.append(new_best_bet_obj)
        return best_bets_objs
    return False
def encodeValue(self, value, dbf):
    """Return a string-encoded value.

    ``value`` argument should be a value suitable for the
    `utils.getDate` call or None.

    Return:
        Return value is a string in format "yyyymmdd" or empty string
    """
    result = self.emptyValue
    if value:
        value = utils.getDate(value)
        try:
            result = value.strftime("%Y%m%d")
        except ValueError:
            # Unformattable dates fall back to the empty value.
            pass
    # Bug fix: the original computed `result` but never returned it,
    # despite the docstring's contract.
    return result
def encodeValue(self, value, dbf): """Return a string-encoded value. ``value`` argument should be a value suitable for the `utils.getDate` call or None. Return: Return value is a string in format "yyyymmdd" or empty string """ result = self.emptyValue if value: value = utils.getDate(value) try: result = value.strftime("%Y%m%d") except ValueError, e: print e
def admin():
    """Render the administration dashboard with archive statistics."""
    context = {
        'title': "Administration | Gawa Côte d'Ivoire ",
        # Total number of articles stored in the database.
        'number_article': utils.getNumberArticle(),
        # Number of articles in the Ivory Coast archive.
        'all_article_archive': update.app(),
        # Archive articles with no problems.
        'rest_article_archive': update.difference(),
        # Current date and year for display.
        'date_of_now': utils.getDate(),
        'year': utils.getYear(),
    }
    return render_template('admin.html', **context)
def getBestBets(self):
    """Fetch tonight's (20:00-23:00) "best bets" from the service.

    Returns a list of BestBet objects, or False on any non-200 response.
    """
    date = utils.getDate()
    start = date.strftime("%m/%d/%Y+20:00:00")
    end = date.strftime("%m/%d/%Y+23:00:00")
    # Device/session identity plus the requested time window.
    payload = {
        "optimum_id": self.optimum_ID,
        "deviceId": self.device_ID,
        "deviceType": self.device_type,
        "os": self.device_os,
        "AuthToken": self.AuthToken,
        "hubId": self.hub_ID,
        "startTime": start,
        "endTime": end,
        "wifiRSSI": "NA"
    }
    conn = requests.get(base_url.best_bets, params=payload)
    # TODO parse XML data
    if conn.status_code == 200:
        xml_dom = minidom.parseString(conn.text)
        blocks = xml_dom.getElementsByTagName("block")
        best_bets = []
        best_bets_objs = []
        for block in blocks:
            this_block = {}
            # Only "core" blocks carry the classifiers of interest.
            if block.getAttribute("class") == "core":
                attributes = block.getElementsByTagName("classifier")
                for attribute in attributes:
                    this_block[attribute.getAttribute(
                        "type")] = attribute.childNodes[0].data
                best_bets.append(this_block)
        for best_bet in best_bets:
            new_best_bet_obj = BestBet(
                best_bet["PositiveContent"], best_bet["ProgramTitle"],
                best_bet["ProgramId"], best_bet["PrgSvcId"],
                best_bet["FullPrgSvcName"], best_bet["EventID"],
                best_bet["AirDateTime"], best_bet["Duration"],
                best_bet["Language"], best_bet["Channelnumber"],
                best_bet["ProgramType"], best_bet["tvrating"],
                best_bet["mpaa"], best_bet["Callsign"], None)
            best_bets_objs.append(new_best_bet_obj)
        return best_bets_objs
    return False
def generateGraph(input_csv,
                  file_path,
                  port=PSQL_PORT,
                  columns=['Country/Other', 'Total Cases'],
                  upload=False):
    """Plot a bar chart of countries with more than 5000 confirmed cases.

    Arguments:
        input_csv: CSV produced by the worldometers spider.
        file_path: directory prefix for the saved PNG.
        port: PostgreSQL port forwarded to uploadData.
        columns: columns to select from the CSV (kept as-is for
                 interface compatibility; note it's a mutable default,
                 but it is never mutated here).
        upload: when true, push the PNG into the database.

    Bug fix: the figure is now saved *before* ``plt.show()`` -- show()
    hands the figure to the GUI event loop and it may be cleared by the
    time savefig runs, which produced blank images.
    """
    sns.set(style='whitegrid')
    data = pd.read_csv(input_csv, encoding='ISO-8859-1')
    data = data[columns][data['Total Cases'] > 5000].sort_values(
        by='Total Cases', ascending=False)
    f, ax = plt.subplots(figsize=(10, 20))
    sns.barplot(x='Total Cases', y='Country/Other', data=data)
    plt.title('COVID-19 Confirmed Cases')
    plt.xlabel('Confirmed Cases')
    plt.ylabel('Country')
    file_name = file_path + \
        ut.getDate() + 'COVID-19_Confirmed_Cases.png'
    plt.savefig(file_name)  # save first, then display
    plt.show()
    if upload:
        uploadData(file_name, port, 'corona_img')
def getChart(code, isJson=False, so='1year'):
    """Return chart rows for *code* newer than the span cutoff.

    JSON mode returns just the rows (ascending by date). Otherwise returns
    ``(isNext, lastDate, chart)`` with rows descending, where ``isNext``
    tells whether newer data than ``lastDate`` may still exist.
    """
    collection = db['chart']
    date, initialDate = getDate(so=so)
    recent = {'code': code, 'date': {'$gt': initialDate}}
    if isJson:
        chart = []
        for row in collection.find(recent, sort=[('date', 1)]):
            del row['_id']
            chart.append(row)
        return chart
    lastDate = None
    newest = collection.find_one({'code': code}, sort=[('date', -1)])
    if newest is not None:
        lastDate = newest['date']
    chart = []
    for row in collection.find(recent, sort=[('date', -1)]):
        del row['_id']
        chart.append(row)
    isNext = date != lastDate
    return isNext, lastDate, chart
def processFiles(folder, files):
    """Split per-file OHLC data into one CSV per symbol under *folder*.

    Each input file is indexed by symbol; all files are concatenated and
    then written back out grouped by symbol.
    """
    frames = []
    for fn in files:
        y, m, d = u.getDate(fn)
        # Column 0 is the symbol index; remaining columns are relabeled.
        frame = pd.read_csv(fn, index_col=0)
        frame.columns = ["date", "o", "h", "l", "c", "vol"]
        frame.index.names = ['sym']
        frames.append(frame)
    combined = pd.concat(frames)
    # Write one file per symbol appearing in the combined index.
    for sym in combined.index.values:
        per_sym = combined.loc[combined.index == sym]
        per_sym.to_csv(folder + "/" + sym + ".txt", index=True)
        print("sym= ", sym, fn, folder)
def getChartSignal(code, type='granville', so='1year'):
    """Return chart rows plus buy/sell signal markers for plotting.

    Produces ``(chart, buySignals, sellSignals)`` where each chart entry
    carries an x-index plus close/MA/std fields, and signal markers carry
    the matching x-index and close price.
    """
    date, initialDate = getDate(so=so)
    # Index signals by date for O(1) lookup while walking the chart rows.
    signalDict = {}
    signal_rows = db['signal'].find(
        {'code': code, 'type': type, 'date': {'$gt': initialDate}},
        sort=[('date', 1)])
    for signal in signal_rows:
        del signal['_id']
        signalDict[signal['date']] = signal
    chart = []
    buySignals = []
    sellSignals = []
    chart_rows = db['chart'].find(
        {'code': code, 'date': {'$gt': initialDate}}, sort=[('date', 1)])
    for idx, row in enumerate(chart_rows):
        del row['_id']
        chart.append({'x': idx, 'date': row['date'], 'close': row['close'],
                      'ma120': row['ma120'], 'ma20': row['ma20'],
                      'std20': row['std20']})
        signal = signalDict.get(row['date'])
        if signal is None:
            continue
        marker = {'x': idx, 'close': row['close']}
        if signal['trade'] == 'buy':
            buySignals.append(marker)
        elif signal['trade'] == 'sell':
            sellSignals.append(marker)
    return chart, buySignals, sellSignals
def bot_reply(text):
    """Build a chat reply for *text*: emoji echo, goodbye detection,
    greeting, canned identity answers, date/time lookups and wikipedia
    searches, applied in that order."""
    # text = input()
    response = ""
    if text == None:
        return f"{get_emoji()}"
    if text != None:
        # One emoji is appended per *character* of the input.
        # NOTE(review): both branches are identical, so the UNICODE_EMOJI
        # membership test is a no-op -- confirm intent.
        for i in text:
            if i in emoji.UNICODE_EMOJI:
                response = response + f"{get_emoji()}"
            else:
                response = response + f"{get_emoji()}"
    if text != None:
        # Bail out with a farewell when any goodbye word appears.
        for i in text.split():
            if i.lower() in [
                    "bye", "leave", "exit", "quit", 'bye', 'see you',
                    'goodbye', 'good bye', 'exit', 'leave', 'go', 'tata',
                    'see ya'
            ]:
                return end_conv(i)
    response = response + greeting(text)
    # NOTE(review): the `"" in text.lower() == "whats you name" in ...`
    # clause is a chained comparison and does not do what it appears to --
    # confirm the intended condition.
    if "who are you" in text.lower() or "name yourself" == text.lower(
    ) or "tell your name" == text.lower() or "" in text.lower(
    ) == "whats you name" in text.lower() or "what's your name" == text.lower(
    ) or "what is your name" == text.lower() or "what your name" == text.lower(
    ):
        response += random.choice([
            "I am buddy, don't like teddy, but i still love candy. HA HA HA!!!",
            "Buddy Here. ", "Buddy your friend. "
        ])
    if "who is your botmaster" in text.lower(
    ) or "name of your botmaster" in text.lower(
    ) or "name your botmaster" in text.lower(
    ) or "who is your master" in text.lower(
    ) or "name of your master" in text.lower(
    ) or "name your master" in text.lower():
        return random.choice(
            ["Udit is the one who made me ... ", "U.D.I.T <- He is the one."])
    if "tell me about your botmaster" in text.lower(
    ) or "tell about botmaster" in text.lower(
    ) or "about botmaster" in text.lower(
    ) or "tell me about your master" in text.lower(
    ) or "tell about master" in text.lower() or "about master" in text.lower():
        # NOTE(review): this literal was split across physical lines in the
        # source; the embedded line break is reconstructed as \n -- confirm.
        response += "Well, he is a programmer. \nTo know more about him check this out https://github.com/uditkumar01"
    if 'date' in text.lower():
        get_date = getDate()
        response = response + ' ' + get_date
    if 'what is the time' in text.lower() or 'Tell me the time' in text.lower(
    ) or 'Tell me time' in text.lower(
    ) or 'what\'s the time' in text.lower() or 'whats the time' in text.lower(
    ) or 'check the time' in text.lower() or 'check the clock' in text.lower(
    ) or 'check time' in text.lower() or 'check clock' in text.lower():
        get_time = getTime()
        response = response + ' ' + get_time
    if 'who is' in text.lower():
        # print(text)
        # Everything after the first two words is taken as the person name.
        words = text.split()
        person = " ".join(words[2:])
        # person = wikipedia.suggest(person)
        print("person", person)
        my_results = wikipedia.search(text)
        all_results = []
        for i in my_results:
            # Exact (case-insensitive) match: answer immediately.
            if person.lower() == i.lower():
                return response + ' ' + search_wiki(person)
            # NOTE(review): collecting only results containing "bill" looks
            # like leftover test code -- confirm.
            if "bill" in i.lower():
                all_results.append(i)
        print("check1", all_results)
        if len(all_results) > 1:
            return f"Which {person} you are talking about: {' ,'.join(all_results)}"
        # response = person
        response = response + ' ' + search_wiki(person)
    if 'what is' in text.lower():
        words = text.split()
        person = " ".join(words[2:])
        # response = person
        response = response + ' ' + search_wiki(person)
    if "i" not in text.lower() or "name" not in text.lower():
        text = spell_check(text.split())
    if response == "":
        # Nothing matched above: fall through to the generic reply engine.
        response = replying(text)
    return response
def uploadData(upload_file, port=PSQL_PORT, table_name='default', data=''):
    """Upload a CSV (as a new table) or PNG (as a binary row) into Postgres."""
    try:
        # Connectivity probe; credentials are redacted placeholders.
        psycopg2.connect(user='******',
                         password='******',
                         host='127.0.0.1',
                         port=port,
                         database='postgres')
    except:
        ut.goLog('PostgresSQL_uploadData: Failed to connect to PSQL',
                 level='error')
        return
    connection = psycopg2.connect(user='******',
                                  password='******',
                                  host='127.0.0.1',
                                  port=port,
                                  database='postgres')
    cursor = connection.cursor()
    if upload_file.endswith('csv'):
        try:
            f = open(upload_file, 'r')
        except IOError:
            raise IOError('Cannot read the file.')
        lines = f.readlines()
        f.close()
        # Header line becomes the column list (slashes/spaces sanitized,
        # first two characters dropped).
        columns = lines[0].replace('/', '_').replace(' ', '_').lower()[2:]
        columns_list = columns.split(',')
        # NOTE(review): `date` is undefined in this branch (the parameter is
        # named `data`), so this line raises NameError -- confirm it should
        # test `data` instead.
        if date == '':
            table_name = 'corona_data_' + ut.getDate().replace('-', '_')
        else:
            table_name = 'corona_data_' + date
        create_table_query = 'CREATE TABLE ' + table_name + ' ('
        for column in columns_list:
            create_table_query += column + ' text NOT NULL,'
        create_table_query = create_table_query[:-1] + ');'
        try:
            cursor.execute(create_table_query)
        except:
            ut.goLog('PostgresSQL_uploadData: Failed to create table.',
                     level='error')
            return
        cursor.execute('SELECT * FROM ' + table_name)
        existing_column = [desc[0] for desc in cursor.description]
        datas = lines[1:]
        # NOTE(review): values are interpolated directly into the SQL text;
        # parameterized queries would be safer against injection.
        add_data_query = 'INSERT INTO ' + table_name + ' (' + columns + ')' + ' VALUES '
        for data in datas:
            data = data.split(',')
            del data[0]
            data_query = '(' + ut.prepareData(data) + ');'
            cursor.execute(add_data_query + data_query)
    elif upload_file.endswith('.png'):
        f = open(upload_file, 'rb')
        data = f.read()
        f.close()
        date = ut.getDate()
        add_data_query = 'INSERT INTO ' + table_name + ' (date, img) VALUES (' + date + ', ' + str(
            psycopg2.Binary(data)) + ')'
        cursor.execute(add_data_query)
    ut.goLog('PostgresSQL_uploadData: Successfully uploaded data to Database')
    connection.commit()
    connection.close()
def updateAccountInfo(update):
    """Upsert today's record for the account named in *update*.

    *update* must contain an 'account' key; a 'date' key is added in place.
    """
    collection = db['account']
    today, _ = getDate()
    update['date'] = today
    selector = {'date': today, 'account': update['account']}
    collection.update_one(selector, {'$set': update}, upsert=True)
def readSizes():
    # Parse the phedex CSV dump into replica-level and dataset-level
    # dictionaries, then build day-by-day effective-size arrays per
    # dataset for each category (All/AnaOps/AllOps/MinusOne).
    #replica level information
    phedexInfo = {}
    #dataset level information
    phedexDatasetInfo = {}
    #site,dataset,rdate,gid,min_date,max_date,ave_size,max_size,days
    colsPhedex = {
        "site": -1,
        "dataset": -1,
        "rdate": -1,
        "min_date": -1,
        "max_date": -1,
        "ave_size": -1,
        "max_size": -1,
        "days": -1,
        "gid": -1
    }
    colPhedexNames = colsPhedex.keys()
    nCount = 0
    print("### use phedexDataFile %s" % phedexDataFile)
    print("### testDS %s" % testDS)
    istream = fopen(phedexDataFile)
    for l in istream:
        nCount = nCount + 1
        #optionaly test things on a subset of data
        if isTest and nCount > 10000:
            print "Incomplete data as you are just testing"
            break
        sp = l.strip().split(',')
        #use the first row to understand the set of columns
        #stop if the data is not in the expected format
        if nCount == 1:
            for col in colPhedexNames:
                for i in range(0, len(sp)):
                    if col == sp[i]:
                        colsPhedex[col] = i
                if colsPhedex[col] == -1:
                    print "missing column", col
                    print("File: %s" % phedexDataFile)
                    sys.exit(1)
            # print("### colsPhedex", colsPhedex)
        else:
            #create the dictionaries from the phedex csvs
            dataset = sp[colsPhedex["dataset"]]
            site = sp[colsPhedex["site"]]
            rdate = sp[colsPhedex["rdate"]]
            gid = sp[colsPhedex["gid"]]
            #skip anything that is relval
            if 'RelVal' in dataset:
                continue
            key = (dataset, site, rdate, gid)
            #should become try: blah except: blah
            if dataset not in phedexDatasetInfo:
                phedexDatasetInfo[dataset] = []
            #this can then be used to look up detailed information in phedexInfo dictionary
            phedexDatasetInfo[dataset].append((site, rdate, gid))
            # All remaining columns become the per-replica detail record.
            datum = {}
            for col in colPhedexNames:
                if col == "site":
                    continue
                if col == "dataset":
                    continue
                datum[col] = sp[colsPhedex[col]]
            #catch errors - there should never be a repeated key
            if key in phedexInfo:
                print "Duplicated key"
                print key
                print sp
                print phedexInfo[key]
                sys.exit(1)
            #done, just store everything..
            phedexInfo[key] = datum
            if testDS in key:
                print("### testDS", key, datum)
    istream.close()
    replicas = phedexInfo.keys()
    nRep = len(replicas)
    #now make dataset level arrays that contain day-by-day size on T1/T2 disk
    #do that for analysis ops and comp ops and gid=-1 (which is a nonsense value)
    esDictKeys = ["All", "AnaOps", "AllOps", "MinusOne"]
    effectiveSizesDict = {}
    effectiveSizesFunc = {}
    for key in esDictKeys:
        effectiveSizesDict[key] = {}
        # Each category maps to a module-level predicate isAll/isAnaOps/...
        method = "is" + key
        effectiveSizesFunc[key] = globals()[method]
    print("phedexDatasetInfo", len(phedexDatasetInfo.keys()), "size",
          object_size(phedexDatasetInfo))
    #loop over dataset and replicas
    for dataset, keyInfos in phedexDatasetInfo.iteritems():
        #again, skip relvals here - even if there should be none
        if "/RelVal" in dataset:
            continue
        #create the arrays
        cacheES = {k: numpy.zeros(nDays) for k in esDictKeys}
        for key, val in cacheES.iteritems():
            effectiveSizesDict[key][dataset] = val
        #get the list replicas for this dataset
        #keyInfos=phedexDatasetInfo[dataset]
        #loop over them
        for keyInfo in keyInfos:
            site = keyInfo[0]
            #skip things that are not T1 or T2
            if not site.startswith("T1") and not site.startswith("T2"):
                continue
            # NOTE(review): this skips non-T2 sites when use_only_tier2 is
            # *false*, which looks inverted -- confirm the intended guard.
            if not use_only_tier2 and not site.startswith("T2"):
                continue
            #get the detailed phedex information for this replica
            phKey = (dataset, ) + keyInfo
            phDatum = phedexInfo[phKey]
            d1 = getDate(phDatum["min_date"])
            d2 = getDate(phDatum["max_date"])
            #compute the range of days that this replica was on disk
            indEnd = (d2 - dStartOldest).days if d2 < dEnd else nDays - 1
            if indEnd < 0:
                #sample was gone before the period we are looking at
                continue
            indStart = (d1 - dStartOldest).days if d1 > dStartOldest else 0
            #just some printouts for debugging if you want them
            if dataset == testDS:
                print site, phKey, phDatum
                print d1, d2
                print "start and end", indStart, indEnd
                print float(phDatum['ave_size'])
            #set the daily size to the average seen in the phedex dumps
            for key, val in effectiveSizesFunc.iteritems():
                if val(keyInfo):
                    cacheES[key][indStart:indEnd + 1] += float(
                        phDatum['ave_size'])
    return effectiveSizesDict