from os import path
import configparser

import pandas as pd

import lib.Tools as tools


def get_Question3():
    if path.exists('./lib/AAPL_Daily_Candle.csv'):
        df = pd.read_csv('./lib/AAPL_Daily_Candle.csv')
    else:
        config = configparser.ConfigParser()
        config.read('./lib/application.config')
        key = config['finnhub']['key']
        resolution = config['question2']['resolution']
        symbol = config['question2']['symbol']
        start_time = config['question2']['start_time']
        end_time = config['question2']['end_time']
        start = tools.date_to_epoch(start_time)
        end = tools.date_to_epoch(end_time)
        res = tools.getStockCandles(key, symbol, resolution, start, end)
        res.pop('s', None)
        df = pd.DataFrame(res)
        df.to_csv('./lib/AAPL_Daily_Candle.csv')
    # Insert Date column in df
    df.insert(0, "Date", "")
    # Convert epoch time to a datetime stamp in the Date column
    df['Date'] = pd.to_datetime(df['t'], unit='s')
    df.insert(7, "daily_return", float, allow_duplicates=True)
    df1 = tools.cal_DailyReturn(df)
    return df1
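# NOTE: cal_DailyReturn lives elsewhere in lib.Tools and is not shown in
# this section. A minimal sketch of what such a helper commonly computes,
# assuming the standard close-over-previous-close definition of daily
# return (an illustration, not the library's actual implementation):
def cal_DailyReturn_sketch(df):
    # daily_return_t = (c_t - c_{t-1}) / c_{t-1}; NaN for the first row
    df['daily_return'] = df['c'].pct_change()
    return df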
def main(argv):
    if 'auto' in argv:
        auto = True
    else:
        auto = False
    # Load the tools
    fic, ext = os.path.splitext(argv[0])
    fic = fic.split('/')[-1]
    tools = Tools(auto, 'autre')
    tools.fichiers = make_dic_fic(fic)
    tools.coureurs = tools.htmtoliste()
    if tools.checkCoureurs(tools.coureurs):
        tools.writeCsv(tools.coureurs, tools.fickikou)
        print(BOLD, "--> Fichier {} traité".format(tools.fichtm), RESET)
        print(BOLD,
              "Fichier {} prêt à transférer sur KiKourou".format(
                  tools.fickikou), RESET)
        # Save the source htm file
        fic = 'mv {} {}/sv_fic_source'
        sv = os.system(fic.format(tools.fichtm, REPWORK))
    else:
        tools.writeCsv(tools.coureurs, tools.ficcsv)
        print(BOLD, "Fichier {} pourri".format(tools.fichtm), RESET)
        for cle, valeur in tools.anos.items():
            print(BOLD, "\nAnomalie de type : ", cle, RESET)
            for ano in valeur:
                print('\t{}'.format(ano))
def test_clean_string(self):
    input_string = " legitimate-.,;:_·<>+\\|/'#@()\"\t\n\r!%&=?¡¿ text "
    expected_string = "legitimate text"
    p = Processing()
    output_string = Tools.clean_string(input_string,
                                       StringProcessor.unwanted_chars)
    output_string = Tools.clean_spaces(output_string)
    self.assertEqual(expected_string, output_string)
def get_Question1():
    if path.exists('./lib/AAPL_1Day_Candle.csv'):
        df = pd.read_csv('./lib/AAPL_1Day_Candle.csv')
    else:
        config = configparser.ConfigParser()
        config.read('./lib/application.config')
        key = config['finnhub']['key']
        resolution = config['finnhub']['resolution']
        symbol = config['finnhub']['symbol']
        start_time = config['finnhub']['start_time']
        end_time = config['finnhub']['end_time']
        start = tools.date_to_epoch(start_time)
        end = tools.date_to_epoch(end_time)
        res = tools.getStockCandles(key, symbol, resolution, start, end)
        res.pop('s', None)
        df = pd.DataFrame(res)
        df.to_csv('./lib/AAPL_1Day_Candle.csv')
    # Insert Date column in df
    df.insert(0, "Date", "")
    # Convert epoch time to a datetime stamp in the Date column
    df['Date'] = pd.to_datetime(df['t'], unit='s')
    # Seed the derived columns (the type objects act as placeholder values)
    df.insert(7, "avg_price", float, allow_duplicates=True)
    df["avg_price"] = (df['h'] + df['l'] + df['c']) / 3
    df.insert(8, "cumulative_volume", int, allow_duplicates=True)
    df.insert(9, "avg_price_volume", float, allow_duplicates=True)
    df.insert(10, "cumulative_price_volume", float, allow_duplicates=True)
    df.insert(11, "VWAP", float, allow_duplicates=True)
    # Running totals for the volume-weighted average price
    for index in df.index:
        df.at[index, 'avg_price_volume'] = df.at[index, 'v'] * df.at[index, 'avg_price']
        if index == 0:
            df.at[index, 'cumulative_volume'] = df.at[index, 'v']
            df.at[index, 'cumulative_price_volume'] = df.at[index, 'avg_price_volume']
        else:
            df.at[index, 'cumulative_volume'] = df.at[
                index - 1, 'cumulative_volume'] + df.at[index, 'v']
            df.at[index, 'cumulative_price_volume'] = df.at[
                index - 1, 'cumulative_price_volume'] + df.at[index, 'avg_price_volume']
        df.at[index, 'VWAP'] = df.at[index, 'cumulative_price_volume'] / df.at[
            index, 'cumulative_volume']
    return df


#get_Question1()
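# The loop above computes the running VWAP, i.e.
# VWAP_t = sum_{i<=t}(avg_price_i * v_i) / sum_{i<=t}(v_i).
# A vectorized sketch of the same computation with pandas cumsum
# (illustrative; assumes the Finnhub candle columns h/l/c/v used above):
def vwap_sketch(df):
    avg_price = (df['h'] + df['l'] + df['c']) / 3
    cumulative_price_volume = (avg_price * df['v']).cumsum()
    cumulative_volume = df['v'].cumsum()
    return cumulative_price_volume / cumulative_volume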
def process(self, input_string):
    # Transliterate to ascii
    final_str = self.trans.to_ascii(input_string)
    # Clean useless chars and trim spaces:
    final_str = Tools.clean_string(final_str, StringProcessor.unwanted_chars)
    final_str = Tools.clean_spaces(final_str)
    # Uppercase the string
    final_str = final_str.upper()
    return final_str
def getContainerInfo(self, container_id):
    try:
        ContainerInfo = self.__conn.inspect_container(container_id)
        # Normalise the Docker timestamps for XML output
        ContainerInfo['State']['FinishedAt'] = Tools.formatXMLtime(
            ContainerInfo['State']['FinishedAt'])
        ContainerInfo['State']['StartedAt'] = Tools.formatXMLtime(
            ContainerInfo['State']['StartedAt'])
        ContainerInfo['Created'] = Tools.formatXMLtime(
            ContainerInfo['Created'])
        return ContainerInfo
    except Exception as e:
        print(e)
def getImageInfo(self, image_id):
    try:
        ImageInfo = self.__conn.inspect_image(image_id)
        ImageInfo['Created'] = Tools.formatXMLtime(ImageInfo['Created'])
        return ImageInfo
    except Exception as e:
        print(e)
def process(self, input_string):
    # Trim spaces and remove unwanted chars:
    final_str = Tools.clean_string(input_string, NumberProcessor.unwanted_chars)
    final_str = Tools.clean_spaces(final_str)
    try:
        # Parse the number to the most general one (float)
        number = float(final_str)
        # Turn the number to a string again (to homogenise representation)
        final_str = str(number)
    except ValueError:
        logging.warning("Number string could not be parsed: <" +
                        input_string + "> (original) - <" +
                        final_str + "> (after cleaning)")
        # If number cannot be parsed, return original string
        final_str = input_string
    return final_str
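# Usage note: the float() -> str() round trip homogenises number
# representations, e.g. "42", " 42 " and "42.0" all come back as "42.0"
# (illustrative values).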
def main(argv):
    if 'auto' in argv:
        auto = True
    else:
        auto = False
    # Load the tools
    tools = Tools(auto)
    lnk = input('Lien : ')
    epreuve = Epreuve(lnk)
    for e in epreuve.courses:
        if e.course.lower() not in ['relais']:
            print(BOLD + '{} --> {} '.format(e.nomCourse, e.course) + RESET)
            tools.fichiers = make_dic_fic('{}_{}'.format(
                e.nomCourse, e.course))
            tools.coureurs = tools.htmtoliste(e.lienCourse, e.data)
            if tools.checkCoureurs(tools.coureurs):
                tools.writeCsv(tools.coureurs, tools.fickikou)
                print(BOLD,
                      "{} --> course {} traitée".format(e.nomCourse, e.course),
                      RESET)
                print(BOLD,
                      "Fichier {} prêt à transférer sur KiKourou".format(
                          tools.fickikou), RESET)
                # Save the source htm file
                fic = 'mv {} {}/sv_fic_source'
                #sv = os.system(fic.format(tools.fichtm, REPWORK))
            else:
                tools.writeCsv(tools.coureurs, tools.ficcsv)
                print(BOLD, "Fichier {} pourri".format(tools.ficcsv), RESET)
                for cle, valeur in tools.anos.items():
                    print(BOLD, "\nAnomalie de type : ", cle, RESET)
                    for ano in valeur:
                        print('\t{}'.format(ano))
        else:
            '''Skip the 'relais' (relay) events'''
            print(BOLD +
                  '\n{} --> {} non traité !!!'.format(e.nomCourse, e.course) +
                  RESET)
def process(self, input_string):
    # Trim spaces and remove unwanted chars:
    final_str = Tools.clean_string(input_string, DateProcessor.unwanted_chars)
    final_str = Tools.clean_spaces(final_str)
    try:
        # Parse the date with multiple approaches
        date_object = dateparser.parse(final_str)
        if date_object is None:
            raise TypeError
        # Turn the date to a string again (use ISO format)
        final_str = str(date_object.date().isoformat())
    except TypeError:
        logging.warning("Date string could not be parsed: <" +
                        input_string + "> (original) - <" +
                        final_str + "> (after cleaning)")
        # If date cannot be parsed, return original string
        final_str = input_string
    return final_str
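# Usage note: dateparser.parse accepts free-form date strings and returns
# a datetime (or None on failure), which process() then normalises to an
# ISO date, e.g. (illustrative):
#     dateparser.parse("12 March 2015").date().isoformat()  # '2015-03-12'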
def matchingChars(possibleC, possibleChars):
    listOfMatchingChars = []
    for possibleMatchingChar in possibleChars:
        if possibleMatchingChar == possibleC:
            continue
        # Geometric similarity measures between the two candidate chars
        distanceBetweenChars = Tools.distanceBetweenChars(
            possibleC, possibleMatchingChar)
        angleBetweenChars = Tools.angleBetweenChars(
            possibleC, possibleMatchingChar)
        changeInArea = float(
            abs(possibleMatchingChar.boundingRectArea -
                possibleC.boundingRectArea)) / float(
                    possibleC.boundingRectArea)
        changeInWidth = float(
            abs(possibleMatchingChar.boundingRectWidth -
                possibleC.boundingRectWidth)) / float(
                    possibleC.boundingRectWidth)
        changeInHeight = float(
            abs(possibleMatchingChar.boundingRectHeight -
                possibleC.boundingRectHeight)) / float(
                    possibleC.boundingRectHeight)
        # Accept chars that are close, roughly aligned, and similar in size
        if distanceBetweenChars < (possibleC.diagonalSize * 5) and \
                angleBetweenChars < 12.0 and \
                changeInArea < 0.5 and \
                changeInWidth < 0.8 and \
                changeInHeight < 0.2:
            listOfMatchingChars.append(possibleMatchingChar)
    return listOfMatchingChars
def __init__(self, smMetadata: MsMetadata, key, key_id=None, retries=1,
             validate=True):
    # Session Identifier
    self.sessId = None
    # Key ID of this client
    self.key_id = key_id
    # Key of this client
    self.key = key
    # Send retries
    self.retries = retries
    # Metadata of the Session Manager microservice
    self.smMetadata = smMetadata
    trusted_keys = {
        Tools.sha256_fingerprint(smMetadata.rsaPublicKeyBinary):
            smMetadata.rsaPublicKeyBinary
    }
    # HTTPSig client instance
    self.httpsig = HttpSigClient(self.key, trusted_keys,
                                 key_id=self.key_id, retries=self.retries)
    self.httpsig.doValidateResponse(validate)
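# Tools.sha256_fingerprint is not shown in this section. A minimal sketch
# of what a SHA-256 key-fingerprint helper typically looks like, assuming
# the public key arrives as raw/DER bytes (an assumption, not the actual
# lib implementation):
import hashlib

def sha256_fingerprint_sketch(key_bytes: bytes) -> str:
    # Hex digest of the SHA-256 hash of the key bytes; used above as the
    # lookup key for the trusted_keys dictionary.
    return hashlib.sha256(key_bytes).hexdigest()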
def read_ppm(DBname, ppmDir, ppmList=None, SMD=None):
    if SMD is None:
        SMD = '../images/ShapeMatcher'
    print("Converting", ppmDir, "...")
    with cd(SMD):  # Directory finagling begins
        with cd(ppmDir):  # finagling x 2
            OS = p.system()
            if OS == 'Windows':
                contents = cmd('dir')
            elif OS in ['Darwin', 'Linux']:
                contents = cmd('ls')
            else:
                s = sorry.format("Unknown Operating System")
                print(s)
                return None
            if contents.returncode != 0:
                s = sorry.format("Couldn't read the directory.")
                print(s)
                return None
            # There is a built-in ShapeMatcher command that reads everything
            # in a folder, but I found it to be buggy at times
            string = str(contents)
            string = string[string.find(ppmDir) + 1:]
            fileList = string.split(".ppm")
            fileList = fileList[:-1]
            # Need to work backwards from the end of the file name.
            # This is why file names can't have spaces.
            for i in range(len(fileList)):
                file = None
                j = -1
                while j >= (-1 * len(fileList[i])):
                    if fileList[i][j] == " ":
                        break
                    j -= 1
                j += 1
                file = fileList[i][j + len(fileList[i]):]
                file += ".ppm"
                fileList[i] = file
        # Now we have a list of all the files in the folder,
        # so we can make a database consisting of all of these files.
        # Start building the sm command for creating a database file
        createDB = 'sm -doExtSimp 0 -c ' + DBname + '.db'
        # Now add each of the files
        for file in fileList:
            createDB += " " + ppmDir + '/' + file
        # Make a database of the graphs
        cmd(createDB)
        # Convert database to XML file
        toXML = "sm -toXML " + DBname + ".xml " + DBname + ".db"
        cmd(toXML)
        print("----------")
        g = read_sm(DBname + ".xml")
        print("\t----------")
        t.rename_key(g, list(g.keys())[0], DBname)
        print("\t----------")
    print("\nConversion complete. Returning NetworkX graph object.")
    print("\nYour", DBname, "database and XML file should be in ", SMD)
    return g
def test_clean_spaces(self):
    input_string = " a b c d "
    expected_string = "a b c d"
    p = Processing()
    output_string = Tools.clean_spaces(input_string)
    self.assertEqual(expected_string, output_string)
""" Created on Tue Jul 7 19:36:23 2020 @author: Candace Todd """ # Script recording our data cleaning methodology # Note: this is an interactive program import DataReader as dr import networkx as nx import lib.Tools as t import matplotlib.pyplot as plt ################## Code for picking out outlier binary images ######################## alienDict = dr.read_sm("data/Shape-Matcher-models/ALIEN.xml") t.rename_key(alienDict, oldKey="models/ALIEN/ALIEN", newKey="alien") childDict = dr.read_sm("data/Shape-Matcher-models/m_chld.xml") t.rename_key(childDict, oldKey="models/m_chld/m_chld", newKey="child") camelDict = dr.read_sm("data/Shape-Matcher-models/camel.xml") t.rename_key(camelDict, oldKey="models/camel/camel", newKey="camel") # 17 is a good camel model horseDict = dr.read_sm("data/Shape-Matcher-models/HORSE.xml") t.rename_key(horseDict, oldKey="models/HORSE/HORSE", newKey="horse") # 52 isa good horse model eagleDict = dr.read_sm("data/Shape-Matcher-models/eagle.xml") t.rename_key(eagleDict, oldKey="models/eagle/eagle", newKey="eagle") # 35 is a good eagle model
num = 5
lst = mList
if letter == "V":
    num = 9
    lst = vList
if letter == "W":
    num = 9
    lst = wList
j = 0
for i in range(num):
    while letter + str(j) in lst:
        j += 1
    if j > (150 - len(lst)) - 1:
        break
    G = z[0][letters[letter]][j]
    G = t.main_component(G=G, report=False)
    pos = t.get_pos(G)
    l_inputs.append((G, pos))
    l_labels.append(letter + str(j))
    l_target.append(letter)
    j += 1

matrix = classify.get_matrix(l_inputs, frames, True, True, average="median")
flat = classify.condense(matrix)

# This plot may render with the axes flipped
points = classify.mds(input_list=l_inputs, target_list=l_target,
                      frames=frames, D=matrix, colorize=True,
import pandas as pd
import configparser

import lib.Tools as tools

config = configparser.ConfigParser()
config.read('./lib/application.config')
symbols = config['question4']['symbol'].split(',')
#print(symbols[3])
start_time = config['question2']['start_time']
end_time = config['question2']['end_time']
key = config['finnhub']['key']
resolution = config['question2']['resolution']
start = tools.date_to_epoch(start_time)
end = tools.date_to_epoch(end_time)

symbol_dict = {}
for symbol in symbols:
    res = tools.getStockCandles(key, symbol, resolution, start, end)
    res.pop('s', None)
    df = pd.DataFrame(res)
    # Insert Date column in df
    df.insert(0, "Date", "")
    # Convert epoch time to a datetime stamp in the Date column
    df['Date'] = pd.to_datetime(df['t'], unit='s')
    df.insert(7, "daily_return", float, allow_duplicates=True)
# cv2.findContours returns (contours, hierarchy) in OpenCV 4.x and
# (image, contours, hierarchy) in 3.x, hence the version check.
if int(cv2.__version__.split('.')[0]) >= 4:
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)
else:
    imageContours, contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

height, width = thresh.shape
imageContours = np.zeros((height, width, 3), dtype=np.uint8)

possibleChars = []
countOfPossibleChars = 0

# Draw every contour and keep the ones that look like characters
for i in range(0, len(contours)):
    cv2.drawContours(imageContours, contours, i, (255, 255, 255))
    possibleChar = Tools.ifChar(contours[i])
    if Tools.checkIfChar(possibleChar) is True:
        countOfPossibleChars = countOfPossibleChars + 1
        possibleChars.append(possibleChar)

imageContours = np.zeros((height, width, 3), np.uint8)

ctrs = []
for char in possibleChars:
    ctrs.append(char.contour)

cv2.drawContours(imageContours, ctrs, -1, (255, 255, 255))

plates_list = []
listOfListsOfMatchingChars = []