Example #1
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

        current_dir = os.path.dirname(__file__)
        self.wfile.write(
            bytes(readfile.readFile(os.path.join(current_dir, "message.txt")),
                  "utf-8"))

        return
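The readfile module itself is not shown on this page. A minimal sketch of a readFile helper consistent with Example #1, assuming it simply returns the file's contents as one string (the handler then encodes the result with bytes(..., "utf-8")):

def readFile(path):
    # Assumed behaviour: return the whole file as a single UTF-8 string.
    with open(path, "r", encoding="utf-8") as handle:
        return handle.read()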
Example #2
    def __init__(self):
        self.method = 0
        self.data = [None,None,None,None,None]
        self.values = [None,None,None,None,None]
        self.maximum = None
        self.names = [' Eucl Dist', ' Maha Dist', ' Eucl Vote', ' Maha Vote', '  Custom']

        file1 = open("hw5db1.txt","r")
        file2 = open("hw5db2.txt","r")
        dataIn = readfile.readFile(file1)
        stats = readfile.readStats(file2)

        self.vectors = vector_handler.vector_holder(dataIn,stats)
        self.classifier = classifier.classifier()
Example #3
    def setUp(self):
        self.rf = readFile()
                        "--last",
                        help="Use last query",
                        action="store_true")
    parser.add_argument("-o",
                        "--opcap",
                        help="Remove operational cap datapoints",
                        action="store_true")
    parser.add_argument("-g",
                        "--graph",
                        help="Do not display graph.",
                        action="store_false")
    options = parser.parse_args()

    # Get user creds
    if os.path.exists("creds.txt") or options.creds:
        credentials = readfile.readFile("creds.txt")
        username, password = credentials[0], credentials[1]
    else:
        username = input('Enter username: ')
        password = input('Enter password: ')

    # Connect
    try:
        connection = access.connect(username, password)

        # Get user query or use last query
        if options.last:
            last_query = readfile.readFile("query.txt")
            if last_query[1] == "today":
                last_query[1] = str(
                    datetime.datetime.now().strftime("%m-%d-%Y"))
Example #5
class ParseBlocks:
    def __init__(self):
        self.indexIonBlocks = None
        self.numIonBlocks = -1
        self.szIonBlocks = None
        self.massabund = None
        self.blockind = None
        self.pepmassArr = None
        self.nameionArr = None


if __name__ == "__main__":

    import readfile
    filename = "orig/data/inga_compounds_and_unpd_in_silico.mgf"
    #LASTLINE = 213
    LASTLINE = 54
    arrLines = readfile.readFile(filename)
   
    print("*** TEST CASE ***")
    print("    File '{0}' contains {1} lines".format(filename,len(arrLines)))
    print("    #Lines considered:{0}".format(LASTLINE))

    print("    TEST INDVIDUAL FUNCTIONS::")
    # Test on the FIRST x blocks
    arrSlice = arrLines[:LASTLINE]
    pb = ParseBlocks()

    # Find Index of the Ion Blocks
    indArr = pb.findIonBlocks(arrSlice)
    print("    Indices of the Block Pairs i.e. (Start,End):\n    {0}".format(indArr))
    for i in indArr:
        print("     --> '{0}'".format(arrSlice[i].strip()))
Example #6
import argparse
import os

import pymongo
from readfile import readFile

parser = argparse.ArgumentParser(description='Given a tab-delimited CSV file, imports its data into a MongoDB collection. The file requires a header on its first line.')
parser.add_argument('--inputfolder', dest='input_folder', 
    help='Input folder name.', type=str, required=True)
parser.add_argument('--mongoclient', dest='mongo_client', 
    help='Mongo client name (like \'localhost:32769/\').', type=str, required=True)
parser.add_argument('--mongodb', dest='mongo_db', 
    help='Mongo database name (like \'mydatabase\').', type=str, required=True)
parser.add_argument('--mongocol', dest='mongo_col', 
    help='Mongo collection name (like \'items\').', type=str, required=True)

args = parser.parse_args()
inputFolderName = args.input_folder
mongoClient = args.mongo_client
mongoDb = args.mongo_db
mongoCol = args.mongo_col

#insert items to database
myclient = pymongo.MongoClient("mongodb://" + mongoClient)
mydb = myclient[mongoDb]
mycol = mydb[mongoCol]

# 1) iterate CSV files
# 2) convert data files to array of dictionaries
# 3) insert items of array into database
files = os.listdir(inputFolderName) #https://docs.python.org/3.8/library/os.html?highlight=listdir#os.listdir
for file in files:
    if file.endswith('.csv'):
        items = readFile(os.path.join(inputFolderName, file))
        mycol.insert_many(items)

print("Done.")
Example #7
def main():
    current_app.logger.info("Hi")

    current_dir = os.path.dirname(__file__)

    return readfile.readFile(os.path.join(current_dir, "message.txt"))
Example #8
import os
import re
from readfile import readFile
from automovel import Automovel

import pandas as pd
import matplotlib.pyplot as plt

lojaCarros = []
with os.scandir('./tests') as entries:
    for entry in entries:
        with open(entry, 'r', encoding='utf8') as file:
            automovel = readFile(file)
            result = Automovel(automovel)
            lojaCarros.append(result)

df = pd.DataFrame({
    # Build each column from the first 15 cars instead of indexing every
    # element by hand.
    'nome': [carro.nome for carro in lojaCarros[:15]],
    'ano': [carro.ano for carro in lojaCarros[:15]],
})
Example #9
    parser = argparse.ArgumentParser(
        description=
        "Below is a list of optional arguments with descriptions. Please refer to the Readme for full documentation and examples..."
    )
    parser.add_argument("-c",
                        "--creds",
                        help="Access creds from creds.txt",
                        action="store_false")
    parser.add_argument("-btu",
                        "--mmbtu",
                        help="Display data in units of MMbtu rather than MMcf",
                        action="store_true")
    options = parser.parse_args()

    # Get user creds
    if os.path.exists("creds.txt") or options.creds:
        credentials = readfile.readFile("creds.txt")
        username, password = credentials[0], credentials[1]
    else:
        username = input('Enter username: ')
        password = input('Enter password: ')

    # Connect to the database
    connection = access.connect(username, password)

    # Get date range and pipeline id
    date_range = pointCap.getDateRange()
    pipeline_id = int(input("Enter pipeline id: "))
    # Get flow average and max filters
    if options.mmbtu is False:
        avg_filter = int(
            input(
Example #10
import math
import readfile
import vector_handler
import classifier
file1 = open("hw5db1.txt","r")
file2 = open("hw5db2.txt","r")
dataIn = readfile.readFile(file1)
stats = readfile.readStats(file2)

vectors = vector_handler.vector_holder(dataIn,stats)
classifier = classifier.classifier()


result1 = classifier.directClassify(vectors.vectorArr,vectors.statArr,classifier.method_1)
result2 = classifier.directClassify(vectors.vectorArr,vectors.statArr,classifier.method_2)

for i in range(0,15):
    if result1[i]:
        print('A')
    else:
        print('N')
Example #11
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
    except Exception as exc:
        print("Error while executing SQL:", exc)
        return None

    # Close and return
    cursor.close()
    return pd.DataFrame(results, columns=["Issue Date", "Price Point Name", "Region Name", "Average Price"])


# Run
if __name__ == "__main__":
    # Get creds
    user, password = readfile.readFile("creds.txt")
    # Get price data
    prices = accessDB(user, password)
    # Fill NaN and alter dtype
    prices.fillna(0, inplace=True)
    prices["Average Price"] = prices["Average Price"].astype(float)
    
    # Pivot hub
    hub_prices = prices.pivot_table(values="Average Price", index=["Issue Date"], columns=["Price Point Name"])
    # Get day-to-day difference
    hub_prices_diff = hub_prices.diff()
    # Get percentage change day-to-day
    price_percentage = hub_prices_diff / hub_prices

    # Get junk columns to drop
    to_drop = price_percentage.columns[(price_percentage.abs() <= 0.05).iloc[-1]]