Example #1
# `loader` and `logger` are project-local modules assumed to be in scope.
import requests


def run_test(file_path):
    """Load a request spec from a file, execute it, and return the response."""
    test_data = loader.load_file(file_path)
    logger.log_debug(test_data)
    req_kwargs = test_data['request']
    url = req_kwargs.pop('url')
    method = req_kwargs.pop('method')
    resp_obj = requests.request(url=url, method=method, **req_kwargs)
    return resp_obj
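A minimal usage sketch, assuming loader.load_file parses a spec file into a dict whose 'request' entry mirrors the keyword arguments of requests.request (the file name and keys below are illustrative, not from the original):

# Hypothetical spec as returned by loader.load_file():
# {'request': {'url': 'https://example.com/api/health',
#              'method': 'GET',
#              'timeout': 5}}   # extra keys pass straight through to requests.request
resp = run_test('smoke_test.yml')  # hypothetical file name
print(resp.status_code)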
Example #2
import pygame
# `mem`, `loader`, `cpu`, and `ppu` are project-local emulator modules.


def start_emulator():
    pygame.init()

    initialize()  # project-level setup defined elsewhere in this module

    # Start memory
    mem.initialize()

    # Load ROM
    loader.load_file('SuperMarioBros(E).nes')

    # Start CPU
    cpu.initialize()

    # Emulation loop: step the CPU and PPU in lockstep
    while True:
        cpu.cycle()
        ppu.cycle()
Example #3
# Assumes: `logging` and `networkx as nx` are imported, `load_file` and
# `nlp` (a loaded spaCy pipeline) are in scope, `join` is os.path.join,
# and `grammar_dir` is a configured output directory.
def process_file(file):
    """ Given a file, load it, parse it,
    convert all headlines to POS tries, and save the graph
    """
    logging.info("Loading File: {}".format(file))
    data = load_file(file)
    g = nx.DiGraph()
    g.add_node('__ROOT')
    for doc in data:
        if 'headline' not in doc or 'main' not in doc['headline']:
            # IPython.embed(simple_prompt=True)  # debug hook for malformed docs
            continue
        logging.info("Processing: {}".format(doc['headline']['main']))
        path = "__ROOT"
        p_text = nlp(doc['headline']['main'])
        # IPython.embed(simple_prompt=True)  # debug hook; disabled for unattended runs
        for tok in p_text:
            tag = tok.tag_
            new_path = path + "." + tag
            if not g.has_edge(path, new_path):
                g.add_node(new_path, tag=tag, word_set=set())
                g.add_edge(path, new_path, weight=0)
            g[path][new_path]['weight'] += 1
            g.nodes[new_path]['word_set'].add(tok.text)

            path = new_path

        if 'keywords' not in g.nodes[path]:
            g.nodes[path]['keywords'] = []
        g.nodes[path]['keywords'] += doc['keywords']

        if '_ids' not in g.nodes[path]:
            g.nodes[path]['_ids'] = []
        g.nodes[path]['_ids'].append(doc['_id'])

        g.nodes[path]['LEAF'] = True

    # All docs processed, save
    nx.write_gpickle(g, join(grammar_dir, file + ".pkl"))
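A short companion sketch for reading the graph back (assumes networkx < 3.0, where read_gpickle is still available; the file name is illustrative):

import networkx as nx
from os.path import join

g = nx.read_gpickle(join(grammar_dir, "headlines.pkl"))  # hypothetical file name
# Edge weights count how often each POS transition occurred under the root.
top = max(g.successors('__ROOT'), key=lambda n: g['__ROOT'][n]['weight'])
print(top, g.nodes[top]['word_set'])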
Example #4
def main(path):
    # `load_file`, `groupby` (assumed to return a dict mapping key -> list),
    # `procedures`, `reference_procedure`, and `Table` come from the host project.
    with open(path, 'rt') as f:
        keyfun = lambda item: (item.size, item.distribution_name)
        data = groupby(load_file(f), keyfun)

    header1 = [("", 3),
               ("speedup over %s procedure" % reference_procedure,
                len(procedures) * 3)]
    header2 = [("", 3)]
    header3 = ["size [B]", "distribution", "samples"]
    for proc in procedures:
        header2.append((proc, 3))
        header3.extend(["min", "avg", "max"])

    table = Table()
    table.add_header(header1)
    table.add_header(header2)
    table.add_header(header3)

    for key in sorted(data):
        collection = data[key]
        size, name, stats = calculate_speedup_statistics(collection)

        row = []
        row.append('%d' % size)
        row.append(get_distribution_title(name))
        row.append('%d' % len(collection))

        for proc in procedures:
            row.append('%0.2f' % stats[proc][0])
            row.append('%0.2f' % stats[proc][1])
            row.append('%0.2f' % stats[proc][2])

        table.add_row(row)

    print(table)
Example #5
def get_data_series(self):
    persistent_storage = PersistentStorage()

    try:
        last_path_used = persistent_storage.get_value("fromfile_last_dir_used")
    except KeyError:
        last_path_used = ""

    # Get the filename to open
    file_to_open = wx.FileSelector("Choose file to open", default_path=last_path_used)
    if file_to_open == "":
        return

    persistent_storage.set_value("fromfile_last_dir_used", os.path.dirname(file_to_open))

    wx.BeginBusyCursor()
    try:
        contents = loader.load_file(file_to_open)
        series_select_dialog = TxtFileDataSeriesSelectFrame(self.get_parent(), contents)
    finally:
        wx.EndBusyCursor()

    if series_select_dialog.ShowModal() == wx.ID_OK:
        return series_select_dialog.get_series()
Example #6
if __name__ == '__main__':

    # Assumes: `import argparse as a`, a loader module imported as `l`,
    # a transform module `o`, and `from_string` from the TPTP parser.
    parser = a.ArgumentParser(description='A simple fof solver using tptp syntax')
    parser.add_argument('--file', action='store')
    parser.add_argument('--jsonfile', action='store')
    parser.add_argument('--formula', action='store')

    args = vars(parser.parse_args())
    if args['file']:
        fof_data = l.parse_and_load(args['file'])
        #fof_data = from_file(args['file'])
    elif args['formula']:
        fof_data = from_string("fof(ax,axiom," + args['formula'] + ").")
    elif args['jsonfile']:
        fof_data = l.load_file(args['jsonfile'])
    else:
        string = "fof(ax, axiom, ![X]: r(X) => ?[Y]:r(Y) ).fof(ax, conjecture, ![X]: r(X) => ?[Y]:r(Y) )."
        fof_data = from_string(string)

    print("input formula:",fof_data)
    conjectures = []
    axioms = []
    for formula in fof_data:
        if formula['type'] in ('axiom', 'theorem'):
            formula = o.transform(formula['formula'])
            axioms.append(formula)
        elif formula['type'] == 'conjecture':
            f = formula['formula'].negate()
            f = o.transform(f)
            conjectures.append(f)
Example #7
# Python 2/3 compatibility: on Python 3, alias the removed xrange to range.
try:
    xrange
except NameError:
    xrange = range


def totalvalue(comb):
    " Totalise a particular combination of items"
    totwt = totval = 0
    for item, wt, val in comb:
        totwt += wt
        totval += val
    return (totval, -totwt) if totwt <= 400 else (0, 0)


capacity, items = load_file("input")


def knapsack01_dp(items, limit):
    table = [[0 for w in range(limit + 1)] for j in xrange(len(items) + 1)]

    for j in xrange(1, len(items) + 1):
        item, wt, val = items[j - 1]
        for w in xrange(1, limit + 1):
            if wt > w:
                table[j][w] = table[j - 1][w]
            else:
                table[j][w] = max(table[j - 1][w], table[j - 1][w - wt] + val)

    # Walk the table backwards to recover which items were chosen.
    result = []
    w = limit
    for j in xrange(len(items), 0, -1):
        if table[j][w] != table[j - 1][w]:
            item, wt, val = items[j - 1]
            result.append(items[j - 1])
            w -= wt
    result.reverse()
    return result
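A one-line usage sketch, assuming capacity and items come from the load_file call above:

bagged = knapsack01_dp(items, capacity)
print(sum(val for item, wt, val in bagged))  # total value of the chosen items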
Example #8
def assets(path):
    # Flask-style static handler; `request`, `Response`, `load_file`, and
    # `MIME_TYPE` come from the surrounding application.
    ext = request.path.split("/")[-1].split(".")[-1]
    return Response(load_file(request.path), mimetype=MIME_TYPE[ext])
Example #9
import sys
sys.path.append('/home/barbara/scheduling_procedures/TAEMS')
sys.path.append('/home/barbara/scheduling_procedures/Genetic algorithm')
sys.path.append('/home/barbara/scheduling_procedures/Tabu search')
sys.path.append('/home/barbara/scheduling_procedures/Simulated annealing')
sys.path.append('/home/barbara/scheduling_procedures/Ant colony optimization')

import loader
from taems import TaemsTree
import genetic_algorithm
import tabu_search
import simulated_annealing
import ant_colony_optimization

if __name__ == "__main__":
    [alternative, pt, rt, dt] = loader.load_file("./Input/", "alternative_1.txt")
    tree = TaemsTree.load_from_file("./Input/example_large.taems")

    test = 2
    if test == 0:
        ga_solutions = []

        ga = genetic_algorithm.GeneticAlgorithm(200, 0.1, 0.3)
        for i in range(1000):
            [best_sol, iteration] = ga.optimize(tree, alternative, pt, rt, dt, 50)
            print([i, best_sol.total_tardiness, iteration])
            ga_solutions.append([best_sol, iteration])

        f = open("Results/ga_results.txt", "w")
        for i in range(1000):
            f.write(str(ga_solutions[i][0].total_tardiness) + " " + str(ga_solutions[i][1]) + "\n")
Example #10
import loader
import tables_to_leave
import pandas as pd
from sklearn.preprocessing import StandardScaler
from joblib import load, dump  # sklearn.externals.joblib was removed in modern scikit-learn

LOAD_PATH = 'C:\\Users\\Tom\\PycharmProjects\\wiseNeuro\\data\\pred.csv'
SAVE_PATH = 'C:\\Users\\Tom\\PycharmProjects\\wiseNeuro\\data\\prediction.csv'

df = loader.load_file(LOAD_PATH, ',')
df = loader.drop_tables(df, tables_to_leave.TABLE_LIST)
df = df.fillna(0)
df = df.to_numpy(dtype=float)
scaler = load('std_scaler.bin')
#scaler = StandardScaler()
#scaler.fit(df)
df = scaler.transform(df)
model = loader.load_model()
prediction = model.predict(df)
newframe = pd.DataFrame()
newframe['result'] = prediction.flatten().astype(float)


def drop_result(df):
    df_head = list(df)
    for head in df_head:
        if head not in ('RAJ2000', 'DEJ2000'):
            del df[head]
    return df
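SAVE_PATH and drop_result are defined but never used in the excerpt; a hedged sketch of the likely intended ending (re-reading the raw file to recover the coordinate columns is an assumption):

# Reload the raw frame so the RAJ2000/DEJ2000 columns are available again.
coords = drop_result(loader.load_file(LOAD_PATH, ','))
newframe['RAJ2000'] = coords['RAJ2000'].values
newframe['DEJ2000'] = coords['DEJ2000'].values
newframe.to_csv(SAVE_PATH, index=False)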

Example #11
from __future__ import absolute_import, division, print_function, unicode_literals
from sklearn.preprocessing import StandardScaler
import pandas as pd
import tensorflow as tf
import loader
import tables_to_leave
from joblib import dump, load  # sklearn.externals.joblib was removed in modern scikit-learn

KNOWN_PATH = 'C:\\Users\\Tom\\PycharmProjects\\wiseNeuro\\data\\known.csv'
SKY_PATH = 'C:\\Users\\Tom\\PycharmProjects\\wiseNeuro\\data\\sky_train.csv'
TEST_PATH = ''

known_df = loader.load_file(KNOWN_PATH, sep=';')
known_df['isdwarf'] = 1
known_df = loader.drop_tables(known_df, tables_to_leave.TABLE_LIST)
print(known_df.head())
sky_df = loader.load_file(SKY_PATH, sep=',')
sky_df['isdwarf'] = 0
sky_df = loader.drop_tables(sky_df, tables_to_leave.TABLE_LIST)
print(sky_df.head())
df = pd.concat([known_df, sky_df], sort=False)
df = df.fillna(0)
df_len = len(df.columns)
print(df_len)
target = df.pop('isdwarf')
scaler = StandardScaler()
scaler.fit(df)
dump(scaler, 'std_scaler.bin', compress=True)
df = scaler.transform(df)
dataset = tf.data.Dataset.from_tensor_slices((df, target.values))
train_dataset = dataset.shuffle(len(df)).batch(1)
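The excerpt stops after building train_dataset; a minimal sketch of a binary classifier one might fit on it (layer sizes and epoch count are illustrative assumptions, not from the original):

# Feed-forward binary classifier over the scaled feature vector.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(df.shape[1],)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_dataset, epochs=5)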
Example #12
from itertools import combinations


def anycomb(items):
    'Return combinations of any length from the items'
    return (comb
            for r in range(1, len(items) + 1)
            for comb in combinations(items, r))


def totalvalue(comb):
    ' Totalise a particular combination of items'
    totwt = totval = 0
    for item, wt, val in comb:
        totwt += wt
        totval += val
    return (totval, -totwt) if totwt <= 400 else (0, 0)


capacity, items = load_file('input')


def fil(x):
    val, dummy = totalvalue(x)
    return val


def fil2(x):
    dummy, wt = totalvalue(x)
    return wt


xd = [x for x in anycomb(items) if -fil2(x) <= capacity]
bagged = max(xd, key=totalvalue)
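A short continuation printing the winning combination (output formatting is illustrative):

val, wt = totalvalue(bagged)
print("Bagged:", [item for item, _, _ in bagged])
print("Total value: %d, total weight: %d" % (val, -wt))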