Example No. 1
def run(select, verbose, wait_time=0.0, amount=100):
    global f, s
    # clear_fcfs()  # clear (previous) FCFS select results
    # clear_sjf()  # clear (previous) SJF select results

    if select != "none":
        prepare(select, amount)

    if select.lower() == "fcfs":
        w = load(select)
        f = fcfs(w, wait_time, verbose)

    elif select.lower() == "sjf":
        w = load(select)
        s = sjf(w, wait_time, verbose)

    elif select.lower() == "both":
        w = load(select)
        f = fcfs(w, wait_time, verbose)
        w = load(select)
        s = sjf(w, wait_time, verbose)

    elif select.lower() == "none":
        pass

    else:
        exit(1)

    return [f, s]
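The fcfs() and sjf() implementations called above are not shown on this page. Purely for orientation, here is a minimal, hypothetical sketch of the classic first-come-first-served computation; the function name and the (arrival, burst) workload format are assumptions, not this project's API.

# Hypothetical illustration of FCFS scheduling; not the project's fcfs().
def fcfs_sketch(workload):
    clock = 0.0
    waits = []
    for arrival, burst in sorted(workload, key=lambda job: job[0]):
        start = max(clock, arrival)    # CPU may idle until the job arrives
        waits.append(start - arrival)  # time spent waiting in the ready queue
        clock = start + burst          # run to completion (non-preemptive)
    return sum(waits) / len(waits) if waits else 0.0

# fcfs_sketch([(0, 3), (1, 2), (2, 1)]) -> 5/3, the average waiting time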
Example No. 2
def start_offline(dataset):
    #sys.path.append(envpath)
    os.chdir(envpath)
    import prepare
    prepare.prepare(dataset)
    import train
    train.train(dataset)
Example No. 3
def load_data(configuration_path, DATA_ID):
    configuration = io.load_yml(configuration_path, DATA_ID)
    try:
        data = io.load_pickle(configuration['pkl_file'])
    except Exception:  # pickle not built yet -- prepare it, then retry
        prepare.prepare(DATA_ID, configuration_path)
        data = io.load_pickle(configuration['pkl_file'])

    return data
Example No. 4
def load_data(cfg_path, DATA_ID):
    cfg = io.load_yml(cfg_path, DATA_ID)
    try:
        data = io.load_pickle(cfg['pkl_file'])
    except Exception:  # pickle not built yet -- prepare it, then retry
        prepare.prepare(DATA_ID, cfg['pkl_file'])
        data = io.load_pickle(cfg['pkl_file'])

    return data
Example No. 5
def main(_):
    config = flags.FLAGS
    if config.mode == "prepare":
        prepare(config)
    elif config.mode == "test":
        test(config)
    elif config.mode == "train":
        train(config)
    else:
        print("Unknown mode")
        exit(0)
Example No. 6
def train(fold, model, last):

    # read the training data with folds
    df = pd.read_csv(config.TRAIN_FILE)

    if last:
        # train on whole dataset
        x_train = df.drop([config.TARGET_LABEL, "FOLD"], axis=1)
        y_train = df[config.TARGET_LABEL]
    else:
        # training data is where kfold is not equal to provided fold
        df_train = df[df["FOLD"] != fold].reset_index(drop=True)

        # validation data is where kfold is equal to provided fold
        df_valid = df[df["FOLD"] == fold].reset_index(drop=True)

        # create training samples
        x_train = df_train.drop([config.TARGET_LABEL, "FOLD"], axis=1)
        y_train = df_train[config.TARGET_LABEL]

        # create validation samples
        x_valid = df_valid.drop([config.TARGET_LABEL, "FOLD"], axis=1)
        y_valid = df_valid[config.TARGET_LABEL]

    # perform cleaning, feature engineering,
    # categorical variables encoding & scaling
    x_train = prepare(x_train)

    # fetch the model from models
    clf = models[model]

    # fit the model on training data
    clf.fit(x_train, y_train)

    if not last:
        # calculate & print metric
        predictions = clf.predict(prepare(x_valid))
        scorer = metrics.get_scorer(config.METRIC)
        metric = scorer._score_func(y_valid, predictions)
        print(f"Fold={fold}, {config.METRIC}={metric}")

    model_path = f"{model}.bin" if last else f"{model}_fold{fold}.bin"

    # save the model
    joblib.dump(clf, os.path.join(config.MODEL_OUTPUT, model_path))

    if last:
        print("Last model saved at: " + model_path)
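The fold-wise driver for this train() function is not shown here. A plausible, hypothetical caller under the usual k-fold pattern; the fold count and the "rf" model key are assumptions, not taken from the project.

# Hypothetical driver for train() above; N_FOLDS and "rf" are assumptions.
N_FOLDS = 5
for fold in range(N_FOLDS):
    train(fold, model="rf", last=False)  # per-fold fit + validation metric
train(fold=0, model="rf", last=True)     # final fit on the whole dataset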
Example No. 7
def main():
    collection = prepare()
    collection.insert_many([{'_id': i} for i in range(7)])

    search_conditions = {
        'collection': collection,
        'filter': {},
        'sort': [('_id', 1)],
        'limit': 5,
    }

    # gets the first page
    cursor1 = get_keyset_cursor(**search_conditions)
    print('page1:', list(cursor1))  # [{'_id': 0}, {'_id': 1}, {'_id': 2}, {'_id': 3}, {'_id': 4}]

    # gets the second page
    cursor2 = get_keyset_cursor(**search_conditions, position=cursor1.paging.next_position)
    print('page2:', list(cursor2))  # [{'_id': 5}, {'_id': 6}]

    collection.insert_one({'_id': -1})

    # the first page again, backwards from the previous page
    cursor1 = get_keyset_cursor(**search_conditions, position=cursor2.paging.previous_position)
    print('page1:', list(cursor1))  # [{'_id': 0}, {'_id': 1}, {'_id': 2}, {'_id': 3}, {'_id': 4}]

    # what if new items were added at the start?
    if cursor1.paging.has_previous:
        cursor0 = get_keyset_cursor(**search_conditions, position=cursor1.paging.previous_position)
        print('page0:', list(cursor0))  # [{'_id': -1}]
Example No. 8
def analyze(userid):
    # save the recent 10 posts into ./userid/
    scraper.get_rec10(userid)
    # load the saved vocabulary file
    tag_vocab = {}
    for tag in TAGS:
        d = {}
        with open(tag + '.csv') as vocab_file:
            for line in vocab_file:
                if len(line.split(',')) == 2:
                    word, freq = line.rstrip('\n').split(',')
                    d[word] = float(freq)
        tag_vocab[tag] = d
    # save 10 classifying results in one list, then return it
    results = []
    # process the posts saved, generate hot words list
    for root, dirs, files in os.walk(userid):
        for f in files:
            if f.split('.')[-1] != 'txt':
                continue
            # get 30 most used words
            hot_words = prepare.prepare(userid + '/' + f)
            # core: calculate likelihood for every tag
            probability = {}
            for tag in TAGS:
                probability[tag] = 1.0
                for word in hot_words:
                    try:
                        probability[tag] *= tag_vocab[tag][word]
                    except KeyError:
                        probability[tag] *= MINIMAL_PROB[tag]
            results.append(max(probability, key=probability.get))
    return results
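A side note, not part of the original: multiplying many small per-word frequencies as above can underflow to 0.0 for long posts. A common variant scores in log space instead; this sketch assumes the same tag_vocab / MINIMAL_PROB structures used above.

import math

# Hypothetical log-space version of the scoring loop; avoids underflow.
def score_tags(hot_words, tag_vocab, minimal_prob, tags):
    scores = {}
    for tag in tags:
        log_p = 0.0
        for word in hot_words:
            log_p += math.log(tag_vocab[tag].get(word, minimal_prob[tag]))
        scores[tag] = log_p
    return max(scores, key=scores.get)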
Example No. 9
def update(dry_run):
    if dry_run:
        print "This is a dry run. Nothing will be sent to blogger.com"
        print "Use command line argument --write to disable dry run"
    else:
        print "This is not a dry run. Updates will happen"

    info = util.Info()

    bloggerfeed = get_blogger_feed(info)

    autolinks = generate_automatic_links(bloggerfeed)

    diskfeed = get_disk_feed()

    unending_bloggerfeed = itertools.chain(reversed(bloggerfeed.entry),
                                           itertools.repeat(None))
    for d, b in itertools.izip(diskfeed, unending_bloggerfeed):
        name = os.path.join(root, d)
        title, doc = prepare.prepare(name, autolinks)
        if b:
            print "Considering", d
            should_update = False
            if title != b.title.text:
                print "\tUpdating title '%s' != '%s'" % (title, b.title.text)
                should_update = True
            if doc != b.content.text:
                print "\tUpdating content"
                should_update = True
            if should_update and not dry_run:
                publish.update(info, b, title, doc)
        else:
            print "Adding", d
            if not dry_run:
                publish.add(info, title, doc)
Example No. 10
def graph(sourcedir,regex,config,dictionary,proto,testratio,n,maxk,outfile,mfcdir,outdir,tempdir):

    wdnet, htk_dict, monophones, words_mlf, scp = prepare(sourcedir, mfcdir, outdir, regex, config, dictionary, tempdir)

    with open(scp) as f:
        full_set = set(f.readlines())
    
    test_n = round(len(full_set)*testratio)
    test_set = set(random.sample(full_set,int(test_n)))
    train_set = full_set - test_set

    scp_test = os.path.join(tempdir,'scp_test')
    with open(scp_test,'w') as f:
        f.writelines(test_set)

    if maxk == -1:
        maxk = len(train_set)

    def k_result(k):
        train_k = random.sample(train_set,k)
        scp_k = os.path.join(tempdir,'scp_k')
        with open(scp_k,'w') as f:
            f.writelines(train_k)
        final_dir = train(outdir, config, scp_k, proto, htk_dict, words_mlf, monophones, tempdir)
        return test(outdir, final_dir, wdnet, htk_dict, monophones, scp_test, words_mlf, tempdir)
    
    with open(outfile,'w') as f:
        for i in range(n):
            k = int(round(float(i+1)/n*maxk))
            r = k_result(k)
            f.write('{} {}\n'.format(k,r))
            print('i k r =',i,k,r)
Example No. 11
def predi_():
    new_img = request.get_json()
    img_name = new_img['imageName']
    path = 'image/{}'.format(img_name)
    storage.child(path).download('image.jpg')
    img = prepare('image.jpg')
    Class = prediction(model, img)
    return jsonify({'data': Class})
Example No. 12
def prepare_data(paragraphs):
    sentences = []
    for paragraph in paragraphs:
        sentences = sentences + prepare.prepare(cleanup.clean(paragraph))

    model = {}
    for sentence in sentences:
        model = make_pairs(tokenize.tokenize(sentence), model)

    return model
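make_pairs() is not shown above; from the way prepare_data() threads model through it, it presumably accumulates word-transition pairs in a Markov-chain style table. A minimal sketch under that assumption, with illustrative names only:

# Hypothetical make_pairs-style accumulator: record each follower word under
# the word that precedes it. Assumed behaviour, not the project's code.
def make_pairs_sketch(tokens, model):
    for current_word, next_word in zip(tokens, tokens[1:]):
        model.setdefault(current_word, []).append(next_word)
    return model

# make_pairs_sketch(["the", "cat", "sat"], {}) -> {"the": ["cat"], "cat": ["sat"]}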
Example No. 13
def main():
    from prepare import prepare
    from decouple import config
    from time import sleep

    prepare()

    print("Collecting weather data ...")

    collect_time = config("COLLECT_TIME_MIN", 60, cast=int)
    while True:
        try:
            collect()
            if collect_time > 0:
                print("Waiting another {0} minutes...".format(collect_time))
                sleep(60 * collect_time)
            else:
                break
        except KeyboardInterrupt:
            print("Manual break by user")
            break
Example No. 14
def newDataset():
    dataset = l.loadDataset(DATASET_FILE)
    dataset = p.prepare(dataset)
    s.saveDataset(dataset)
    X_train, X_test, y_train, y_test, y = p.splitSets(dataset)
    s.saveSets(X_train, X_test, y_train, y_test, y)
    clf = c.createClf()
    s.saveClassifier(clf)
    c.trainClassifier(clf, X_train, y_train)
    m.printScoring(clf, X_test, y, y_test)
    m.printPercentage(clf, X_test, y, y_test)
Example No. 15
def main(_):
    config = flags.FLAGS
    if config.mode == "get_vocab":
        get_vocab(config)
    elif config.mode == "prepare":
        prepare(config)
    elif config.mode == "train":
        train(config)
    elif config.mode == "train_rl":
        train_rl(config)
    elif config.mode == "train_qpp":
        train_qpp(config)
    elif config.mode == "train_qap":
        train_qap(config)
    elif config.mode == "train_qqp_qap":
        train_qqp_qap(config)
    elif config.mode == "test":
        test(config)
    else:
        print("Unknown mode")
        exit(0)
Example No. 16
    def get_value(self, features, use_ECR):
        ECR_value = None
        IS_value = None
        key = ",".join([str(i) for i in features])
        if key in self.hist:
            ECR_value = self.hist[key][0]
            IS_value = self.hist[key][1]
        else:
            filename = "temp.csv"
            prepare(features, "binned_{}_reorder.csv".format(self.num_bin),
                    filename)
            ECR_value, IS_value = MDP_policy.induce_policy_MDP(filename)
            data = [ECR_value, IS_value]
            data.extend(features)
            exportCSV(
                data, "bin_{}_history/{}.csv".format(self.num_bin,
                                                     len(features)))
            self.hist[key] = [float(ECR_value), float(IS_value)]
        curr_value = ECR_value
        if not use_ECR:
            curr_value = IS_value
        return curr_value
Example No. 17
def main(args):
    """
    This trains the PedalNet model to match the output data from the input data.

    When you resume training from an existing model, you can override hparams such as
        max_epochs, batch_size, or learning_rate. Note that changing num_channels,
        dilation_depth, num_repeat, or kernel_size will change the shape of the WaveNet
        model and is not advised.

    """

    prepare(args)
    model = PedalNet(vars(args))
    trainer = pl.Trainer(
        resume_from_checkpoint=args.model if args.resume else None,
        gpus=None if args.cpu or args.tpu_cores else args.gpus,
        tpu_cores=args.tpu_cores,
        log_every_n_steps=100,
        max_epochs=args.max_epochs,
    )

    trainer.fit(model)
    trainer.save_checkpoint(args.model)
Example No. 18
def infer(model_path):

    # read the test data
    df = pd.read_csv(config.TEST_FILE)

    # perform cleaning, feature engineering,
    # categorical variables encoding & scaling
    df = prepare(df)

    # fetch the pretrained model
    clf = joblib.load(model_path)

    # predict on test dataset
    predictions = clf.predict(df)
    df["predictions"] = predictions

    # dump dataset with predictions
    df.to_csv("../submission.csv", index=False)
Example No. 19
# -*- coding: utf-8 -*-

from flask import Flask, render_template, redirect
from flask_flatpages import FlatPages
from flask_frozen import Freezer
from prepare import prepare

# -----------------------------------------------------------------------------

app = Flask(__name__)
app.config.from_pyfile('settings.py')
pages = FlatPages(app)
freezer = Freezer(app)
family_book, parent_map = prepare()

# -----------------------------------------------------------------------------


def get_spec_posts(_type, lang, num=5):
    posts = [page for page in pages if page.path.startswith(lang) \
                and 'date' in page.meta \
                and 'type' in page.meta and page.meta['type'] == _type]
    sorted_posts = sorted(posts,
                          reverse=True,
                          key=lambda page: page.meta['date'])[:num]
    return sorted_posts


@app.route('/')
def home():
    return redirect('/cn/')
Example No. 20
    
    if hp.jsonl is None:
        if not (hp.train and hp.val):
            print('Pass the train.csv and val.csv path')
            exit()
            
        train_dataset = dataset(hp.train)
        train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, **params)

        valid_dataset = dataset(hp.val)
        valid_dataloader = torch.utils.data.DataLoader(dataset=valid_dataset, **params)

    else:
        
        prepare(hp.jsonl).run(hp.training_method)
        
        train_dataset = dataset(
            os.path.join(self.target_path, 'train%d.csv' % hp.training_method))

        train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, **params)

        valid_dataset = dataset(
            os.path.join(self.target_path, 'val%d.csv' % hp.training_method))

        valid_dataloader = torch.utils.data.DataLoader(dataset=valid_dataset, **params)
    
    
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Using device:", device)
    
Example No. 21
    soln, agg_soln = optimize(mu=(mu[0], mu_p2),
                              sigma=(cov[0], cov_p2),
                              alpha=(0.05, 0.10),
                              return_target=(target_returns, target_returns),
                              costs=cost,
                              prices=prices,
                              gamma=risk_tolerance[2])

    return soln, agg_soln


## *********************************************************************************************************************
#  INPUTS
## *********************************************************************************************************************
mu, cov, cost, prices = prepare()

# base portfolio ... comes as an input
portfolio = {'SPY': 0.25, 'MSFT': 0.10, 'JPM': 0.25, 'NTIOF': 0.40}

# our tracked assets
tickers = list(pd.read_csv(os.getcwd() + r'/data/tickers.csv')['Tickers'])

# the number of assets
N = len(tickers)

# the user chooses which assets (from the 18 we follow) should be included.
cardinality = [1] * 7 + [0] * (N - 7)
risk_tolerance = [((1, 10), (0, 0.10), cardinality, 'SHARPE'),
                  ((5, 5), (0, 0.20), cardinality, 'SHARPE'),
                  ((10, 1), (-0.05, 0.50), cardinality, 'SHARPE')]
Example No. 22
def do_prepare(envdir, srcdir, source):
    prepare.prepare(envdir, srcdir, source.prepare_arguments)
Example No. 23
    def __init__(self):
        """
        Takes command line variables and does one of the following: upload, prepare, upgrade, backout
        """
        parser = OptionParser("usage: ftosupgrade <options>")
        parser.add_option("-d", "--devices", dest="devices",
            help="List of devices to upgrade separated by a ','", default=None)
        parser.add_option("-r", "--region", dest="region",
            help="Any region e.g us-east (for uploads only!)", default=None)
        parser.add_option("-t", "--type",dest="type",
            help="This can be prepare,upgrade,backout, or upload",
            choices=['upload','prepare','upgrade','backout'],
            default='prepare')
        parser.add_option("-b","--binfile",dest="binfile",
            help="The name of the binary file you are using for the upgrade e.g. FTOS-SK-9.14.1.0.bin")
        parser.add_option("-n","--numforks",dest="numforks",
            help="The number of scp sessions you want to run at once, default is 20 (only for uploads!)",default=20)
        parser.add_option("-f", "--force", dest="noforce",
            action="store_false",
            help="use -f to force scripts to run (only works with prepare at the moment)", default=True)
        parser.add_option("--test", dest="notest",
            action="store_false",
            help="use --test to run the upgrade command without reloading the switch", default=True)
        parser.add_option("-p","--binfilepath",dest="binfilepath",
            default=BINFILEPATH,
            help="The path where all your binary files are stored")
        (options, args) = parser.parse_args()
        dl = os.getcwd().split('/')
        self.options=options
        if options.type=='upload' and options.binfile is not None and (options.devices is not None or options.region is not None):
            #upload bin files to multiple devices
            self.devlist=list()
            self.sqllist=list()
            if options.devices:
                self.devlist=options.devices.split(',')
                self.makeforks()
            else:
                self.region=options.region
                self.gethosts()
                i=1
                for hn in self.sqllist:
                    if i<=int(options.numforks):
                        self.devlist.append(hn)
                        i=i+1
                    else:
                        i=0
                        self.makeforks()
                        self.devlist=[hn]
                self.makeforks()
        elif options.devices is not None and options.binfile is not None:
            # prepare and upgrade devices
            self.devlist=options.devices.split(",")
            for d in self.devlist:
                m=utils(hostname=d,options=options)
                m.setupworkspace()
                if options.type=='prepare':
                    prepare(hostname=d,options=options)
                elif options.type=='backout':
                    backout(hostname=d,options=options)
                elif options.type=='upgrade':
                    p=prepare(hostname=d,options=options)
                    if len(p.errors[d]['critical'])<1:
                        u=upgrade(hostname=d,options=options)
                        if len(u.errors[d]['critical'])>0:
                            m.warning('Found Errors with upgrade...exiting!')
                            sys.exit()
                    else:
                        m.warning('Found Errors with prepare... exiting!')
                        sys.exit()

        else:
            print("Please specify at least 1 device and a binary file name")
            parser.print_help()
Example No. 24
import tkinter
from config import config as configuration, Config
from prepare import prepare

__author__ = 'oglandx'

tk_root = tkinter.Tk()

imgs = prepare(configuration)
print(imgs)

canvas = tkinter.Canvas(tk_root, width=1300, height=700)
canvas.pack()

for k in imgs.keys():
    print(k)

for i, cls in enumerate(imgs.values()):
    for j, img in enumerate(cls):
        canvas.create_image(
            (Config.size[0] * (0.5 + 1.2 * i), Config.size[1] * (j + 1)),
            image=img)

tk_root.mainloop()
Example No. 25
              (r'/waiter-receive', WaiterReceiveHandler),
              (r'/waiter-query', WaiterQueryHandler),
              (r'/waiter-done', WaiterDoneHandler),
              (r'/waiter-left-update', WaiterLeftUpdateHandler),
              (r'/waiter-done-update', WaiterDoneUpdateHandler),
              (r'/waiter-queue', WaiterQueueHandler),
              (r'/waiter-desk-update', WaiterDeskUpdateHandler),
              (r'/waiter-queue-update', WaiterQueueUpdateHandler),
              (r'/pictures/(.+)', PictureHandler)]
settings = dict(
    cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
    # login_url="/",
    template_path=os.path.join(os.getcwd(), "templates"),
    static_path=os.path.join(os.getcwd(), "static"),
    xsrf_cookies=False,
    debug=options.debug,
)


def main():
    tornado.options.parse_command_line()
    application = tornado.web.Application(myhandlers, **settings)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()


if __name__ == "__main__":
    prepare.prepare()
    main()
Example No. 26
def main():

    prepare()
    n_args = count_args()
    for i in tqdm(range(n_args)):
        run(i)
Example No. 27
        """
        norm_factor = np.linalg.norm(vector)
        vector = [_ / norm_factor for _ in vector]
        return vector


if TestFlag == True:
    if __name__ == "__main__":
        folder_dir = './Edge_Lists/AUCS/'
        """
        =======================
        prepare the data
        =======================
        """
        DirectFlag = False
        NodesSet, EdgesList_layers = prepare(folder_dir, DirectFlag)
        """
        =======================
        set the hyperparameters
        =======================
        """
        dimension = 10
        margin = 1
        Learning_rate = 0.001
        nbatch = 1
        nepoch = 1000
        error = 0.0000001
        """
        =======================
        Embedding each network
        =======================
Example No. 28
from time import sleep
import recorder
import player
import prepare

recording = []
samples = 0

(recording, samples) = recorder.record(2)
recording = prepare.prepare(recording)

player.replay(b''.join(recording))
Example No. 29
# -*- coding: utf-8 -*-

from flask import Flask, render_template, redirect
from flask_flatpages import FlatPages
from flask_frozen import Freezer
from prepare import prepare

# -----------------------------------------------------------------------------

app = Flask(__name__)
app.config.from_pyfile('settings.py')
pages = FlatPages(app)
freezer = Freezer(app)
family_book, parent_map = prepare()

# -----------------------------------------------------------------------------

def get_spec_posts(_type, lang, num = 5):
    posts = [page for page in pages if page.path.startswith(lang) \
                and 'date' in page.meta \
                and 'type' in page.meta and page.meta['type'] == _type]
    sorted_posts = sorted(posts, reverse=True,
        key=lambda page: page.meta['date'])[:num]
    return sorted_posts

@app.route('/')
def home():
    return redirect('/cn/')

@app.route('/<lang>/')
def home_lang(lang):
Example No. 30
from report import report

usage = """
======================================================================
    usage:
    python checker.py sol=file lang=language problem=problem mode=mode
      the parameters:
      sol: path to the solution file
      lang: the language
      problem: short name of the problem to test
      mode:
        prog --> classic program
        func --> just a single function; this is the DEFAULT.
    sol is always required. The rest it tries to guess from the file name.
    For example:
    python checker.py sol=bday.py
    is equivalent to the longer call
    python checker.py sol=bday.py lang=python problem=bday mode=func
======================================================================
"""

res = prepare(dict())
res = execute(res)
res = evaluate(res)
report(res)

print()
if res["prep"]["msg"] != "OK":
    print(usage)
Example No. 31
def do_prepare(envdir, srcdir, source):
    prepare.prepare(envdir, srcdir, source.prepare_arguments)
Example No. 32
import sys

import numpy as np
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Dropout
import matplotlib.pyplot as plt

from prepare import prepare
import color

data, target = prepare('./data/XRP_usdt_xrp_1.csv',
                       './data/XRP_usdt_btc_1.csv',
                       './data/XRP_btc_xrp_1.csv')


data = np.array(data, dtype=float)
print(color.red(data.shape))

target = np.array(target, dtype=float)
print(color.red(target.shape))


# model = Sequential()
# model.add(LSTM(60, return_sequences=True, batch_input_shape=(None, len(data[0]), 6)))
# model.add(Dropout(0.35))
# model.add(LSTM(100, return_sequences=False))
# model.add(Dropout(0.35))
# model.add(Dense(1))
Example No. 33
from prepare import prepare, update, kernel
from report import create_report
from watch import watch, flush
from estimate import Estimate
from tt import train, test
from pysvm.svm import SVM
from index import Index
from args import *
from db import *

try:
    cnx = connect(**base)
    lid = insert_launch(cnx)
    obj, sen = Index(), Index()
    stats, top, dl = prepare(cnx, lid, cargs["ngrams"], cargs["test"], obj,
                             sen)
    k = kernel(cargs["kernel"], cargs["sigma"])
    estimate = Estimate()
    estimate.read()

    for mode, (trset, teset) in zip(["objective", "sentiment"], dl):
        svm = SVM(k,
                  c=cargs["c"],
                  tol=cargs["tol"],
                  lpass=cargs["lpass"],
                  liter=cargs["liter"])
        svm = watch(svm, mode, len(trset[0]), estimate, cnx, lid, cargs)
        tr = train(svm, mode, trset[0], trset[1], cnx, lid)
        te = test(svm, mode, teset[0], teset[1])
        svm, ts = flush(svm, mode)
        stats = update(stats, tr, te, ts)
Example No. 34
		INFILE = argv[i+1]
	if arg in ["-o", "--out"]:
		OUTFILE = argv[i+1]
	if arg in ["-m", "--meter"]:
		METER = argv[i+1]
	if arg in ["-l", "-L", "--mlength"]:
		BEATS_PER_MEASURE = argv[i+1]
	if arg in ["-k", "--key"]:
		KEY = argv[i+1]

my_file = open(INFILE, "r")
lines = my_file.readlines()
my_file.close()

# convert lines of our file into a list of pieces
pieces = prepare.prepare(lines)
pieces = [piece for piece in pieces if piece[0][0] != "###"]

to_delete = []
for i in range(len(pieces)):
	try:
		pieces[i][1]["M"]
	except KeyError:
		# print("piece has no meter:", piece)
		# print("piece without meter:",piece[0])
		# piece[1]["M"] = "6/8"
		to_delete.append(i)
		# del pieces[i]
for i in range(len(pieces)):
	try:
		pieces[i][1]["K"]
Example No. 35
import os
from config import workspace_path
from train import train
from config import Model_Config
from prepare import prepare
from predict import predict
from evaluate import evaluate

print(os.getcwd())
os.chdir(workspace_path)
os.mkdir('log')
os.mkdir('hdf')
print(os.getcwd())
print(os.listdir('./'))

if __name__ == '__main__':
    # prepare
    X_train_processed, X_val_processed, Y_train_list, Y_val_list, vocabulary_size, label_distribute_dict_list = prepare(
    )
    # train
    model, history = train(X_train_processed, X_val_processed, Y_train_list,
                           Y_val_list, vocabulary_size,
                           label_distribute_dict_list)
    # predict
    preds = predict(X_val_processed, Model_Config.model_saved_filepath)
    # evaluate
    validate_df, df_pcf = evaluate(preds)
    # print precision, recall, f1 values
    print(df_pcf)
Example No. 36
File: app.py  Project: ufoym/pagehub
from flask import Flask, render_template
from flask_flatpages import FlatPages
from flask_frozen import Freezer
from prepare import prepare

# -----------------------------------------------------------------------------

app = Flask(__name__)
app.config.from_pyfile('settings.py')
pages = FlatPages(app)
freezer = Freezer(app)

# -----------------------------------------------------------------------------

@app.route('/')
def home():
    posts = [page for page in pages if 'date' in page.meta]
    sorted_posts = sorted(posts, reverse=True,
        key=lambda page: page.meta['date'])
    return render_template('index.html', pages=sorted_posts)

@app.route('/<path:path>/')
def page(path):
    page = pages.get_or_404(path)
    return render_template('page.html', page=page)

# -----------------------------------------------------------------------------

if __name__ == "__main__":
    prepare()
    app.run()

# -----------------------------------------------------------------------------
Example No. 37

from time import time
start = time()

print 'Importing...',
import prepare
import learn
print 'done'

aAList = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU',
          'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
working_directory = 'C:/university/biology'
prepare.prepare(working_directory)
learn.learn(working_directory, True)
print 'Total time taken: {} seconds'.format(time()-start)

Example No. 38
###

i = open( input_file )
reader = csv.reader( i )

t = open( test_file )

headers = reader.next()
mapping = {}

for line in reader:
	query = line[3]
	sku = line[1]
	# print "%s -> %s" % ( query, sku )
	
	query = prepare( query )
	
	try:
		mapping[query][sku] += 1
	except KeyError:
		try:
			mapping[query][sku] = 1
		except KeyError:
			mapping[query] = {}
			mapping[query][sku] = 1

queries = mapping.keys()
nqueries = {}
for n in queries:
	nqueries[n] = 1
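An aside, not part of the original script: the nested try/except counting above can be written more directly with collections.defaultdict; the sketch below shows the equivalent structure for comparison.

from collections import defaultdict, Counter

# Equivalent counting structure: mapping[query][sku] += 1 works without
# any KeyError handling, because missing entries default to 0.
mapping = defaultdict(Counter)
# for line in reader:
#     mapping[prepare(line[3])][line[1]] += 1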
Example No. 39
# This line tells tabarray that all columns contain string values,
# not numbers. Remove it after loading the file.
last_line = re.sub('[\t]','fake\t',raw_input_file[0])
out.write(last_line)
out.close()

# reading file into array
raw = tb.tabarray(SVfile = "cleaned_output.tmp",delimiter = '\t')
raw = raw[:-1] # remove hack-line
raw = utils.columnwider(raw)
tmp_tab = raw # create temporal array
os.remove("cleaned_output.tmp") # remove temporal file
#}}}

# create common data base for later processing
bd = prepare.prepare(raw)

# build component list PE3
pe3_array = pe3.pe3(bd)

# save table to file
savelatex(args.pe3, pe3_array)