Code example #1
File: core.py  Project: nolan88/BayesianTracker
    def export(self, filename):
        """ Export the track data in the appropriate format for subsequent
        analysis. Note that the HDF5 format is intended to store references to
        objects, rather than the tracks themselves, so we need to deal with
        that differently here...

        TODO(arl): Make sure that we are working with an existing HDF5 file!
        """
        # log the output
        logger.info('Exporting {0:d} tracks to file...'.format(self.n_tracks))
        if not filename.endswith('hdf5'):
            utils.export(filename, self.tracks)
        else:
            utils.export_HDF(filename, self.refs, dummies=self.dummies)
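A minimal call sketch for the method above (the `tracker` object and the filenames are illustrative assumptions, not part of the project shown): the only thing that selects the HDF5 path is the filename suffix.

# Hypothetical usage; `tracker` is assumed to be an object exposing the export()
# method above, i.e. it has n_tracks, tracks, refs and dummies attributes.
tracker.export('tracks.json')   # no 'hdf5' suffix -> utils.export(filename, self.tracks)
tracker.export('tracks.hdf5')   # 'hdf5' suffix -> utils.export_HDF(filename, self.refs, dummies=self.dummies)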
Code example #2
File: generationHandler.py  Project: Alligator/evo
def export():
    piece = raw_input("Enter piece ID > ")
    filename = raw_input("Enter a filename > ")
    utils.export(generation.get()[int(piece)], filename)
    print "File written"
Code example #3
# -*- coding: utf-8 -*-
import utils

usernames = ['BarackObama', 'billclinton', 'br', 'Cristiano', 'Ibra_official', 'JohnCleese', 'jeffzrebiecsun', 'kerrywashington', 'NatGeo', 'tnyCloseRead']

count = 200
for username in usernames:
	tweets = utils.cleanTweets(utils.getTweets(username, count - 1))
	utils.export(username+"-tweets.txt", tweets, "w+")
	print(username, "done")
Code example #4
# -*- coding: utf-8 -*-
import utils

usernames = ['sertaberener', 'DemetAkalin', 'hulyavsar', 'sertaberener', 'gulbenergen', 'MuratBoz', 'Niltakipte']

count = 3000

for username in usernames:
	tweets = utils.cleanTweets(utils.getTweets(username, count))
	utils.export("data/"+username+"-tweets.txt", tweets, "w")
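Examples #3 and #4 call `utils.export(filename, tweets, mode)` without showing the helper itself. Below is a minimal Python 3 sketch of a writer consistent with those calls; it is an assumption for illustration, not the project's actual `utils.export`.

import os

def export(filename, lines, mode):
    # Assumed behaviour: write one tweet per line to `filename`, creating the
    # parent directory if needed; `mode` is passed straight to open() ('w', 'w+' or 'a').
    parent = os.path.dirname(filename)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(filename, mode, encoding='utf-8') as f:
        for line in lines:
            f.write(line.rstrip('\n') + '\n')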
Code example #5
File: mesh.py  Project: zhangtianer521/point2mesh
    def export(self, file):
        vs = self.vs.cpu().clone()
        vs -= self.translations[None, :]
        vs *= self.scale
        export(file, vs, self.faces)
Code example #6
WHITE_LIST = [
  r'^/src/device/io/*$',
  r'^/src/engine/interpreter/*$',
  r'^/src/memory/*$',
  r'^/include/*$',
  r'^/include/cpu/*$',
  r'^/include/device/*$',
  r'^/include/memory/*$',
  r'^/include/monitor/*$',
  r'^/include/rtl/*$',
]

BLACK_LIST = [
  r'/build/',
  r'/export/',
  r'/gen-expr/',
  r'/kvm-diff/',
  r'/recorder/',
  r'/.git/',
  r'mips32',
  r'riscv32',
  r'x86',
  r'/resource/bbl',
  r'/engine/rv64',
  r'/device/audio.c',
  r'/device/vga.c',
  r'/device/keyboard.c',
  r'runall.sh',
]

export(WHITE_LIST, BLACK_LIST)
Code example #7
    def test_export(self):
        with utils.export({'number': 'JACOB'}) as path:
            workbook = openpyxl.load_workbook(path)
            sheet = workbook['JACOB']
            assert sheet['C3'].internal_value == 'JACOB'
            workbook.close()
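Examples #7 and #11 use `utils.export` as a context manager that yields the path of a spreadsheet written to disk. The following is a minimal sketch of such a helper, given here as an assumption for illustration (it is not the project's actual implementation), using openpyxl and a temporary file.

import contextlib
import os
import tempfile

import openpyxl

@contextlib.contextmanager
def export(record):
    # Build a one-sheet workbook named after record['number'] and put the same
    # value in cell C3, matching what the test above reads back.
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = record['number']
    sheet['C3'] = record['number']
    fd, path = tempfile.mkstemp(suffix='.xlsx')
    os.close(fd)
    workbook.save(path)
    try:
        yield path            # caller reads the file while the context is open
    finally:
        os.remove(path)       # clean up the temporary file afterwards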
Code example #8
def simulate():
    args = utils.process_args(vars(utils.parser.parse_args()))
    print(args)
    Ns, densities, solvers, budgets, nsim, costType, verbose, loadPrev, standardize = args
    result_dict = []
    result_colnums_names = [
        'N', 'Density', 'Solver', 'Budget', 'Cost', 'Time_avg', 'Time_sd',
        'Sol_avg', 'Sol_sd'
    ]
    total_simulations = utils.getTotalSimulation(
        [Ns, densities, budgets, costType])
    total_simulations *= nsim
    progress = 0
    loadPrev_outer = loadPrev

    try:
        for N in Ns:
            for density in densities:
                for budget in budgets:
                    for cost in costType:
                        sols = np.zeros((nsim, len(solvers)))
                        times = np.zeros((nsim, len(solvers)))
                        if loadPrev:
                            try:
                                print(
                                    "\nLoading previously saved test instances..."
                                )
                                try:
                                    update_cost = False
                                    sims, new_budget = utils.load_saved_instance(
                                        N, density, budget, cost)
                                except:
                                    print("Need to update costs...")
                                    update_cost = True
                                    sims, new_budget = utils.load_saved_instance(
                                        N, density, budget, None)
                            except:
                                print(
                                    "Failed to load... Creating new instances..."
                                )
                                sims = []
                                loadPrev = False
                        else:
                            print("Creating new instances...")
                            sims = []

                        for sim in range(nsim):
                            if loadPrev and sim < len(sims):
                                changed_instance = False
                                G, B, U, C = sims[sim]
                                if update_cost:
                                    print("\nUpdating costs...")
                                    C = generate_cost(G, cost)
                                    sims[sim] = G, B, U, C
                                    changed_instance = True
                                if new_budget:
                                    print(
                                        "\nReusing test cases but with different budget..."
                                    )
                                    B = 5 * G.order() * budget
                            else:
                                changed_instance = True
                                G = generate_random_dag(N, density)
                                B = 5 * N * budget
                                U = generate_utility(G)
                                C = generate_cost(G, cost)
                                sims.append((G, B, U, C))
                            for solver_index in range(len(solvers)):
                                solver = solvers[solver_index]
                                if solver == "ilp":
                                    if cost == "monotone":
                                        C_ilp = C[0]
                                        s_time, s_sol = ilp_time(G, C[0], B, U)
                                    elif cost == "add":
                                        s_time, s_sol = ilp_time(G, C, B, U)
                                elif solver == "bf":
                                    s_time, s_sol = brute_force_time(
                                        G, C, B, U, cost)
                                elif solver == "gd":
                                    s_time, s_sol = greedy_time(
                                        G, C, B, U, cost)
                                elif solver == "gd2":
                                    s_time, s_sol = greedy2_time(
                                        G, C, B, U, cost)
                                sols[sim, solver_index] = s_sol
                                times[sim, solver_index] = s_time
                            progress += 1
                            if verbose:
                                utils.update_progress(progress /
                                                      total_simulations)
                        if changed_instance or new_budget:
                            print("\nTest instances saved for future use.")
                            utils.save_instance(sims, N, density, budget, cost)

                        result_dict.extend(
                            utils.generate_result_dict(N, density, budget,
                                                       cost, solvers, sols,
                                                       times, standardize))
                        loadPrev = loadPrev_outer

        utils.export(result_colnums_names, result_dict)
    except KeyboardInterrupt:
        utils.export(result_colnums_names, result_dict)
Code example #9
File: eval.py  Project: pombredanne/CRFSuiteTagger
def conll(data,
          cols=('form', 'postag', 'chunktag', 'guesstag'),
          *args,
          **kwargs):
    """Evaluates chunking f1-score provided with data with the following fields:
    form, postag, chunktag, guesstag

    Currently uses the CoNLL-2000 evaluation script to make the estimate.

    This method will be deprecated with version 0.2

    :param data: np.array
    :param cols: columns to be used for the evaluation
    :type cols: str or tuple or list
    :return: f1-score estimate
    :rtype: AccuracyResults
    """
    warnings.warn(
        'Using the CoNLL-2000 evaluation script is deprecated. `bio` '
        'evaluation should be used instead.')
    try:
        os.makedirs(join(os.getcwd(), 'tmp/'))
    except OSError:
        pass

    td = join(os.getcwd(), 'tmp/')

    rn = rnd.randint(1000, 1000000000000)

    fp_dp = join(td,
                 'chdata.%s.%s.tmp' % (time.asctime().replace(' ', ''), rn))
    fp_res = join(td,
                  'chres.%s.%s.tmp' % (time.asctime().replace(' ', ''), rn))
    fh_out = open(fp_res, 'w')

    export(data, open(fp_dp, 'w'), cols=cols, ts=' ')

    cwd = os.getcwd()
    prl = join(cwd, 'conll_eval.pl' + random_str())
    with open(prl, 'w') as fh:
        fh.write(conll_script)
    c = cmd('perl %s -l < {}' % prl, fp_dp, cwd=cwd, stdout=fh_out)

    r = AccuracyResults()

    try:
        check_call(c)
        r.parse_conll_eval_table(fp_res)
    except CalledProcessError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print "*** print_tb:"
        traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
        print "*** print_exception:"
        traceback.print_exception(exc_type,
                                  exc_value,
                                  exc_traceback,
                                  limit=2,
                                  file=sys.stdout)
    finally:
        os.remove(fp_dp)
        os.remove(fp_res)
        os.remove(prl)
        return r
Code example #10
File: eval.py  Project: savkov/CRFSuiteTagger
def conll(data, cols=('form', 'postag', 'chunktag', 'guesstag'),
          *args, **kwargs):
    """Evaluates chunking f1-score provided with data with the following fields:
    form, postag, chunktag, guesstag

    Currently uses the CoNLL-2000 evaluation script to make the estimate.

    This method will be deprecated with version 0.2

    :param data: np.array
    :param cols: columns to be used for the evaluation
    :type cols: str or tuple or list
    :return: f1-score estimate
    :rtype: AccuracyResults
    """
    warnings.warn('Using the CoNLL-2000 evaluation script is deprecated. `bio` '
                  'evaluation should be used instead.')
    try:
        os.makedirs(join(os.getcwd(), 'tmp/'))
    except OSError:
        pass

    td = join(os.getcwd(), 'tmp/')

    rn = rnd.randint(1000, 1000000000000)

    fp_dp = join(td, 'chdata.%s.%s.tmp' % (time.asctime().replace(' ', ''), rn))
    fp_res = join(td, 'chres.%s.%s.tmp' % (time.asctime().replace(' ', ''), rn))
    fh_out = open(fp_res, 'w')

    export(data,
           open(fp_dp, 'w'),
           cols=cols,
           ts=' ')

    cwd = os.getcwd()
    prl = join(cwd, 'conll_eval.pl' + random_str())
    with open(prl, 'w') as fh:
        fh.write(conll_script)
    c = cmd(
        'perl %s -l < {}' % prl,
        fp_dp,
        cwd=cwd,
        stdout=fh_out
    )

    r = AccuracyResults()

    try:
        check_call(c)
        r.parse_conll_eval_table(fp_res)
    except CalledProcessError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print "*** print_tb:"
        traceback.print_tb(exc_traceback,
                           limit=1,
                           file=sys.stdout)
        print "*** print_exception:"
        traceback.print_exception(exc_type,
                                  exc_value,
                                  exc_traceback,
                                  limit=2,
                                  file=sys.stdout)
    finally:
        os.remove(fp_dp)
        os.remove(fp_res)
        os.remove(prl)
        return r
Code example #11
def export(_id):
    collection = mongo.db.courses
    _id = bson.ObjectId(_id)
    course = collection.find_one(_id)
    with utils.export(course) as path:
        return flask.send_file(path, mimetype='application/vnd.ms-excel', as_attachment=True)
Code example #12
# -*- coding: utf-8 -*-
import utils
import os
import nltk

positives, negatives = [], []
for filename in os.listdir("data"):
    if filename.endswith(".txt"):
        with open('data/' + filename) as f:
            tweets = [tweet for tweet in f.readlines()]
        pos, neg = utils.groupTweets(tweets)

        utils.export("train/positives.txt", pos, "a")
        utils.export("train/negatives.txt", neg, "a")