Example 1
def load_dependencies(sqlite_db_path):
    # Connect to the SQLite database at the given path
    conn = sqlite3.connect(sqlite_db_path)
    cursor = conn.cursor()
    query = (
        "select D.type as 'DEPENDENCY_TYPE', "
        "EV_INFLU.trial_id, EV_INFLU.id, EV_INFLU.checkpoint, EV_INFLU.code_component_id, EV_INFLU.activation_id, "
        "EV_INFLU.repr, EV_INFLU.member_container_activation_id, EV_INFLU.member_container_id, CC_INFLU.name, CC_INFLU.type, "
        "EV_DEPEND.trial_id, EV_DEPEND.id, EV_DEPEND.checkpoint, EV_DEPEND.code_component_id, EV_DEPEND.activation_id, "
        "EV_DEPEND.repr, EV_DEPEND.member_container_activation_id, EV_DEPEND.member_container_id, CC_DEPEND.name, CC_DEPEND.type "
        "from dependency D "
        "join evaluation EV_DEPEND on D.dependent_id = EV_DEPEND.id "
        "join evaluation EV_INFLU on D.dependency_id = EV_INFLU.id "
        "join code_component CC_DEPEND on EV_DEPEND.code_component_id = CC_DEPEND.id "
        "join code_component CC_INFLU on EV_INFLU.code_component_id = CC_INFLU.id "
    )
    dependencies = []
    for tupl in cursor.execute(query, []):
        typeof = tupl[0]
        target = Evaluation(tupl[1], tupl[2], tupl[3], tupl[4], tupl[5],
                            tupl[6], tupl[7], tupl[8], tupl[9], tupl[10])
        source = Evaluation(tupl[11], tupl[12], tupl[13], tupl[14], tupl[15],
                            tupl[16], tupl[17], tupl[18], tupl[19], tupl[20])
        dependencies.append(Dependency(source, target, typeof))
    conn.close()
    return dependencies
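The function above unpacks each joined row into Evaluation and Dependency objects defined elsewhere in that project. A minimal sketch of plausible containers, assuming plain positional fields named after the selected columns (the real classes may differ):

from collections import namedtuple

# Hypothetical containers; field names are guesses based on the query's columns.
Evaluation = namedtuple("Evaluation", [
    "trial_id", "id", "checkpoint", "code_component_id", "activation_id",
    "repr", "member_container_activation_id", "member_container_id",
    "code_component_name", "code_component_type",
])
Dependency = namedtuple("Dependency", ["source", "target", "type"])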
Example 2
    def closest_to_gold(self, input, gold_standards):

        golds = gold_standards[1]
        uni = golds[0][0].lower()
        years = [str(golds[0][1]), str(golds[0][2])]
        eval = Evaluation(golds, uni, years)
        #print("####>>>>",golds,uni,years)

        common_year = set()
        common_university = set()

        for y_ in input[1]:
            if y_ in years:
                common_year.add(y_)

        for u_ in input[0]:
            if u_ == uni:
                common_university.add(u_)
            elif eval.how_university(u_, uni):
                common_university.add(u_)
        #print("####>>>>",common_university,common_year)

        ev = Evaluation(golds, list(common_university), list(common_year))
        #print("####>>>>",ev.total_accuracy())

        return ev.total_accuracy()
Example 3
def backtest():
    # Main loop of backtesting

    DIR = '/Data/fx10-20 1H.pkl'
    EMA_PERIODS = 24
    EMA_TP = 50
    EMA_SL = 50
    INIT_CAP = 10000
    RISK = 0.05

    data_loader = PickleLoader(DIR)
    strategy = EmaCross(data_loader, EMA_PERIODS, EMA_TP, EMA_SL)
    portfolio = ConstantRiskPortfolio(INIT_CAP, RISK)
    execution = ExecutionHandlerSimulation(data_loader)
    performance = Evaluation(data_loader, portfolio)

    while data_loader.continue_backtest:
        signals = strategy.compute()               # EMA cross signals
        orders = portfolio.order(signals=signals)  # size orders at constant risk
        fills = execution.execute(orders=orders)   # simulated execution
        portfolio.update(fills)                    # update portfolio with fills
        performance.evaluate()                     # update the Evaluation metrics
        data_loader.set()                          # advance the data loader

    return
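For context, a minimal sketch of the kind of performance tracker the performance.evaluate() call above could drive. The class body, its equity_curve list, and the portfolio.total_equity attribute are assumptions for illustration, not the project's actual Evaluation API:

class SimpleEvaluation:
    """Hypothetical stand-in for the Evaluation object used in the loop above."""

    def __init__(self, data_loader, portfolio):
        self.data_loader = data_loader
        self.portfolio = portfolio
        self.equity_curve = []

    def evaluate(self):
        # Record the portfolio's current equity once per bar (assumed attribute).
        self.equity_curve.append(self.portfolio.total_equity)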
Example 4
    def majority_aggregation(self, input, gold_standards):

        # if os.path.exists(os.getcwd() + path_entities_memory):
        #     (input, gold_standards) = joblib.load(os.getcwd() + path_entities_memory)
        # else:
        #     self.baseline_agregate_NE()
        #     (input, gold_standards) = joblib.load(os.getcwd() + path_entities_memory)

        person_counter = 0
        accur_uni, accu_year = (0.0, 0.0)

        # for key in input.keys():
        tempo = input
        university_repetition = {x: tempo[0].count(x) for x in tempo[0]}
        year_repetition = {x: tempo[1].count(x) for x in tempo[1]}

        max_uni, max_repeated = self.get_max_university(university_repetition)
        #print("year_repetition: ", year_repetition)
        tempo = self.get_max_years(year_repetition)

        if tempo is None:
            years = []
        else:
            years = [tempo[0][0], tempo[1][0]]

        eval = Evaluation(gold_standards[1], {max_uni}, years)
        accur = eval.total_accuracy()

        accur_uni += accur[0]
        accu_year += accur[1]

        # person_counter += 1

        return (accur_uni, accu_year)
Example 5
 def __wouldWin(self, board):
     for move in board.validPositions():
         test = Board(board.fetch(), self.player).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.player:
             return True
     return False
Example 6
 def __mustBlock(self):
     for move in self.board.validPositions():
         test = Board(self.board.fetch(), self.otherPlayer).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.otherPlayer:
             return move
     return None
Example 7
def test():
    # For test ---
    filename = 'dset_james_merged_satnum.csv'
    d = Dataset(filename)
    a = Algorithm()
    e = Evaluation(d, a)
    e.getCrossValScores()
    e.plot_PRN_Date(3)
Example 8
 def voxel_matrix_from_height_fields(self, first=False):
     vox_mat = mat_from_fields(self.height_fields, self.parent.sax)
     self.voxel_matrix = vox_mat
     if self.mainmesh:
         self.eval = Evaluation(self.voxel_matrix, self.parent)
         self.fab_directions = self.eval.fab_directions
     if self.mainmesh and not first:
         self.parent.update_suggestions()
Example 9
 def __countLosingMoves(self, board):
     count = 0
     for move in board.validPositions():
         test = Board(board.fetch(), self.otherPlayer).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.otherPlayer:
             count = count + 1
     return count
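Examples 5, 6 and 9 all probe hypothetical moves by wrapping a copied Board in Evaluation and asking for winner(). A hedged sketch of how such checks could combine into a simple move chooser (choose_move is an illustrative name, not part of the original class):

def choose_move(self, board):
    # 1) Take an immediate winning move if one exists.
    for move in board.validPositions():
        if Evaluation(Board(board.fetch(), self.player).move(move)).winner() == self.player:
            return move
    # 2) Otherwise block a move that would let the opponent win next turn.
    for move in board.validPositions():
        if Evaluation(Board(board.fetch(), self.otherPlayer).move(move)).winner() == self.otherPlayer:
            return move
    # 3) Fall back to any remaining legal move.
    positions = list(board.validPositions())
    return positions[0] if positions else None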
Example 10
 def train(self, numIterations=100, testCorpusPath=None):
     if testCorpusPath:
         testCorpus = Corpus(testCorpusPath)
     for i in range(1, numIterations + 1):
         self.algorithm.train()  # call train method from algorithm
         if i % 10 == 0:
             trainEval = Evaluation(self.algorithm.corpus)
             print "Training evaluation for", i, "iteration(s):\n", trainEval.format(
             )
             self.algorithm.corpus.resetSentStats()
             if testCorpusPath:
                 self.setPredictedTags(testCorpus)
                 #self.createOutputFile(testCorpus)
                 testEval = Evaluation(testCorpus)
                 print "Testing evaluation for", i, "iteration(s):\n", testEval.format(
                 )
                 testCorpus.resetSentStats(
                 )  # !!! we can use prototype pattern(so we don't need to loop through sents): here testCorpus = testCorpus.getPrototype() and in Corpus::__init__ : self.prototype = self (google : python prototype)?
Example 11
 def validate(self, features):
     features = self.getFeatureList(self.holdout, features)
     self.validationData['predictedLabel'] = self.classifier.predict(
         features)
     self.validation = Evaluation(self.holdoutTarget,
                                  self.holdout.predictedLabel.tolist())
     self.validation.accuracy()
     self.validation.recall()
     self.validation.precision()
Example 12
def evaluation():
    p_k = [5, 20]
    fa = FileAccess()
    relevance_data = fa.get_relevance_data()
    base_dir = os.getcwd()
    all_runs = os.path.join(os.getcwd(), 'all_runs')
    os.chdir(all_runs)
    e = Evaluation()

    for eachfile in glob.glob('*.txt'):
        e.evaluate(eachfile, p_k, base_dir, relevance_data)
Example 13
def main():
    # Read parameters from the command-line arguments
    vocab = int(sys.argv[1])
    n = int(sys.argv[2])
    smoothing_value = float(sys.argv[3])
    training_file = sys.argv[4]
    test_file = sys.argv[5]

    nb = Classifier(vocab, n, smoothing_value)
    nb.train(training_file)
    nb.test(test_file)
    Evaluation(nb)
    ErrorAnalysis(nb)
Example 14
    def run_simulation(self):
        """
        Description: This method evaluates model performance when
        concept drift is introduced
        Input: none
        Output: Plots of MSE evolution over time
        """
        # Initialize empty dictionaries of accuracy metrics
        population_scores_mlr = {}
        population_scores_rfr = {}
        population_scores_gbr = {}

        self.coefficients = self.create_concept_drift()

        # Initialize a collection with the year-1 population as a dictionary
        populations_collection = {self.defaults['start_year']: self.df}

        # Initialize an empty collection of samples
        samples_list_collection = {}

        simulation_obj = SimulationModel()

        populations_collection = simulation_obj.simulate_next_populations('Experiment4', \
         self.defaults, self.coefficients, populations_collection, self.dimensionality, self.complexity, self.var_type)

        samples_list_collection = simulation_obj.create_samples_collection(self.defaults, \
         populations_collection, samples_list_collection)

        eval_obj = Evaluation()

        population_scores_mlr, population_scores_rfr, population_scores_gbr = eval_obj.train(self.defaults, \
         population_scores_mlr, population_scores_rfr, population_scores_gbr, samples_list_collection)

        # eval_obj.create_correlation_plots(self.defaults, populations_collection, 'Experiment 4: Corr', \
        #  self.dimensionality, self.complexity, self.var_type)

        # Now we create histograms that visualize the distribution of feature X1 changing over time:
        eval_obj.create_histograms(self.defaults, populations_collection,
                                   'Experiment 4: distribution of X1',
                                   self.dimensionality, self.complexity,
                                   self.var_type)

        # Now we create plots that visualize MSE of each model for a timespan of t years
        eval_obj.create_plot_MSE(self.defaults, population_scores_mlr, population_scores_rfr, \
         population_scores_gbr, 'Experiment 4 concept drift: MSE overtime', self.dimensionality, self.complexity, self.var_type)

        print(
            'Experiment 4 on % 2d artificially generated observations for % 2d years is finished.'
            % (self.defaults['n_rows'], self.defaults['n_years']))
Example 15
    def __init__(self, brain=None):
        self.brain = Brain()
        self.game_turns = []
        self.winner = dict()
        self.starting_player = dict()
        self.evaluation = Evaluation(self.winner, self.game_turns,
                                     self.starting_player)
        self.start_board_estimation = []
        self.white_20_estimation = []
        self.black_100_estimation = []

        if brain is None:
            self.initialize_neural_network()
        else:
            self.brain = self.brain.load_saved_brain(brain)
Example 16
    def filter_with_RE(self, input, gold_standards):

        entities = input[0]
        years = input[1]

        filtered_ne = set()

        for item in entities:
            if re_organization(item.title()) is not None:
                filtered_ne.add(item)

        golds = gold_standards[1]
        filtered_ne = list(filtered_ne)

        ev = Evaluation(golds, list(filtered_ne), set(years))
        return ev.total_accuracy()
Example 17
def main():
    PreProcess.set_all_terms()

    evaluation = Evaluation()

    # Naive Bayes :
    # train = PreProcess('train',0,1)
    test = PreProcess('test',0,1)
    # naive_bayes = NaiveBayes(train.x,train.y,train.all_terms)
    # print(evaluation.get_accuracy(naive_bayes.test(train.x[0:200]), test.y[0:200]))

    # SVM :

    train = PreProcess('train',0,.9)
    validation = PreProcess('train',.9,1)
    out = []
Example 18
def simulate(heuristic, size, runs, length, max_queue_length, load):
    evaluation = Evaluation()
    for i in range(runs):
        print("##### Run {} #####".format(i + 1))
        run = Run(heuristics[heuristic],
                  size,
                  max_queue_length,
                  length,
                  i,
                  evaluation,
                  load=load)
        run.evaluate()
    print("\n\n##### Results #####")
    print(json.dumps(evaluation.get_results(), indent=4))
    file_name = "data/results_{}_steps_{}_size_{}_runs_{}.json".format(
        heuristic, length, size, runs)
    with open(file_name, "w+") as file:
        json.dump(evaluation.get_results(), file, indent=4)
Example 19
def main():
    n_of_iterations = 100  # adjust number of iterations of the simulation
    evaluation = Evaluation()

    print('start')

    # We will run each simulation multiple times and evaluate the results at the end of the program run
    for i in range(n_of_iterations):
        simulation = Simualation()
        x, y, z, l = simulation.Simulate(0)
        evaluation.ProcessResults(x, y, z, l)
        print('Simulation', i, 'finished')

    for i in range(n_of_iterations):
        simulation = Simualation()
        x, y, z, l = simulation.Simulate(1)
        evaluation.ProcessResults(x, y, z, l)
        print('Simulation', i, 'finished')

    evaluation.Evaluate()
Example 20
def properties_filter():
    # Gather the query conditions the user selected in the UI widgets
    ActionData.properties.clear()

    args = {
        'pf': platform_select.get(),
        'ge': genre_select.get(),
        'lb': int(from_year_select.get()),
        'rb': int(to_year_select.get()),
        'cs': int(critical_score_scale.get()),
        'us': round((user_score_scale.get()), 1)
    }
    allowed_rating = []
    for idx in range(len(intVar)):
        if intVar[idx].get():
            allowed_rating.append(rating_list[idx])
    args['ar'] = allowed_rating
    evaluate = Evaluation(args)
    evaluate.print_rule()

    for game in game_list:
        if evaluate.qualified(game):
            ActionData.properties.append(game)

    # Print to the terminal how many games match the user's criteria
    print('【RESULT】', len(ActionData.properties))
    # Sort the results by release year in descending order
    ActionData.properties = sorted(
        ActionData.properties,
        key=lambda game: game.year_of_release
        if type(game.year_of_release) == int else -1,
        reverse=True)
    # Show the first matching record in the window
    # Check whether the number of results is > 0
    if len(ActionData.properties):
        ActionData.selection = 0
        result_message['text'] = action_data_agent.change_display()
    else:
        result_message['text'] = '数据库中没有符合要求的游戏'  # "No games in the database match the criteria"
Example 21
    def __init__(self, config, model_name):
        super(HML, self).__init__()
        self.config = config
        self.use_cuda = self.config['use_cuda']
        self.device = torch.device("cuda" if config['use_cuda'] else "cpu")
        self.model_name = model_name

        if self.config['dataset'] == 'movielens':
            from EmbeddingInitializer import UserEmbeddingML, ItemEmbeddingML
            self.item_emb = ItemEmbeddingML(config)
            self.user_emb = UserEmbeddingML(config)
        elif self.config['dataset'] == 'yelp':
            from EmbeddingInitializer import UserEmbeddingYelp, ItemEmbeddingYelp
            self.item_emb = ItemEmbeddingYelp(config)
            self.user_emb = UserEmbeddingYelp(config)
        elif self.config['dataset'] == 'dbook':
            from EmbeddingInitializer import UserEmbeddingDB, ItemEmbeddingDB
            self.item_emb = ItemEmbeddingDB(config)
            self.user_emb = UserEmbeddingDB(config)

        self.mp_learner = MetapathLearner(config)
        self.meta_learner = MetaLearner(config)

        self.mp_lr = config['mp_lr']
        self.local_lr = config['local_lr']
        self.emb_dim = self.config['embedding_dim']

        self.cal_metrics = Evaluation()

        self.ml_weight_len = len(self.meta_learner.update_parameters())
        self.ml_weight_name = list(
            self.meta_learner.update_parameters().keys())
        self.mp_weight_len = len(self.mp_learner.update_parameters())
        self.mp_weight_name = list(self.mp_learner.update_parameters().keys())

        self.transformer_liners = self.transform_mp2task()

        self.meta_optimizer = torch.optim.Adam(self.parameters(),
                                               lr=config['lr'])
Example 22
    def evaluate(self, subset='test', avgType='macro'):
        self.setEvaluationAverage(avgType)
        if subset == 'test':
            data = self.testData
            target = self.testTarget
            indices = self.testIndices
        elif subset == 'validation':
            data = self.validationData
            target = self.validationTarget
            indices = self.validationIndices

        self.evaluation = Evaluation(target, data.predictedLabel.tolist(),
                                     self.evaluationAverage)
        self.evaluation.setAllTags()
        data = self.tagData(data, indices)
        if subset == 'test':
            self.testData = data
        elif subset == 'validation':
            self.validationData = data

        self.evaluation.accuracy()
        self.evaluation.recall()
        self.evaluation.precision()
Example 23
    def evaluate(self, sess, model):
        evaluation = Evaluation()
        test_dis, test_gene, test_label = self.data_set.test_data

        n_total_hit = 0.0
        n_total_test = 0.0
        dis_num = len(test_dis)
        all_hit_list = list()
        for i in range(dis_num):
            if i % 100 == 0:
                print i, '/', dis_num
            feed_dict = {model.dis: test_dis[i],
                         model.gene: test_gene[i]}
            predict = model.predict_dg(sess, feed_dict)
            hit_list, n_known_genes, n_topk_hit = evaluation.get_top_genes(test_gene[i], predict[0], test_label[i])
            n_total_hit += n_topk_hit
            n_total_test += n_known_genes
            all_hit_list.append(hit_list)

        ap = n_total_hit / n_total_test
        prf_summary = evaluation.cal_prf(all_hit_list, n_total_test)

        return prf_summary, ap, n_total_hit, n_total_test
Example 24
def test(model, data_iter, criterion, triples_set, task, num_entity, num_relation, device):
    model.eval()
    with torch.no_grad():
        start = time.time()
        rank_result_list = dict()
        rank_result_list[task] = []
        total_loss = 0
        num_batch = 0
        for i,batch in enumerate(data_iter):
            X = batch
            X_new,X_labels,MASK_index = X_MASK_Test(X,num_entity,num_relation,task)#[B,L,3]
            #actually here MASK_index=[-1], the last triple of input sequence, only test triple.
            X_new = X_new.to(device) #[B,L,3]
            output = model.forward(X_new)
            label_pre = model.predict(output, task, MASK_index)
            # getting test loss 
            X_labels = X_labels.to(device)
            losses = criterion(label_pre,X_labels,task)
            total_loss += losses.item()
            num_batch += 1
            label_pre = label_pre.cpu() #[B,num_mask,num_class]=[B,1,num_class]
            label_pre = label_pre.reshape(-1,label_pre.size(-1))#[B,num_class]
            # different task has different number of class
            H_n = X[:,-1,0] #[B]
            T_n = X[:,-1,1] #[B]
            R_n = X[:,-1,2] #[B]
            # here only last triple is the test triple
            rank_result = Evaluation(label_pre, H_n.numpy(), T_n.numpy(), 
                                     R_n.numpy(),triples_set,task)
            rank_result_list[task] = rank_result_list[task] + rank_result
        test_loss = total_loss/num_batch
        print('----------Evaluation----------')
        print("total time: {:.3f}s.".format(time.time()-start))
        print("prediction result:")
        result = Print_mean_rank(rank_result_list[task], task)
        return test_loss, result  # [mean rank, MRR, hits@1, hits@3, hits@10]
Example 25
heuristics = {
    "GLJD": gljd,
    "EXACT": exact,
    "QBVN": qbvn,
    "DOUBLE":double,
    "QBVN_Cover": qbvn_cover
}
loads = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
seeds = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
switch_sizes = [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25]
max_queue_lengths = [10,25,50]
simulation_lengths = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]

header = "heuristic,switch_size,max_queue_length,simulation_length,load,packet_delay_average,packet_delay_variance,packet_delay_max,packet_delay_min,queue_length_average,queue_length_variance,queue_length_max,queue_length_min,permutation_matrix_amount_average,permutation_matrix_amount_variance,permutation_matrix_amount_max,permutation_matrix_amount_min,throughput_average,throughput_variance,throughput_max,throughput_min"

with open("data/results.csv", "w+") as file:
    file.write(header + "\n")
    for h in heuristics:
        for s in switch_sizes:
            for m in max_queue_lengths:
                for l in simulation_lengths:
                    for load in loads:
                        evaluation = Evaluation()
                        for i in seeds:
                            print("##### Run {} #####".format(i + 1))
                            run = Run(heuristics[h], s, m, l, i, evaluation, load=load)
                            run.evaluate()
                        line = h + "," + str(s) + "," + str(m) + "," + str(l) + "," + str(load)+"," + evaluation.get_results_csv_line() + "\n"
                        file.write(line)
Example 26
    return open(file).read(), expect


def prediction(file, expect):

    pred = Predict(file)
    pred.predict()
    predicted.append(pred.predicted)
    expected.append(expect)


if __name__ == '__main__':

    for fic in glob('../corpus/imdb/*/*.txt'):
        # print(fic)
        file, label = getcontentlabel(fic)
        # print(file)
        # print(label)

        prediction(file, label)

    print(predicted)
    print(expected)

    eval = Evaluation(expected, predicted)

    print(eval.getVraisPos())
    print(eval.getFauxNeg())
    print(eval.getFauxPos())
    print(eval.f_mesure())
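The getVraisPos/getFauxNeg/getFauxPos/f_mesure calls above (French for true positives, false negatives, false positives, and F-measure) point to a confusion-matrix style evaluation. A minimal sketch of such an F-measure over the expected/predicted lists; the function name and the positive label are assumptions:

def f_measure(expected, predicted, positive="pos"):
    # Count confusion-matrix cells for the assumed positive label.
    tp = sum(1 for e, p in zip(expected, predicted) if e == positive and p == positive)
    fp = sum(1 for e, p in zip(expected, predicted) if e != positive and p == positive)
    fn = sum(1 for e, p in zip(expected, predicted) if e == positive and p != positive)
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    return 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0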
Example 27
def Action(lang, numOfClusters,states):
    
    
    if states[0] and states[1] and states[2]:   #    111

        
        if len(lang.getArticles()) == 0:
            showMessage("No articles to cluster")
        
        elif numOfClusters < 2 or numOfClusters > len(lang.getArticles()) - 1:    #    K should be between 2 and n-1
            showMessage("Wrong number of clusters")
            
        else:
            try:
                stream = streamNews(lang)
            except Exception as e:
                print(e)
                showMessage("Error while streaming news")

            try:
                addToDB(lang, stream)
            except Exception as e:
                print(e)
                showMessage("Error while adding to database")

            news = NewsClusters(lang.getArticles(), numOfClusters)
            eval = Evaluation(news)
            eval.createResult()
            showMessage("Clustering completed")
        
        
        
    if states[0] and states[1] and not states[2]:   #    110
        try:
            stream = streamNews(lang)
            addToDB(lang, stream)
            showMessage("{0} articles were merged into database".format(len(stream)))
        except Exception as e:
            print(e)
            showMessage("Error while streaming/adding to database")
    
    
    
    if states[0] and not states[1] and states[2]:       #    101
        try:
            stream = streamNews(lang)
            if numOfClusters < 2 or numOfClusters > len(stream) - 1:
                showMessage("Wrong number of clusters")
            else:
                news = NewsClusters(stream, numOfClusters)
                eval = Evaluation(news)
                eval.createResult()
                showMessage("Clustering completed")
        except Exception as e:
            print(e)
            showMessage("Error while streaming news")
            

            
    
    if states[0] and not states[1] and not states[2]:       #    100
        showMessage("Please check add to database/cluster boxes")
    
    if not states[0] and states[1] and states[2]:       #    011
        showMessage("Please check 'stream' box first")
    
    if not states[0] and states[1] and not states[2]:       #    010
        showMessage("Please check 'stream' box first")
    
    if not states[0] and not states[1] and states[2]:       #    001
        if numOfClusters < 2 or numOfClusters > len(lang.getArticles()) - 1:
            showMessage("Wrong number of clusters")
        else:
            news = NewsClusters(lang.getArticles(), numOfClusters)
            eval = Evaluation(news)
            eval.createResult()
            showMessage("Clustering completed")
            
    
    if not states[0] and not states[1] and not states[2]:       #    000
        showMessage("Please choose one of the options")
Example 28
def evaluation():
    e = Evaluation()
    e.standard_derivation()
    e.Spatial_frequency()
    e.cross_Entropy()
Example 29
'''
from Robot import *
from Evaluation import Evaluation

# Environment bounds
ranges = (0., 10., 0., 10.)
# Reward function
reward_function = 'mes'
# Build the real world, NUM_PTS x NUM_PTS
world = Environment(ranges,
                    NUM_PTS=20,
                    variance=100.0,
                    lengthscale=1.0,
                    visualize=True,
                    seed=3)
evaluation = Evaluation(world, reward_function=reward_function)
# Create the point robot
robot = Robot(sample_world=world.sample_value,
              start_loc=(5.0, 5.0, 0.0),
              ranges=ranges,
              kernel_file=None,
              kernel_dataset=None,
              prior_dataset=None,
              init_lengthscale=1.0,
              init_variance=100.0,
              noise=0.0001,
              path_generator='dubins',
              frontier_size=20,
              horizon_length=5.0,
              turning_radius=0.1,
              sample_step=1.5,
Example 30
import numpy

from Evaluation import Evaluation
TOPK = 20

PROCESS_USER = False
PROPERGATE_LABEL = False
PREPARE_ANNOTATION = True

PATH_PREFIX = 'C:\\Users\\fxw133\\Desktop\\chicago-movement\\data\\'
PATH2_SURVEY_FILE = PATH_PREFIX + 'ActivitySurvey.csv'
PATH2_DEMO_FILE = PATH_PREFIX + 'demo.csv'
PATH2_TRAJECTORY_FILES = PATH_PREFIX + 'chicago_trajectories\\Original\\GPS_Time1\\*.csv'

## output
PATH2_PROCESSED = PATH_PREFIX + 'trajectory_annotation\\processed_user\\'
PATH2_CODEING = "C:\\Users\\fxw133\\Desktop\\chicago-movement\\data\\trajectory_annotation\\GEOCODE.pkle"

PATH2_VENUEDB = "C:\\Users\\fxw133\\Desktop\\chicago-movement\\data\\grid_indexed_venues.pkle"

user_files = [u for u in glob.glob(PATH2_PROCESSED + '*_ground_cand.pkle')]

r_list = list()

for ufile in user_files:
    one_user = pickle.load(open(ufile, 'rb'))
    one_user.annotate_by_measure(measure='dist+pop')
    correct, tot = Evaluation().topk_annotation_acc(one_user, 5)
    print one_user.uid, correct, tot
    r_list.append((correct, tot, correct / float(tot)))
    #pdb.set_trace()