Example #1
class Game:
    def __init__(self):
        self.board = Board()
        self.evaluation = Evaluation(self.board)
        self.view = View(self.board)

    def __finish(self, computerPlayer):
        if self.evaluation.winner() == computerPlayer:
            print "Computer player wins"
        elif self.evaluation.isTie():
            print "Tie game"
        else:
            print "Human player wins"

    def run(self):
        startPlayer = self.view.inputStartPlayer()
        computerPlayer = 'X' if startPlayer == 2 else 'O'
        ai = AI(computerPlayer, self.board)
        while self.evaluation.winner() == None and not self.evaluation.isTie():
            self.view.displayBoard()
            if self.board.getPlayer() == computerPlayer:
                ai.makeMove()
            else:
                move = self.view.inputMove()
                self.board.move(move)
        self.view.displayBoard()
        self.__finish(computerPlayer)
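
The Game class above only relies on Evaluation exposing winner() and isTie(). As a point of reference, here is a minimal sketch of such a class for a 3x3 board; it assumes Board.fetch() returns a flat list of nine cells holding 'X', 'O' or None, which is an illustrative assumption rather than the original project's API.

class Evaluation:
    # Minimal sketch: assumes board.fetch() returns a flat list of 9 cells ('X', 'O' or None).
    LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
             (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
             (0, 4, 8), (2, 4, 6)]              # diagonals

    def __init__(self, board):
        self.board = board

    def winner(self):
        cells = self.board.fetch()
        for a, b, c in self.LINES:
            if cells[a] is not None and cells[a] == cells[b] == cells[c]:
                return cells[a]
        return None

    def isTie(self):
        return self.winner() is None and all(cell is not None for cell in self.board.fetch())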
Example #2
def main(argv):
    if len(argv) < 2 or len(argv) > 3:
        print("Usage: python printCSV.py [locationId] [userLimit] [userOffset]")
        sys.exit(2)

    db = DbRequests()
    evaluate = Evaluation()

    limit = int(argv[1])
    locationId = str(argv[0])
    offset = 0

    if len(argv) == 3:
        offset = int(argv[2])

    res = db.users_and_hotel_in_location(locationId)
    i = 0

    for row in res:
        if row[2] == 5:
            if i >= offset:
                evaluate.evaluateDistinct(row[0], locationId, row[1])

            i += 1

        if i >= (limit+offset):
            break

    evaluate.printCSV()
    sys.exit(2)
Example #3
 def __mustBlock(self):
     for move in self.board.validPositions():
         test = Board(self.board.fetch(), self.otherPlayer).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.otherPlayer:
             return move
     return None
Example #4
class Game:
    def __init__(self):
        self.board = Board()
        self.evaluation = Evaluation(self.board)
        self.view = View(self.board)

    def __finish(self, computerPlayer):
        if self.evaluation.winner() == computerPlayer:
            print "Computer player wins"
        elif self.evaluation.isTie():
            print "Tie game"
        else:
            print "Human player wins"

    def run(self):
        startPlayer = self.view.inputStartPlayer()
        computerPlayer = 'X' if startPlayer == 2 else 'O'
        ai = AI(computerPlayer, self.board)
        while self.evaluation.winner() == None and not self.evaluation.isTie():
            self.view.displayBoard()
            if self.board.getPlayer() == computerPlayer:
                ai.makeMove()
            else:
                move = self.view.inputMove()
                self.board.move(move)
        self.view.displayBoard()
        self.__finish(computerPlayer)
Example #5
 def __wouldWin(self, board):
     for move in board.validPositions():
         test = Board(board.fetch(), self.player).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.player:
             return True
     return False
Example #6
def backtest():
    # Main loop of backtesting

    DIR = '/Data/fx10-20 1H.pkl'
    EMA_PERIODS = 24
    EMA_TP = 50
    EMA_SL = 50
    INIT_CAP = 10000
    RISK = 0.05

    data_loader = PickleLoader(DIR)
    strategy = EmaCross(data_loader, EMA_PERIODS, EMA_TP, EMA_SL)
    portfolio = ConstantRiskPortfolio(INIT_CAP, RISK)
    execution = ExecutionHandlerSimulation(data_loader)
    performance = Evaluation(data_loader, portfolio)

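    # Event loop: each bar, compute signals, size orders, simulate fills,
    # update the portfolio and record performance, then advance the data loader.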
    while data_loader.continue_backtest:
        signals = strategy.compute()

        orders = portfolio.order(signals=signals)

        fills = execution.execute(orders=orders)

        portfolio.update(fills)

        performance.evaluate()

        data_loader.set()

    return
Example #7
    def majority_aggregation(self, input, gold_standards):

        # if os.path.exists(os.getcwd() + path_entities_memory):
        #     (input, gold_standards) = joblib.load(os.getcwd() + path_entities_memory)
        # else:
        #     self.baseline_agregate_NE()
        #     (input, gold_standards) = joblib.load(os.getcwd() + path_entities_memory)

        person_counter = 0
        accur_uni, accu_year = (0.0, 0.0)

        # for key in input.keys():
        tempo = input
        university_repetition = {x: tempo[0].count(x) for x in tempo[0]}
        year_repetition = {x: tempo[1].count(x) for x in tempo[1]}

        max_uni, max_repeated = self.get_max_university(university_repetition)
        #print("year_repetition: ", year_repetition)
        tempo = self.get_max_years(year_repetition)

        if tempo is None:
            years = []
        else:
            years = [tempo[0][0], tempo[1][0]]

        eval = Evaluation(gold_standards[1], {max_uni}, years)
        accur = eval.total_accuracy()

        accur_uni += accur[0]
        accu_year += accur[1]

        # person_counter += 1

        return (accur_uni, accu_year)
Example #8
def load_dependencies(sqlite_db_path):
    print(sqlite_db_path)
    conn = sqlite3.connect(Config.NOW_DB_PATH)
    cursor = conn.cursor()
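    # The query pairs each dependency with both of its evaluation endpoints and their
    # code components: EV_INFLU/CC_INFLU describe the influencing evaluation (the target),
    # EV_DEPEND/CC_DEPEND the dependent one (the source).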
    query = (
        "select D.type as 'DEPENDENCY_TYPE', "
        "EV_INFLU.trial_id, EV_INFLU.id, EV_INFLU.checkpoint, EV_INFLU.code_component_id, EV_INFLU.activation_id, "
        "EV_INFLU.repr, EV_INFLU.member_container_activation_id, EV_INFLU.member_container_id, CC_INFLU.name, CC_INFLU.type, "
        "EV_DEPEND.trial_id, EV_DEPEND.id, EV_DEPEND.checkpoint, EV_DEPEND.code_component_id, EV_DEPEND.activation_id, "
        "EV_DEPEND.repr, EV_DEPEND.member_container_activation_id, EV_DEPEND.member_container_id, CC_DEPEND.name, CC_DEPEND.type "
        "from dependency D "
        "join evaluation EV_DEPEND on D.dependent_id = EV_DEPEND.id "
        "join evaluation EV_INFLU on D.dependency_id = EV_INFLU.id "
        "join code_component CC_DEPEND on EV_DEPEND.code_component_id = CC_DEPEND.id "
        "join code_component CC_INFLU on EV_INFLU.code_component_id = CC_INFLU.id "
    )
    dependencies = []
    for tupl in cursor.execute(query, []):
        typeof = tupl[0]
        target = Evaluation(tupl[1], tupl[2], tupl[3], tupl[4], tupl[5],
                            tupl[6], tupl[7], tupl[8], tupl[9], tupl[10])
        source = Evaluation(tupl[11], tupl[12], tupl[13], tupl[14], tupl[15],
                            tupl[16], tupl[17], tupl[18], tupl[19], tupl[20])
        dependencies.append(Dependency(source, target, typeof))
    conn.close()
    return dependencies
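
Here Evaluation and Dependency act purely as data holders for the 21 selected columns. A minimal sketch of what such containers could look like, assuming the field order mirrors the SELECT list (the field names below follow the query's column aliases and are illustrative, not the original project's definitions):

from collections import namedtuple

# Illustrative containers only; field order follows the query's SELECT list.
Evaluation = namedtuple("Evaluation", [
    "trial_id", "id", "checkpoint", "code_component_id", "activation_id",
    "repr", "member_container_activation_id", "member_container_id",
    "code_component_name", "code_component_type",
])
Dependency = namedtuple("Dependency", ["source", "target", "type"])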
Example #9
 def __wouldWin(self, board):
     for move in board.validPositions():
         test = Board(board.fetch(), self.player).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.player:
             return True
     return False
Example #10
 def __mustBlock(self):
     for move in self.board.validPositions():
         test = Board(self.board.fetch(), self.otherPlayer).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.otherPlayer:
             return move
     return None
Example #11
 def __countLosingMoves(self, board):
     count = 0
     for move in board.validPositions():
         test = Board(board.fetch(), self.otherPlayer).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.otherPlayer:
             count = count + 1
     return count
Example #12
 def __countLosingMoves(self, board):
     count = 0
     for move in board.validPositions():
         test = Board(board.fetch(), self.otherPlayer).move(move)
         evaluation = Evaluation(test)
         if evaluation.winner() == self.otherPlayer:
             count = count + 1
     return count
Example #13
 def validate(self, features):
     features = self.getFeatureList(self.holdout, features)
     self.validationData['predictedLabel'] = self.classifier.predict(
         features)
     self.validation = Evaluation(self.holdoutTarget,
                                  self.holdout.predictedLabel.tolist())
     self.validation.accuracy()
     self.validation.recall()
     self.validation.precision()
Example #14
def main():
	#cf = CF()
	#baseline = Baseline()
	evaluation = Evaluation()
	#evaluation.cal_rmse("yelp_baseline_prediction_upd.data")
	#evaluation.cal_mae("yelp_baseline_prediction_upd.data")
	#evaluation.cal_rmse("yelp_cf_prediction_10000.data")
	#evaluation.cal_rmse("yelp_cf_k100_prediction_10000.data")
	#evaluation.cal_rmse("yelp_cf_k5_prediction_10000.data")
	#evaluation.cal_rmse("yelp_cf_k50_prediction_10000.data")	
	#evaluation.cal_rmse("yelp_baseline_prediction_10000.data")
	evaluation.cal_rmse("yelp_svd_cf_prediction_10000.data")
	evaluation.cal_rmse("yelp_svd_k100_prediction_10000.data")
	evaluation.cal_rmse("yelp_svd_k5_prediction_10000.data")
	evaluation.cal_rmse("yelp_svd_k50_prediction_10000.data")
	#evaluation.cal_rmse("yelp_cluster_cf_prediction_10000.data")
	#evaluation.cal_rmse("yelp_cf_baseline_prediction_10000.data")
	#evaluation.cal_rmse("yelp_cluster_cf_baseline_prediction_10000.data")
	#evaluation.cal_mae("yelp_cf_prediction_10000.data")
	#evaluation.cal_mae("yelp_baseline_prediction_10000.data")
	#print "="*20
	#evaluation.cal_mae("yelp_cf_prediction_10000.data")
	#evaluation.cal_mae("yelp_baseline_prediction_10000.data")
	#evaluation.cal_mae("yelp_svd_cf_prediction_10000.data")
	#evaluation.cal_mae("yelp_cluster_cf_prediction_10000.data")
	#evaluation.cal_mae("yelp_cf_baseline_prediction_10000.data")
	#evaluation.cal_mae("yelp_cluster_cf_baseline_prediction_10000.data")
	pass	
Example #15
def evalaution():
    p_k = [5, 20]
    fa = FileAccess()
    relevance_data = fa.get_relevance_data()
    base_dir = os.getcwd()
    all_runs = os.path.join(os.getcwd(), 'all_runs')
    os.chdir(all_runs)
    e = Evaluation()

    for eachfile in glob.glob('*.txt'):
        e.evaluate(eachfile, p_k, base_dir, relevance_data)
Example #16
def load_KL_Data(goldstd_dir,mprc_result_dir, prc_result_dir, medline_dir, mprc_eval_dir,sampleSize):
    '''Load PRC top hits and MPRC top hits'''
    eval = Evaluation(goldstd_dir,mprc_result_dir, prc_result_dir, medline_dir, mprc_eval_dir,sampleSize)
    eval.loadMPRChits()
    eval.loadPRChits()
    prc_tophits = eval.PRCtophits
    mprc_tophits = eval.tophits
    sample = {}
    for key in prc_tophits.keys():
        sample[key] = [mprc_tophits[key],prc_tophits[key]]
    return sample
Example #17
 def train(self, numIterations=100, testCorpusPath=None):
   if testCorpusPath:
     testCorpus = Corpus(testCorpusPath)
   for i in range(1, numIterations + 1):
     self.algorithm.train() # call train method from algorithm
     if i % 10 == 0:
       # trainEval = Evaluation(self.algorithm.corpus)
       # print "Training evaluation for", i, "iteration(s):\n", trainEval.format()
       # self.algorithm.corpus.resetSentStats()
       if testCorpusPath:
         self.setPredictedTags(testCorpus) 
         testEval = Evaluation(testCorpus)
         print "Testing evaluation for", i, "iteration(s):\n",testEval.format()
         testCorpus.resetSentStats() # !!! we can use prototype pattern(so we don't need to loop through sents): here testCorpus = testCorpus.getPrototype() and in Corpus::__init__ : self.prototype = self (google : python prototype)?
Example #18
    def filter_with_RE(self, input, gold_standards):

        entities = input[0]
        years = input[1]

        filtered_ne = set()

        for item in entities:
            if re_organization(item.title()) is not None:
                filtered_ne.add(item)

        golds = gold_standards[1]
        filtered_ne = list(filtered_ne)

        ev = Evaluation(golds, list(filtered_ne), set(list(years)))
        return ev.total_accuracy()
Example #19
 def train(self, numIterations=100, testCorpusPath=None):
     if testCorpusPath:
         testCorpus = Corpus(testCorpusPath)
     for i in range(1, numIterations + 1):
         self.algorithm.train()  # call train method from algorithm
         if i % 10 == 0:
             # trainEval = Evaluation(self.algorithm.corpus)
             # print "Training evaluation for", i, "iteration(s):\n", trainEval.format()
             # self.algorithm.corpus.resetSentStats()
             if testCorpusPath:
                 self.setPredictedTags(testCorpus)
                 testEval = Evaluation(testCorpus)
                 print "Testing evaluation for", i, "iteration(s):\n", testEval.format(
                 )
                 testCorpus.resetSentStats(
                 )  # !!! we can use prototype pattern(so we don't need to loop through sents): here testCorpus = testCorpus.getPrototype() and in Corpus::__init__ : self.prototype = self (google : python prototype)?
Example #20
 def evaluate_guess(self):
     guess = self.Guesses[-1].get_choices()
     answer = self.Answer.get_choices()
     evaluation = Evaluation.evaluate(guess,answer)
     self.Evaluations.append(evaluation)
     if Game.has_won(evaluation):
         self.status = "Won"
         self.endTime = datetime.datetime.now()
Example #21
 def voxel_matrix_from_height_fields(self, first=False):
     vox_mat = mat_from_fields(self.height_fields, self.parent.sax)
     self.voxel_matrix = vox_mat
     if self.mainmesh:
         self.eval = Evaluation(self.voxel_matrix, self.parent)
         self.fab_directions = self.eval.fab_directions
     if self.mainmesh and not first:
         self.parent.update_suggestions()
Example #22
def simulate(heuristic, size, runs, length, max_queue_length, load):
    evaluation = Evaluation()
    for i in range(runs):
        print("##### Run {} #####".format(i + 1))
        run = Run(heuristics[heuristic],
                  size,
                  max_queue_length,
                  length,
                  i,
                  evaluation,
                  load=load)
        run.evaluate()
    print("\n\n##### Results #####")
    print(json.dumps(evaluation.get_results(), indent=4))
    file_name = "data/results_{}_steps_{}_size_{}_runs_{}.json".format(
        heuristic, length, size, runs)
    with open(file_name, "w+") as file:
        json.dump(evaluation.get_results(), file, indent=4)
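
A possible invocation of simulate(), assuming the "GLJD" key from the heuristics mapping shown in Example #37 below; the parameter values are illustrative only:

# Illustrative call; the values are assumptions, not taken from the source project.
simulate("GLJD", size=4, runs=10, length=1000, max_queue_length=25, load=0.8)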
Example #23
def run():
    logging.getLogger().setLevel(logging.WARNING)
    d = Dataset()
    #d.use_images_in_folder("/home/simon/Datasets/ImageNet_Natural/images/")
    #d.use_images_in_folder("/home/simon/Datasets/ICAO_german/")
    d.use_images_in_folder("/home/simon/Datasets/desko_ids/images_unique/")
    #d.use_images_in_folder("/home/simon/Datasets/croatianFishDataset-final/")
    #d.use_images_in_folder("/home/jaeger/data/croatianFishDataset1-5Dir/")
    d.create_labels_from_path()
    d.fill_split_assignments(1)

    #d.read_from_file("/home/simon/Datasets/CUB_200_2011/cropped_scaled_alex.txt","imagepaths","string")
    #d.read_from_file("/home/simon/Datasets/CUB_200_2011/tr_ID.txt","split_assignments","int")
    #d.read_from_file("/home/simon/Datasets/CUB_200_2011/labels.txt","labels","int")

    c = Classification()
    c.add_algorithm(Resize(512, 320))
    # #c.add_algorithm(Noise('saltpepper',0.1))
    p = ParallelAlgorithm()
    #
    p1 = AlgorithmPipeline()
    p1.add_algorithm(HOG())
    p1.add_algorithm(SpatialPyramid())
    # #p1.add_algorithm(MinMaxNormalize())
    p1.add_algorithm(NormNormalize())
    p.add_pipeline(p1)

    p2 = AlgorithmPipeline()
    p2.add_algorithm(Resize(64, 32))
    p2.add_algorithm(Colorname())
    p2.add_algorithm(SpatialPyramid())
    p2.add_algorithm(NormNormalize())
    # #p2.add_algorithm(MinMaxNormalize())
    p.add_pipeline(p2)

    c.add_algorithm(p)
    # #c.add_algorithm(MinMaxNormalize())
    #c.add_algorithm(NormNormalize())
    # c.add_algorithm(MeanCalculator())
    #c.add_algorithm(Resize(32,24))
    c.add_algorithm(MulticlassSVM())
    # #c.train(d)
    # #for path, gt_label in zip(d.imagepaths, d.labels):
    # #    logging.info("Predicted class for " + path + " is " + str(c.predict(path).data[0]) + " (GT: " + str(gt_label) + ")")

    ## Caffe features
    #c.add_algorithm(Caffe("","","fc7"))
    #c.add_algorithm(MulticlassSVM())

    #with open('run_evaluation.py', 'r') as fin:
    #    print(fin.read())

    mean_acc, mean_mAP = Evaluation.random_split_eval(
        d, c, absolute_train_per_class=1, runs=1)
    #mean_acc,mean_mAP = Evaluation.fixed_split_eval(d,c)
    logging.warning("Total accuracy is " + str(mean_acc))
    logging.warning("Total mAP is " + str(mean_mAP))
Example #24
 def __init__(self,pairDict, base_dir, database_dir, stemmed_corpus_dir, vocab_dir, knnterm_dir, model, goldstd_dir,mprc_result_dir,prc_result_dir,medline_dir,interpret_dir, sampleSize): 
     super(SimilarityAnalysis,self).__init__(pairDict, base_dir, database_dir, stemmed_corpus_dir, vocab_dir, mprc_result_dir, knnterm_dir, model)
     self.query = pairDict.keys()[0] # current query PMID
     self.interpret_dir = interpret_dir
     self.interpret_file = os.path.join(interpret_dir,"%s"%self.query)
     self.eval = Evaluation(goldstd_dir,mprc_result_dir,prc_result_dir,medline_dir,self.interpret_file,sampleSize)
     self.output = {}
     self.pklout = {}
     self.knnTermDict = {}
Example #25
def main():
    n_of_iteratins = 100  # adjust number of iterations of the simulation
    evaluation = Evaluation()

    print('start')

    # We will run each simulation multiple times and evaluate the results at the end of the program run
    for i in range(n_of_iteratins):
        simulation = Simualation()
        x, y, z, l = simulation.Simulate(0)
        evaluation.ProcessResults(x, y, z, l)
        print('Simulation', i, 'finished')

    for i in range(n_of_iteratins):
        simulation = Simualation()
        x, y, z, l = simulation.Simulate(1)
        evaluation.ProcessResults(x, y, z, l)
        print('Simulation', i, 'finished')

    evaluation.Evaluate()
Example #26
    def closest_to_gold(self, input, gold_standards):

        golds = gold_standards[1]
        uni = golds[0][0].lower()
        years = [str(golds[0][1]), str(golds[0][2])]
        eval = Evaluation(golds, uni, years)
        #print("####>>>>",golds,uni,years)

        common_year = set()
        common_university = set()

        for y_ in input[1]:
            if y_ in years:
                common_year.add(y_)

        for u_ in input[0]:
            if u_ == uni:
                common_university.add(u_)
            elif eval.how_university(u_, uni):
                common_university.add(u_)
        #print("####>>>>",common_university,common_year)

        ev = Evaluation(golds, list(common_university), list(common_year))
        #print("####>>>>",ev.total_accuracy())

        return ev.total_accuracy()
Example #27
def properties_filter():
    # Collect the query conditions the user selected in the UI from the individual widgets
    ActionData.properties.clear()

    args = {
        'pf': platform_select.get(),
        'ge': genre_select.get(),
        'lb': int(from_year_select.get()),
        'rb': int(to_year_select.get()),
        'cs': int(critical_score_scale.get()),
        'us': round((user_score_scale.get()), 1)
    }
    allowed_rating = []
    for idx in range(len(intVar)):
        if intVar[idx].get():
            allowed_rating.append(rating_list[idx])
    args['ar'] = allowed_rating
    evaluate = Evaluation(args)
    evaluate.print_rule()

    for game in game_list:
        if evaluate.qualified(game):
            ActionData.properties.append(game)

    # Report in the terminal how many results match the user's criteria
    print('【RESULT】', len(ActionData.properties))
    # Sort the search results by release year in descending order
    ActionData.properties = sorted(
        ActionData.properties,
        key=lambda game: game.year_of_release
        if type(game.year_of_release) == int else -1,
        reverse=True)
    # Display the first record that matches the user's criteria in the window
    # Check whether the number of results is > 0
    if len(ActionData.properties):
        ActionData.selection = 0
        result_message['text'] = action_data_agent.change_display()
    else:
        result_message['text'] = '数据库中没有符合要求的游戏'  # "No games in the database match the criteria"
Example #28
def main():
    # Read the parameters from the command-line arguments
    vocab = int(sys.argv[1])
    n = int(sys.argv[2])
    smoothing_value = float(sys.argv[3])
    training_file = sys.argv[4]
    test_file = sys.argv[5]

    nb = Classifier(vocab, n, smoothing_value)
    nb.train(training_file)
    nb.test(test_file)
    Evaluation(nb)
    ErrorAnalysis(nb)
Example #29
    def __init__(self, config, model_name):
        super(HML, self).__init__()
        self.config = config
        self.use_cuda = self.config['use_cuda']
        self.device = torch.device("cuda" if config['use_cuda'] else "cpu")
        self.model_name = model_name

        if self.config['dataset'] == 'movielens':
            from EmbeddingInitializer import UserEmbeddingML, ItemEmbeddingML
            self.item_emb = ItemEmbeddingML(config)
            self.user_emb = UserEmbeddingML(config)
        elif self.config['dataset'] == 'yelp':
            from EmbeddingInitializer import UserEmbeddingYelp, ItemEmbeddingYelp
            self.item_emb = ItemEmbeddingYelp(config)
            self.user_emb = UserEmbeddingYelp(config)
        elif self.config['dataset'] == 'dbook':
            from EmbeddingInitializer import UserEmbeddingDB, ItemEmbeddingDB
            self.item_emb = ItemEmbeddingDB(config)
            self.user_emb = UserEmbeddingDB(config)

        self.mp_learner = MetapathLearner(config)
        self.meta_learner = MetaLearner(config)

        self.mp_lr = config['mp_lr']
        self.local_lr = config['local_lr']
        self.emb_dim = self.config['embedding_dim']

        self.cal_metrics = Evaluation()

        self.ml_weight_len = len(self.meta_learner.update_parameters())
        self.ml_weight_name = list(
            self.meta_learner.update_parameters().keys())
        self.mp_weight_len = len(self.mp_learner.update_parameters())
        self.mp_weight_name = list(self.mp_learner.update_parameters().keys())

        self.transformer_liners = self.transform_mp2task()

        self.meta_optimizer = torch.optim.Adam(self.parameters(),
                                               lr=config['lr'])
Example #30
def test():
    # For test ---
    filename = 'dset_james_merged_satnum.csv'
    d = Dataset(filename)
    a = Algorithm()
    e = Evaluation(d, a)
    e.getCrossValScores()
    e.plot_PRN_Date(3)
Example #31
    def evaluate(self, subset='test', avgType='macro'):
        self.setEvaluationAverage(avgType)
        if subset == 'test':
            data = self.testData
            target = self.testTarget
            indices = self.testIndices
        elif subset == 'validation':
            data = self.validationData
            target = self.validationTarget
            indices = self.validationIndices

        self.evaluation = Evaluation(target, data.predictedLabel.tolist(),
                                     self.evaluationAverage)
        self.evaluation.setAllTags()
        data = self.tagData(data, indices)
        if subset == 'test':
            self.testData = data
        elif subset == 'validation':
            self.validationData = data

        self.evaluation.accuracy()
        self.evaluation.recall()
        self.evaluation.precision()
Example #32
    def evaluate(self, sess, model):
        evaluation = Evaluation()
        test_dis, test_gene, test_label = self.data_set.test_data

        n_total_hit = 0.0
        n_total_test = 0.0
        dis_num = len(test_dis)
        all_hit_list = list()
        for i in range(dis_num):
            if i % 100 == 0:
                print i, '/', dis_num
            feed_dict = {model.dis: test_dis[i],
                         model.gene: test_gene[i]}
            predict = model.predict_dg(sess, feed_dict)
            hit_list, n_known_genes, n_topk_hit = evaluation.get_top_genes(test_gene[i], predict[0], test_label[i])
            n_total_hit += n_topk_hit
            n_total_test += n_known_genes
            all_hit_list.append(hit_list)

        ap = n_total_hit / n_total_test
        prf_summary = evaluation.cal_prf(all_hit_list, n_total_test)

        return prf_summary, ap, n_total_hit, n_total_test
Example #33
    def __init__(self, brain=None):
        self.brain = Brain()
        self.game_turns = []
        self.winner = dict()
        self.starting_player = dict()
        self.evaluation = Evaluation(self.winner, self.game_turns,
                                     self.starting_player)
        self.start_board_estimation = []
        self.white_20_estimation = []
        self.black_100_estimation = []

        if brain is None:
            self.initialize_neural_network()
        else:
            self.brain = self.brain.load_saved_brain(brain)
Example #34
def main():
    PreProcess.set_all_terms()

    evaluation = Evaluation()

    # Naive Bayes :
    # train = PreProcess('train',0,1)
    test = PreProcess('test',0,1)
    # naive_bayes = NaiveBayes(train.x,train.y,train.all_terms)
    # print(evaluation.get_accuracy(naive_bayes.test(train.x[0:200]), test.y[0:200]))

    # SVM :

    train = PreProcess('train',0,.9)
    validation = PreProcess('train',.9,1)
    out = []
Example #35
    def run_simulation(self):
        """
        Description: This method evaluates model performance when
        concept drift is introduced
        Input: none
        Output: Plots of MSE evolution over time
        """
        # Initialize empty dictionaries of accuracy metrics
        population_scores_mlr = {}
        population_scores_rfr = {}
        population_scores_gbr = {}

        self.coefficients = self.create_concept_drift()

        # Initialize a collection holding the year-1 population as a dictionary
        populations_collection = {self.defaults['start_year']: self.df}

        # Initialize an empty collection of samples
        samples_list_collection = {}

        simulation_obj = SimulationModel()

        populations_collection = simulation_obj.simulate_next_populations('Experiment4', \
         self.defaults, self.coefficients, populations_collection, self.dimensionality, self.complexity, self.var_type)

        samples_list_collection = simulation_obj.create_samples_collection(self.defaults, \
         populations_collection, samples_list_collection)

        eval_obj = Evaluation()

        population_scores_mlr, population_scores_rfr, population_scores_gbr = eval_obj.train(self.defaults, \
         population_scores_mlr, population_scores_rfr, population_scores_gbr, samples_list_collection)

        # eval_obj.create_correlation_plots(self.defaults, populations_collection, 'Experiment 4: Corr', \
        #  self.dimensionality, self.complexity, self.var_type)

        # Now we create histograms that visualize the distribution of feature X1 changing over time:
        eval_obj.create_histograms(self.defaults, populations_collection,
                                   'Experiment 4: distribution of X1',
                                   self.dimensionality, self.complexity,
                                   self.var_type)

        # Now we create plots that visualize MSE of each model for a timespan of t years
        eval_obj.create_plot_MSE(self.defaults, population_scores_mlr, population_scores_rfr, \
         population_scores_gbr, 'Experiment 4 concept drift: MSE overtime', self.dimensionality, self.complexity, self.var_type)

        print(
            'Experiment 4 on % 2d artificially generated observations for % 2d years is finished.'
            % (self.defaults['n_rows'], self.defaults['n_years']))
Example #36
def test(model, data_iter, criterion, triples_set, task, num_entity, num_relation, device):
     model.eval()
     with torch.no_grad():
        start = time.time()
        rank_result_list = dict()
        rank_result_list[task] = []
        total_loss = 0
        num_batch = 0
        for i,batch in enumerate(data_iter):
            X = batch
            X_new,X_labels,MASK_index = X_MASK_Test(X,num_entity,num_relation,task)#[B,L,3]
            #actually here MASK_index=[-1], the last triple of input sequence, only test triple.
            X_new = X_new.to(device) #[B,L,3]
            output = model.forward(X_new)
            label_pre = model.predict(output, task, MASK_index)
            # getting test loss 
            X_labels = X_labels.to(device)
            losses = criterion(label_pre,X_labels,task)
            total_loss += losses.item()
            num_batch += 1
            label_pre = label_pre.cpu() #[B,num_mask,num_class]=[B,1,num_class]
            label_pre = label_pre.reshape(-1,label_pre.size(-1))#[B,num_class]
            # different task has different number of class
            H_n = X[:,-1,0] #[B]
            T_n = X[:,-1,1] #[B]
            R_n = X[:,-1,2] #[B]
            # here only last triple is the test triple
            rank_result = Evaluation(label_pre, H_n.numpy(), T_n.numpy(), 
                                     R_n.numpy(),triples_set,task)
            rank_result_list[task] = rank_result_list[task] + rank_result
        test_loss = total_loss/num_batch
        print('----------Evaluation----------')
        print("total time: {:.3f}s.".format(time.time()-start))
        print("prediction result:")
        result = Print_mean_rank(rank_result_list[task], task)
        return test_loss, result  #[mm, mmr, hits@1, hits@3, hits@10]
Example #37
heuristics = {
    "GLJD": gljd,
    "EXACT": exact,
    "QBVN": qbvn,
    "DOUBLE":double,
    "QBVN_Cover": qbvn_cover
}
loads = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
seeds = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
switch_sizes = [2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25]
max_queue_lengths = [10,25,50]
simulation_lengths = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]

header = "heuristic,switch_size,max_queue_length,simulation_length,load,packet_delay_average,packet_delay_variance,packet_delay_max,packet_delay_min,queue_length_average,queue_length_variance,queue_length_max,queue_length_min,permutation_matrix_amount_average,permutation_matrix_amount_variance,permutation_matrix_amount_max,permutation_matrix_amount_min,throughput_average,throughput_variance,throughput_max,throughput_min"

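# Sweep every combination of heuristic, switch size, queue bound, simulation length and load;
# each configuration aggregates its seeded runs in a fresh Evaluation and appends one CSV line.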
with open("data/results.csv", "w+") as file:
    file.write(header + "\n")
    for h in heuristics:
        for s in switch_sizes:
            for m in max_queue_lengths:
                for l in simulation_lengths:
                    for load in loads:
                        evaluation = Evaluation()
                        for i in seeds:
                            print("##### Run {} #####".format(i + 1))
                            run = Run(heuristics[h], s, m, l, i, evaluation, load=load)
                            run.evaluate()
                        line = h + "," + str(s) + "," + str(m) + "," + str(l) + "," + str(load)+"," + evaluation.get_results_csv_line() + "\n"
                        file.write(line)
Example #38
from Evaluation import Evaluation
from Result import Result
from Setting import Setting
# Dataset and Method are used below; assuming they follow the same one-class-per-module layout as the imports above
from Dataset import Dataset
from Method import Method
import numpy as np

#--------------------------- run the exp ----------------------------
if 1:
    k = 5
    fold = 10
    dataset = Dataset('', '')
    dataset.file_folder_path = '../data/input/'
    
    method = Method('', '')
    method.k = k
    
    evaluation = Evaluation('')
    
    result = Result('', '')
    result.k = k
    
    setting = Setting('', '', dataset, method, result, evaluation)
    setting.fold = fold
    setting.load_classify_save()

if 1:
    fold = 10
    k = 5
    
    result = Result('', '')
    result.k = k
    evaluation = Evaluation('')
Example #39
def eva(formList, groundTruth):
	ev = Evaluation()

	for rank in range(len(formList)):
		fscore = ev.pairwiseFScore(formList[rank]["form"], groundTruth)
		print "Run: #%d = %.4f" % (rank + 1, fscore)
Example #40
def main(argv):
    if len(argv) != 12:
        # python customEvaluation.py 1209176819 1 0 0.2 0.2 0.2 0.2 0.2 y 0 100
        print "Usage: python customEvaluation.py [location] [limit] [offset] [1] [2] [3] [4] [6] [Measure 5 skipBelow] [lowerBoundReviewCount] [upperBoundReviewCount] [userAmountSkip]"
        sys.exit(2)

    blacklist = [
        "A TripAdvisor Member",
        "lass=",
        "Posted by a La Quinta traveler",
        "Bus_Travel_TX",
        "Posted by an Easytobook.com traveler",
        "Posted by an Accorhotels.com traveler",
        "Posted by a cheaprooms.com traveler",
        "Posted by a Worldhotels.com traveler",
        "Posted by a Virgin Holidays traveler",
        "Posted by an OctopusTravel traveler",
        "Posted by a Hotell.no traveler",
        "Posted by a Husa Hoteles traveler",
        "Posted by a Best Western traveler",
        "Posted by a Langham Hotels traveler",
        "Posted by a trip.ru traveler",
        "Posted by a BanyanTree.com traveler",
        "Posted by a Deutsche Bahn traveler",
        "Posted by a Partner traveler",
        "Posted by a Cleartrip traveler",
        "Posted by a Wyndham Hotel Group traveler"
    ]

    db = DbRequests()
    evaluate = Evaluation()

    locationId = str(argv[0])
    limit = int(argv[1])
    offset = int(argv[2])

    weights = [float(argv[3]), float(argv[4]), float(argv[5]), float(argv[6]), 0, float(argv[7])]

    skipBelow = float(argv[8])

    lowerReviewBound = int(argv[9])
    upperReviewBound = int(argv[10])

    userAmountSkip = int(argv[11])

    res = db.users_and_hotel_in_location_with_bound(locationId, lowerReviewBound, upperReviewBound)
    i = 0

    for row in res:
        if row[0] in blacklist:
            continue

        if row[2] == 5:
            if i >= offset:
                print("User(" + str(i) + "): " + row[0])
                evaluate.evaluateJoined(row[0], locationId, row[1], weights, skipBelow, userAmountSkip)

            i += 1

        if i >= (limit+offset):
            break

    print("\n\n### Input Params ###\n")
    print("Weights:")
    print("Measure 1: " + str(weights[0]))
    print("Measure 2: " + str(weights[1]))
    print("Measure 3: " + str(weights[2]))
    print("Measure 4: " + str(weights[3]))
    print("Measure 5 (skip Below): " + str(argv[8]))
    print("Measure 6: " + str(weights[5]))

    print("\nLocation: " + str(locationId))
    print("Limit/Offset: " + str(limit) + "/" + str(offset))
    print("Lower/Upper Reviewbound: " + str(lowerReviewBound) + "/" + str(upperReviewBound))
    evaluate.printAggregatedJoined()
    #print(evaluate.measuresJoined)
    sys.exit(2)
Example #41
 def __findWinner(self, boards):
     for (option, board) in boards.items():
         e = Evaluation(board)
         if e.winner() == self.player:
             return option
     return None
Example #42
 def __init__(self):
     self.board = Board()
     self.evaluation = Evaluation(self.board)
     self.view = View(self.board)
Example #43
def evaluation():
    e = Evaluation()
    e.standard_derivation()
    e.Spatial_frequency()
    e.cross_Entropy()
Example #44
from Robot import *
from Evaluation import Evaluation

# Environment boundaries
ranges = (0., 10., 0., 10.)
# Reward function
reward_function = 'mes'
# Build the real world, NUM_PTS x NUM_PTS
world = Environment(ranges,
                    NUM_PTS=20,
                    variance=100.0,
                    lengthscale=1.0,
                    visualize=True,
                    seed=3)
evaluation = Evaluation(world, reward_function=reward_function)
# Create the point robot
robot = Robot(sample_world=world.sample_value,
              start_loc=(5.0, 5.0, 0.0),
              ranges=ranges,
              kernel_file=None,
              kernel_dataset=None,
              prior_dataset=None,
              init_lengthscale=1.0,
              init_variance=100.0,
              noise=0.0001,
              path_generator='dubins',
              frontier_size=20,
              horizon_length=5.0,
              turning_radius=0.1,
              sample_step=1.5,
Example #45
class SimilarityAnalysis(MPRC):
    '''Interpret the similarity between two documents and identify matched terms and associated scores.'''
    def __init__(self,pairDict, base_dir, database_dir, stemmed_corpus_dir, vocab_dir, knnterm_dir, model, goldstd_dir,mprc_result_dir,prc_result_dir,medline_dir,interpret_dir, sampleSize): 
        super(SimilarityAnalysis,self).__init__(pairDict, base_dir, database_dir, stemmed_corpus_dir, vocab_dir, mprc_result_dir, knnterm_dir, model)
        self.query = pairDict.keys()[0] # current query PMID
        self.interpret_dir = interpret_dir
        self.interpret_file = os.path.join(interpret_dir,"%s"%self.query)
        self.eval = Evaluation(goldstd_dir,mprc_result_dir,prc_result_dir,medline_dir,self.interpret_file,sampleSize)
        self.output = {}
        self.pklout = {}
        self.knnTermDict = {}
        
    def run_MPRC_SKG(self):
        # get original PRC weights
        self.getVocab() # the vocabulary of the articles in pairDict
        self.vectorizeText()
#         self.getKNNterms()
        self.buildDocFreq() # get the document frequency for every word in the vocabulary
        self.calPRCscores() # calculate the weights
#         self.cal_PRC_Similarity() # calculate the similarity
        orig_wtMatrix = self.prc_matrix # the weight matrix from PRC
        self.adjustWeights()
        self.buildDocFreq() # get the document frequency for every word in the vocabulary
        self.calPRCscores() # calculate the weights
        skg_wtMatrix = self.prc_matrix # the weight matrix from MPRC_SKG
        # print the precision on this query
        self.eval.loadMPRChits()
        self.analzeResults_mprc_skg(orig_wtMatrix, skg_wtMatrix)
#         self.analyzeResults_mprc()
#         self.saveOutput()
        
    def run_PRC(self):
        # get original PRC weights
        self.getVocab() # the vocabulary of the articles in pairDict
        self.vectorizeText()
        self.buildDocFreq() # get the document frequency for every word in the vocabulary
        self.calPRCscores() # calculate the weights
        orig_wtMatrix = self.prc_matrix # the weight matrix from PRC
        # print the precision on this query
        self.eval.loadPRChits()
        self.analyzeResults_prc(orig_wtMatrix)
        self.saveOutput()
    
    def run_MPRC(self):
        '''Compare the difference between PRC's selection and MPRC's selection in terms of matched terms.'''
        self.getVocab() # the vocabulary of the articles in pairDict, 100 articles in the corpus, pmidList size 100
        self.vectorizeText()
        orig_doc_term_matrix = self.doc_term_matrix
        self.buildDocFreq() # get the document frequency for every word in the vocabulary
        self.getKNNterms()
        self.calMPRCscores() # calculate the weights
        self.eval.loadMPRChits()
        self.analyzeResults_mprc(orig_doc_term_matrix)   
        self.saveOutput()     
 
    def analyzeResults_prc(self, orig_wtMatrix):
        summary = ''
        if self.query not in self.eval.PRCtophits.keys():
            print "This query %s does not exist in pre-calculated PRC top hits."%self.query
            return
        for similar in self.eval.PRCtophits[self.query]: # PRC selected similar articles
            matchTermScoreDict = self.analyzeEachPair_prc(similar, orig_wtMatrix)
            self.pklout[similar] = (matchTermScoreDict)
            # output this pair of articles, their matched terms and weight changes
            summary += "Current pair: %s - %s\n" %(self.query, similar)
            for k,v in matchTermScoreDict.iteritems():
                summary += "%s: %s\n"%(k,str(v))
        if self.query not in self.output.keys():
            self.output[self.query] = [summary]
        else:
            self.output[self.query].append(summary)  
    
    def analyzeEachPair_prc(self, similar, orig_wtMatrix):
        '''Analyze PRC outputs'''
        query_vocab_index = np.where(self.doc_term_matrix[self.pmidList.index(self.query),:]>0)[0].tolist() # query_vocab_index is a list of indices, not actual terms
        # get the vocabulary of the similar text
        similar_vocab_index = np.where(self.doc_term_matrix[self.pmidList.index(similar),:]>0)[0].tolist() # similar article vocabulary indices
        similar_vocab = [self.vocab[index] for index in similar_vocab_index]
        match = {}
        # matched terms in the similar article
        for index in query_vocab_index:
            ori_term = self.vocab[index]
            match[ori_term]=[] # initialize match term dictionary            
        # term weights in the query
        for term in match.keys():
            if term in similar_vocab:
                query_orig_wt = orig_wtMatrix[0,self.vocab.index(term)]
                similar_orig_wt = orig_wtMatrix[self.pmidList.index(similar),self.vocab.index(term)]
                match[term] = [query_orig_wt, similar_orig_wt]
            else:
                query_orig_wt = orig_wtMatrix[0,self.vocab.index(term)]
                match[term] = [query_orig_wt, 0] # 0 means the similar article does not contain this term         
        return match

    def analyzeResults_mprc(self,orig_doc_term_matrix):
        '''Extract every pair of articles and call the analyzer function'''
        for similar in self.eval.tophits[self.query]: # MPRC selected similar articles
            if similar not in self.pmidList: # if MPRC's selection is not in the original BM25 top 100 selection. this should not happen.
                continue
            self.analyzeEachPair_mprc(similar,orig_doc_term_matrix)
        
    def analyzeEachPair_mprc(self,similar,orig_doc_term_matrix):
        '''Analyze a pair of query text and model predicted similar text''' 
        # get the vocabulary of the query text
        query_vocab_index = np.where(orig_doc_term_matrix[self.pmidList.index(self.query),:]>0)[0].tolist() # query_vocab_index is a list of indices, not actual terms
        query_vocab = [self.vocab[index] for index in query_vocab_index]
        # get the vocabulary of the similar text
        similar_vocab_index = np.where(orig_doc_term_matrix[self.pmidList.index(similar),:]>0)[0].tolist() # similar_vocab_index is a list of indices, not actual terms
        similar_vocab = [self.vocab[index] for index in similar_vocab_index]
        match = {}
        # get the expanded vocabulary of the query text and the matched terms in the similar article
        for index in query_vocab_index:
            ori_term = self.vocab[index]
            overlap=[]
            if ori_term in self.knnTermDict.keys():
                knn_termList = self.knnTermDict[ori_term]
                knn_termList = [t for t in knn_termList if t in self.vocab]
                overlap = list(set([ori_term]+knn_termList).intersection(set(similar_vocab)))
            else:
                overlap = list(set([ori_term]).intersection(set(similar_vocab)))
            if overlap:
                match[ori_term] = overlap
        # output the summary of matched terms
        summary = "Current pair: %s - %s\n" %(self.query, similar)
        summary += "Word count of the query %s: %d\n"%(self.query,np.sum(self.doc_term_matrix[self.pmidList.index(self.query),:]))
        summary += "Word count of the similar article %s: %d\n"%(similar,np.sum(self.doc_term_matrix[self.pmidList.index(similar),:]))
        for k,v in match.iteritems():
            summary += "%s: %s\n"%(k,";".join(v))
        summary += "\n"
        if self.query not in self.output.keys():
            self.output[self.query] = [summary]
        else:
            self.output[self.query].append(summary)
        
    def analzeResults_mprc_skg(self, orig_wtMatrix, skg_wtMatrix):
        '''Extract every pair of articles and call the analyzer function'''
        summary = ''
        for similar in self.eval.tophits[self.query]: # MPRC_SKG selected similar articles
            matchTermScoreDict = self.analyzeEachPair_mprc_skg(similar, orig_wtMatrix, skg_wtMatrix)
            # output this pair of articles, their matched terms and weight changes
            summary += "Current pair: %s - %s\n" %(self.query, similar)
            for k,v in matchTermScoreDict.iteritems():
                summary += "%s: %s\n"%(k,str(v))
                summary += "\n"
        if self.query not in self.output.keys():
            self.output[self.query] = [summary]
        else:
            self.output[self.query].append(summary)
            
    def analyzeEachPair_mprc_skg(self, similar, orig_wtMatrix, skg_wtMatrix):
        '''Analyze MPRC_SKG outputs'''
        query_vocab_index = np.where(self.doc_term_matrix[self.pmidList.index(self.query),:]>0)[0].tolist() # query_vocab_index is a list of indices, not actual terms
#         query_vocab = [self.vocab[index] for index in query_vocab_index]
        # get the vocabulary of the similar text
        similar_vocab_index = np.where(self.doc_term_matrix[self.pmidList.index(similar),:]>0)[0].tolist() # similar_vocab_index is a list of indices, not actual terms
        similar_vocab = [self.vocab[index] for index in similar_vocab_index]
        match = {}
        # matched terms in the similar article
        for index in query_vocab_index:
            ori_term = self.vocab[index]
            if ori_term in similar_vocab:
                match[ori_term]=[] # initialize match term dictionary
        # term weights in the query
        for term in match.keys():
            query_orig_wt = orig_wtMatrix[0,self.vocab.index(term)]
            query_new_wt = skg_wtMatrix[0,self.vocab.index(term)]
            similar_orig_wt = orig_wtMatrix[self.pmidList.index(similar),self.vocab.index(term)]
            similar_new_wt = skg_wtMatrix[self.pmidList.index(similar),self.vocab.index(term)]
            match[term] = [query_new_wt/query_orig_wt,similar_new_wt/similar_orig_wt]
        return match
                    
    def saveOutput(self):
        fout = file(self.interpret_file,"w")
        for summary  in self.output.values():
            for s in summary:
                fout.write(s)
        pklFile = self.interpret_file+".pkl"
        pickle.dump(self.pklout,file(pklFile,"w"))