Exemplo n.º 1
0
def run_single_block(input_list):
    """Generate, evaluate and post-process sliding-window blocks for one
    genomic region.

    ``input_list`` is a single packed argument (this function is typically
    dispatched through a multiprocessing pool); it unpacks into the run
    configuration plus ``block_pos``, whose elements 3/4/5 are the
    chromosome, start and end of the region.

    Returns a list of three ``datetime.timedelta`` values: time spent
    generating, evaluating and post-processing the blocks.
    """
    baseName, model, out_dir, rst, window, keep_temp, threshold, penality, DB_file, input_file, strand, depth, block_pos = input_list
    chromosome = block_pos[3]
    start = block_pos[4]
    end = block_pos[5]
    # Consistency fix: use the already-unpacked start/end like the other
    # progress messages below (previously re-read block_pos[4]/[5]).
    print("Generating blocks ...%s %d %s" % (baseName, start, end))
    ####Generate sliding windlows
    gw_start_time = datetime.datetime.now()
    # NOTE(review): fa_file is not unpacked from input_list, so it must be a
    # module-level global -- confirm it is defined before this runs.
    blocks = Bedgraph_to_blocks(input_file, fa_file, window, depth, block_pos)
    gw_end_time = datetime.datetime.now()
    print("Generate blocks used time: {}\n".format(gw_end_time -
                                                   gw_start_time))

    print("Evaluating blocks ...%s %d %s" % (baseName, start, end))
    ev_start_time = datetime.datetime.now()
    Evaluate(baseName, blocks, model, out_dir, rst, window, keep_temp)

    del blocks  # drop the (potentially large) block list ...
    gc.collect()  # ... and reclaim its memory right away

    ev_end_time = datetime.datetime.now()
    print("Evaluated blocks used time: {}\n".format(ev_end_time -
                                                    ev_start_time))

    print("Postprocessing blocks ...%s %d %s" % (baseName, start, end))
    ps_start_time = datetime.datetime.now()
    Scan_Forward(baseName, threshold, penality, out_dir)
    Scan_Backward(baseName, threshold, penality, out_dir)
    if (keep_temp != 'yes'):
        predict_file = out_dir + '/predict/' + baseName + '.txt'
        # os.remove avoids spawning a shell per file (was: os.system('rm')).
        try:
            os.remove(predict_file)
        except OSError:
            pass  # mirror `rm`'s best-effort behaviour on a missing file
    Postprocess(DB_file, baseName, threshold, penality, out_dir)
    ps_end_time = datetime.datetime.now()
    print("Postprocessed blocks used time: {}\n".format(ps_end_time -
                                                        ps_start_time))

    if (keep_temp != 'yes'):
        forward_file = out_dir + "/maxSum/%s.forward.%d.%d.txt" % (
            baseName, threshold, penality)
        backward_file = out_dir + "/maxSum/%s.backward.%d.%d.txt" % (
            baseName, threshold, penality)
        for temp_file in (forward_file, backward_file):
            try:
                os.remove(temp_file)
            except OSError:
                pass
    return [
        gw_end_time - gw_start_time, ev_end_time - ev_start_time,
        ps_end_time - ps_start_time
    ]
Exemplo n.º 2
0
    def run(self):
        """Run the NLTK baseline and three rule-based sentence filters over
        the source dataset, collect an evaluation record for each method,
        then render all results to ``self.dst``/``self.filename``.
        """
        # get data
        data = TextDataset(self.src)
        # preprocess helpers
        r = RuleBased()
        e = Evaluate()

        def _record(key, sentences):
            # Shared evaluate-and-append step: the original repeated this
            # block verbatim for all four methods below.
            query_list = [s.split(" ") for s in sentences]
            begin, end, indexed_sentences = e.find_query_index(
                data.clean_idx_tokens, query_list)
            evaluation = e.evaluate(data, begin, end)
            self.results.append({"key": key, "begin": begin, "end": end,
                                 "sentences": indexed_sentences,
                                 "evaluation": evaluation})

        # nltk as baseline
        print("Processsing NLTK as baseline ... ")
        nltk_sentences = r.get_nltk_tokenized_sentences(data.clean_text)
        _record("nltk", nltk_sentences)

        # rule_based methods
        print("Processsing rule based (Subject) ... ")
        _record("has_subject", r.filter_subject(nltk_sentences))

        print("Processsing rule based (Verb)... ")
        _record("has_verb", r.filter_verb(nltk_sentences))

        print("Processsing rule based (Subject & Verb)... ")
        _record("has_subjectVerb",
                r.filter_verb(r.filter_subject(nltk_sentences)))

        # write result
        print("Writing data to: " + str(self.dst) + "\033[1m" + str(self.filename) + "\033[0m")
        render = Render(self.dst, self.filename, data, data.ground_truth, self.results)
        render.save()
Exemplo n.º 3
0
    def start(self):
        """Main training loop: self-play, train, then pit the new network
        against the previous snapshot and keep the winner."""
        for iteration in range(CFG.num_iterations):
            print("Iteration", iteration + 1)

            # Self-play states, policy vectors and values for this iteration.
            training_data = []

            for game_idx in range(CFG.num_games):
                print("Start Training Self-Play Game", game_idx + 1)
                # Each game runs on a fresh clone of the board.
                self.play_game(self.game.clone(), training_data)

            # Persist the current network, then load that snapshot into the
            # evaluator network before training mutates the weights.
            self.net.save_model()
            self.eval_net.load_model()

            # Train the network using self-play values.
            self.net.train(training_data)

            # Pit the freshly trained network against the snapshot.
            evaluator = Evaluate(
                current_mcts=MonteCarloTreeSearch(self.net),
                eval_mcts=MonteCarloTreeSearch(self.eval_net),
                game=self.game)
            wins, losses = evaluator.evaluate()

            print("wins:", wins)
            print("losses:", losses)

            total = wins + losses
            # Guard against a division by zero when every game was drawn out.
            win_rate = wins / total if total else 0

            print("win rate:", win_rate)

            if win_rate > CFG.eval_win_rate:
                # Save current model as the best model.
                print("New model saved as best model.")
                self.net.save_model("best_model")
            else:
                print("New model discarded and previous model loaded.")
                # Discard current model and use previous best model.
                self.net.load_model()
Exemplo n.º 4
0
def evaluateVSM(targeEventFile, collFolder, k, relevTh, vsmClassifierFileName, topK):
    """Score the k collected documents ``collFolder/<j>.txt`` against a VSM
    classifier built from the target-event file.

    targeEventFile        -- seed document(s) describing the target event
    collFolder            -- folder prefix holding files ``0.txt`` .. ``k-1.txt``
    k                     -- number of collected documents to score
    relevTh               -- relevance threshold passed to the classifier
    vsmClassifierFileName -- where the built classifier is cached
    topK                  -- number of top terms to keep in the VSM model

    Returns the per-document relevance results from ``evaluateFC``.
    (A large block of superseded classifier-building code that used to sit
    here as a commented-out string literal has been removed.)
    """
    evaluator = Evaluate()
    evaluator.buildVSMClassifier(targeEventFile, vsmClassifierFileName, relevTh, topK)
    collFiles = []
    for j in range(k):
        fn = collFolder + str(j) + '.txt'
        # `with` closes each handle deterministically; the old loop leaked
        # every file object it opened.
        with codecs.open(fn, encoding='utf-8') as f:
            ftext = f.read()
        o = myObj()
        o.text = ftext
        collFiles.append(o)
    res = evaluator.evaluateFC(collFiles)
    return res
Exemplo n.º 5
0
    def build_and_evaluate(self):
        """Build the full LDA pipeline from the configured corpus and score
        the resulting word predictor against ground truth."""
        cfg = self.config

        # Term-document counts feed the topic model.
        td_matrix = TermDocumentMatrix(cfg).load()

        model = LdaModel(cfg)
        model.build(td_matrix, cfg['alpha'], cfg['beta'], cfg['n_topics'],
                    save_model=True)
        p_zw = model.get_p_zw()

        # Topic-word probabilities drive the term-similarity matrix.
        TermSimilarityMatrix(cfg).create(p_zw, save=True)

        predictor = WordPredictor(cfg)

        evaluate = Evaluate(cfg)
        evaluate.ground_truth(predictor)
Exemplo n.º 6
0
    def run(self, alpha, beta):
        """Train one LDA configuration with the given priors and record its
        maximum likelihood (``self.maxlike``) and score (``self.score``)."""
        td_matrix = TermDocumentMatrix(self.config).load()

        model = LdaModel(self.config)
        model.build(td_matrix, alpha, beta, self.config['n_topics'])
        p_zw = model.get_p_zw()
        self.maxlike = model.get_maxlike()

        # Build the term-similarity matrix from topic-word probabilities.
        similarity = TermSimilarityMatrix(self.config)
        similarity.create(p_zw, save=True)

        predictor = WordPredictor(self.config)
        self.score = Evaluate(self.config).ground_truth(predictor)
Exemplo n.º 7
0
def main(args):
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
    global_steps_counter = itertools.count()  # thread-safe

    global_net = Net(Atari.s_dim, Atari.a_dim, 'global', args)
    num_workers = args.threads
    workers = []

    # create workers
    for i in range(num_workers):
        worker_summary_writer = summary_writer if i == 0 else None
        w = Worker(i, Atari(args), global_steps_counter, worker_summary_writer,
                   args)
        workers.append(w)

    saver = tf.train.Saver(max_to_keep=5)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print 'Loading model...\n'
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print 'Initializing a new model...\n'
            sess.run(tf.global_variables_initializer())
        print_params_nums()
        # Start work process for each worker in a seperated thread
        worker_threads = []
        for w in workers:
            run = lambda: w.run(sess, coord, saver)
            t = threading.Thread(target=run)
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)

        if args.eval_every > 0:
            evaluator = Evaluate(global_net, summary_writer,
                                 global_steps_counter, args)
            evaluate_thread = threading.Thread(
                target=lambda: evaluator.run(sess, coord))
            evaluate_thread.start()

        coord.join(worker_threads)
Exemplo n.º 8
0
def run_single_block(input_list):
    """Generate, evaluate and post-process sliding-window blocks for one
    region of one chromosome.

    ``input_list`` packs the whole run configuration (this function is
    typically dispatched through a multiprocessing pool).  The per-chromosome
    reference is read from the module-level global ``ref``.

    Returns [generate, evaluate, postprocess] elapsed times as timedeltas.
    """
    global ref
    baseName,model,out_dir,rst,window,keep_temp,threshold,penality,DB_file,input_file,chromosome,strand,depth,start,end = input_list

    print("Generating blocks ...%s %d %s"%(baseName,start,end))
    ####Generate sliding windlows
    gw_start_time = datetime.datetime.now()
    block = split_chr_bedGraph2(out_dir,input_file,chromosome,strand,window,ref[chromosome],depth,start,end)
    # Dump the generated blocks; `with` guarantees the handle is closed even
    # if a write fails (the old code used bare open()/close()).
    with open(baseName, 'w') as ww:
        for a, b, c in block:
            ww.write('%s\t%s\t%s\n' % (a, b, c))
    gw_end_time = datetime.datetime.now()
    print("Generate blocks used time: {}\n".format(gw_end_time - gw_start_time))

    print("Evaluating blocks ...%s %d %s"%(baseName,start,end))
    ev_start_time = datetime.datetime.now()
    Evaluate(baseName,block,model,out_dir,rst,window,keep_temp)

    del block  # drop the (potentially large) block list ...
    gc.collect()  # ... and reclaim its memory right away

    ev_end_time = datetime.datetime.now()
    print("Evaluated blocks used time: {}\n".format(ev_end_time - ev_start_time))

    print("Postprocessing blocks ...%s %d %s"%(baseName,start,end))
    ps_start_time = datetime.datetime.now()
    Scan_Forward(baseName,threshold,penality,out_dir)
    Scan_Backward(baseName,threshold,penality,out_dir)
    if(keep_temp != 'yes'):
        predict_file = out_dir+'/predict/'+baseName+'.txt'
        # os.remove avoids spawning a shell per file (was: os.system('rm')).
        try:
            os.remove(predict_file)
        except OSError:
            pass  # mirror `rm`'s best-effort behaviour on a missing file
    Postprocess(DB_file,baseName,threshold,penality,out_dir)
    ps_end_time = datetime.datetime.now()
    print("Postprocessed blocks used time: {}\n".format(ps_end_time - ps_start_time))

    if(keep_temp != 'yes'):
        forward_file=out_dir+"/maxSum/%s.forward.%d.%d.txt"%(baseName,threshold,penality)
        backward_file=out_dir+"/maxSum/%s.backward.%d.%d.txt"%(baseName,threshold,penality)
        for temp_file in (forward_file, backward_file):
            try:
                os.remove(temp_file)
            except OSError:
                pass
    return [gw_end_time-gw_start_time,ev_end_time-ev_start_time,ps_end_time-ps_start_time]
Exemplo n.º 9
0
    def do_minimax(board, player, ply, depth):
        """
        For memoization.
        """
        # Stats:
        nonlocal node_count
        nonlocal table_hits
        node_count += 1

        # Transposition:
        board_hash = board.encode()
        if USE_TRANSPOSITION and board_hash in rec_table and\
                depth <= rec_table[board_hash][1]:
            table_hits += 1
            return rec_table[board_hash][0]

        # evaluate board
        b_eval = Evaluate(board)

        if b_eval.utility(ply) != Constants.NON_TERMINAL:  # End game
            ret = b_eval.utility(ply)
        elif depth <= 0:  # max search depth hit
            ret = b_eval.evaluation()
        else:  # recursive case
            successors = board.successors(player)

            # No successors is a draw
            if len(successors) <= 0:
                ret = Constants.DRAW
            elif player == Constants.MAX:
                best_value = Constants.NEGINF
                for succ in successors:
                    v = do_minimax(succ, Constants.MIN, ply + 1, depth - 1)
                    best_value = max(best_value, v)
                ret = best_value
            else:  # if player is minimizer
                best_value = Constants.INF
                for succ in successors:
                    v = do_minimax(succ, Constants.MAX, ply + 1, depth - 1)
                    best_value = min(best_value, v)
                ret = best_value

        # Transposition:
        if USE_TRANSPOSITION:
            rec_table[board_hash] = (ret, depth)
        return ret
Exemplo n.º 10
0
 def walk_proximity(self,
                    trained=True,
                    num_walks=100,
                    walk_length=40,
                    workers=5):
     """Return the random-walk proximity embedding.

     When ``trained`` is true, a previously saved embedding is loaded from
     disk instead of being recomputed.
     """
     if trained:
         return np.loadtxt(self.walk_structure_embedding)

     # Sample random walks over the graph's adjacency structure.
     walk_structure = utils.walk_proximity(self.graph.adj,
                                           num_walks,
                                           walk_length,
                                           workers=workers)
     print('游走已完成...')  # progress message: random walks finished
     # Train a sparse auto-encoder on the walk matrix and persist the result.
     auto_encoder = SparseAE(self.args, walk_structure, Evaluate(10).loss(),
                             self.walk_structure_embedding)
     return auto_encoder.train(parallel=False)
Exemplo n.º 11
0
def compute_scores(raw_data_dir=FLAGS.raw_data, data_dir=FLAGS.data_dir,
  dataset=FLAGS.dataset, save_recommendation=FLAGS.saverec,
  train_dir=FLAGS.train_dir, test=FLAGS.test):
  """Generate recommendations for all evaluation users, score them, log the
  metrics, and optionally save the recommendation indices to train_dir.

  Note: the default argument values are captured from FLAGS at import time.
  """
  from evaluate import Evaluation as Evaluate
  evaluation = Evaluate(raw_data_dir, test=test)

  # Recommendations for every user id in the evaluation set.
  R = recommend(evaluation.get_uids(), data_dir=data_dir)

  evaluation.eval_on(R)
  scores_self, scores_ex = evaluation.get_scores()
  mylog("====evaluation scores (NDCG, RECALL, PRECISION, MAP) @ 2,5,10,20,30====")
  mylog("METRIC_FORMAT (self): {}".format(scores_self))
  mylog("METRIC_FORMAT (ex  ): {}".format(scores_ex))
  if save_recommendation:
    name_inds = os.path.join(train_dir, "indices.npy")
    # Bug fix: the previous code saved the undefined name `rec` (NameError);
    # the recommendations computed above are held in `R`.
    np.save(name_inds, R)
Exemplo n.º 12
0
def evaluateClassifier(classifierFile, cf, k):
    """Build a pos/neg classifier and score the k collected documents
    ``cf<j>.txt``; write the per-document results next to them and print
    the total.

    classifierFile -- where the built classifier is cached
    cf             -- folder/prefix holding files ``0.txt`` .. ``k-1.txt``
    k              -- number of collected documents to score
    """
    evaluator = Evaluate()
    evaluator.buildClassifier("posFile", "negFolder", classifierFile)
    collFiles = []
    for j in range(k):
        fn = cf + str(j) + '.txt'
        # `with` closes each handle; the old loop never closed them.
        with codecs.open(fn, encoding='utf-8') as f:
            ftext = f.read()
        o = myObj()
        o.text = ftext
        collFiles.append(o)
    res = evaluator.evaluateFC(collFiles)
    with open(cf + 'evaluationRes_Classf.txt', 'w') as out:
        out.write('\n'.join([str(r) for r in res]))
    # print() call form works identically on Python 2 and 3 for one argument.
    print(sum(res))
Exemplo n.º 13
0
def main():
    """Scrape one netkeiba race page and print each runner's evaluation."""
    url = "https://race.netkeiba.com/?pid=race_old&id=n201908050411"

    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'lxml')

    race_name, distance = Get_Race_Info(soup)
    print(race_name)
    link_list, horse_list = Get_Link_List(soup)

    for link_url, horse_name in zip(link_list, horse_list):
        history = Scraping(link_url)
        print(horse_name)

        # Score this horse's past runs against the race distance.
        ave_list = Evaluate(history, distance)
        print(ave_list)
Exemplo n.º 14
0
def main():
    """Parse the AllenNLP command line, register any extra packages, and
    dispatch to the selected subcommand (printing help when none is given)."""
    prog = "python -m allennlp.run"
    subcommand_overrides = {}
    # pylint: disable=dangerous-default-value
    parser = argparse.ArgumentParser(description="Run AllenNLP",
                                     usage='%(prog)s',
                                     prog=prog)
    # (A stray debug `print(parser)` that dumped the parser's repr on every
    # run has been removed.)

    subparsers = parser.add_subparsers(title='Commands', metavar='')

    subcommands = {
        # Default commands
        "train": Train(),
        "evaluate": Evaluate(),
        "evaluate_mlqa": Evaluate_MLQA(),
        "make-vocab": MakeVocab(),
        "fine-tune": FineTune(),
        # Superseded by overrides
        **subcommand_overrides
    }

    for name, subcommand in subcommands.items():
        subparser = subcommand.add_subparser(name, subparsers)
        subparser.add_argument('--include-package',
                               type=str,
                               action='append',
                               default=[],
                               help='additional packages to include')

    args = parser.parse_args()

    # If a subparser was triggered it stored its entry point as `args.func`;
    # otherwise no subcommand was given, so show usage help.
    # (hasattr replaces the needlessly expensive `'func' in dir(args)`.)
    if hasattr(args, 'func'):
        # Import any additional modules needed (to register custom classes).
        for package_name in args.include_package:
            import_submodules(package_name)
        args.func(args)
    else:
        parser.print_help()
Exemplo n.º 15
0
def main(out_dir, input_file, input_plus, input_minus, fa_file, keep_temp,
         window, name, model, rst, threshold, penality, DB_file):
    """Full pipeline: generate sliding windows, evaluate each data block,
    post-process, and concatenate all results into <name>.predicted.txt.

    Temporary per-block files are deleted unless keep_temp == 'yes'.
    """

    def _remove_quiet(*paths):
        # Best-effort delete without spawning a shell (was: os.system('rm')).
        for p in paths:
            try:
                os.remove(p)
            except OSError:
                pass

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    out_dir = out_dir + '/' + name
    ####Generate sliding windlows
    Generate_windows(out_dir, input_file, input_plus, input_minus, fa_file,
                     keep_temp, window, name)

    data_dir = out_dir + '/data'
    data_files = glob.glob(data_dir + "/*")
    for data in data_files:
        if 'wig' in data:
            continue  # skip coverage tracks; only block files are evaluated
        baseName = data.split('/')[-1]
        Evaluate(model, out_dir, rst, window, baseName, keep_temp)
        Scan_Forward(baseName, threshold, penality, out_dir)
        Scan_Backward(baseName, threshold, penality, out_dir)
        if (keep_temp != 'yes'):
            _remove_quiet(out_dir + '/predict/' + baseName + '.txt')
        Postprocess(DB_file, baseName, threshold, penality, out_dir)
        if (keep_temp != 'yes'):
            _remove_quiet(
                out_dir + "/maxSum/%s.forward.%d.%d.txt" % (
                    baseName, threshold, penality),
                out_dir + "/maxSum/%s.backward.%d.%d.txt" % (
                    baseName, threshold, penality))

    out_file = '%s/%s.predicted.txt' % (out_dir, name)
    # Write the header first; per-block results are appended by `cat` below.
    with open(out_file, 'w') as ww:
        ww.write('predicted_pasid\tdb_diff\tdb_pasid\tscore\n')
    os.system('cat %s/maxSum/*bidirection* >>%s' % (out_dir, out_file))
    if (keep_temp != 'yes'):
        os.system('rm -rf %s/data %s/predict %s/maxSum' %
                  (out_dir, out_dir, out_dir))

    print("Job Done!")
Exemplo n.º 16
0
    def evaluate(self):
        """Pull model artefacts and test data from HDFS, run prediction over
        the test set, evaluate the predictions, and push the metrics file
        back to HDFS."""
        flags = self.flags
        datasets = flags.datasets_path

        # Start from clean local copies of the vocab and checkpoint dirs.
        shutil.rmtree(flags.vocab_path)
        shutil.rmtree(flags.checkpoint_path)

        vocab_name = os.path.basename(os.path.normpath(flags.vocab_path))
        self.hdfs_client.hdfs_download(
            os.path.join(flags.output_path, vocab_name), flags.vocab_path)
        self.hdfs_client.hdfs_download(
            os.path.join(flags.input_path, 'test.txt'),
            os.path.join(datasets, 'test.txt'))
        checkpoint_name = os.path.basename(
            os.path.normpath(flags.checkpoint_path))
        hdfs_checkpoint_path = os.path.join(flags.output_path, checkpoint_name)
        self.hdfs_client.hdfs_download(hdfs_checkpoint_path,
                                       flags.checkpoint_path)

        # Label and split the raw test file into model-ready input.
        self.data_utils.label_segment_file(
            os.path.join(datasets, 'test.txt'),
            os.path.join(datasets, 'label_test.txt'))
        self.data_utils.split_label_file(
            os.path.join(datasets, 'label_test.txt'),
            os.path.join(datasets, 'split_test.txt'))

        predict = Predict()
        predict.file_predict(
            os.path.join(datasets, 'split_test.txt'),
            os.path.join(datasets, 'test_predict.txt'))

        self.model_evaluate = Evaluate()
        self.model_evaluate.evaluate(
            os.path.join(datasets, 'test_predict.txt'),
            os.path.join(datasets, 'test_evaluate.txt'))

        # Replace any stale copy of the metrics file on HDFS.
        self.hdfs_client.hdfs_delete(
            os.path.join(flags.output_path, 'test_evaluate.txt'))
        self.hdfs_client.hdfs_upload(
            os.path.join(datasets, 'test_evaluate.txt'),
            os.path.join(flags.output_path, 'test_evaluate.txt'))
Exemplo n.º 17
0
    def __call__(self,
                 number_of_iterations=2,
                 learning_rate=0.005,
                 embedding_size=300,
                 hidden_size=100,
                 batch_size=100):
        """Configure, train and persist the GRU image-retrieval model."""
        print("Starting 'Image Retrieval' in 'GRU' mode with '" +
              self.difficulty + "' data")

        # Output artefacts are tagged with the run's hyper-parameters.
        run_tag = "{}_{}_{}_{}".format(self.model_name, self.timestamp,
                                       learning_rate, embedding_size)
        self.model_full_path = self.model_path + "/" + run_tag + ".pty"
        self.output_file_name = self.output_path + "/" + run_tag + ".csv"

        # Hyper-parameters for this run.
        self.number_of_iterations = number_of_iterations
        self.learning_rate = learning_rate
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.batch_size = batch_size

        self.model = GRU(self.nwords, self.embedding_size,
                         self.image_feature_size, self.output_vector_size,
                         self.hidden_size, self.batch_size)
        self.criterion = nn.CrossEntropyLoss()

        self.evaluate = Evaluate(self.model, self.img_features, self.minibatch,
                                 self.preprocess, self.image_feature_size,
                                 self.output_vector_size)
        print(self.model)

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.learning_rate)

        self.train_loss_values = []

        # Train, then persist the weights and the collected training losses.
        self.magic()
        self.save_model()
        self.save_data()
Exemplo n.º 18
0
    def __init__(self):
        """Set up the maze environment, a restored policy-gradient brain, and
        the bookkeeping used while evaluating deceptive paths."""
        self.train_episode = 1000
        self.r = False  # whether to render the environment
        self.u = False  # whether to update (train) the brain
        self.env = envR.envR(rows=10, cols=10, n_features=10)
        # Per-episode step cap; cf. (self.env.maze.c - 2) * (self.env.maze.r - 2)
        self.max_steps = 30
        self.brain = PolicyGradient(n_actions=4,
                                    n_features=self.env.maze.c * self.env.maze.r,
                                    learning_rate=0.0001,
                                    reward_decay=0.95,
                                    output_graph=False,
                                    restore=True)

        # Evaluation helper and per-run statistics.
        self.evaluate = Evaluate(rows=10, cols=10, start_pos=(10, 1))
        self.num_fail = 0
        self.num_find_target = 0
        self.cost = []
        self.density = []  # dp is deceptive_percentage
        self.opt_cost = []
        self.opt_dp = []  # optimal deceptive path
        self.path = []
        self.reward = []
Exemplo n.º 19
0
    def __init__(self, position):
        """Prepare search state for `position`: transposition table,
        move-ordering heuristics, counters and time control."""
        self.position = position

        # Transposition table: fixed-size array indexed by hash.
        self.TT_SIZE = 2 ** 16
        self.tt = [None] * self.TT_SIZE

        # Killer-move heuristic, indexed by ply then colour.
        self.killers = [[None, None] for _ in range(50)]

        # History heuristic, indexed by colour, start square, end square.
        self.history = [[[0] * 64 for _ in range(64)] for _ in range(2)]

        # Node counter for the current search.
        self.node_count = 0

        # Time control for the current search.
        self.start_time = None
        self.time_limit = None

        self.eval = Evaluate()
Exemplo n.º 20
0
def test(RL):
    """Run 100 greedy evaluation episodes of `RL` in the maze environment and
    print aggregate path statistics (cost, density, deceptive extent)."""
    env = envR(show=False)
    path, cost, density, num_find_target, opt_cost = [], [], [], 0, []
    evaluate = Evaluate(rows=10, cols=10)
    train = False  # evaluation only: never update the policy
    succ = 0
    print("****************************************************")
    for episode in range(100):
        pre_maps = env.reset()
        step = 0
        evaluate.set_start(start_pos=env.agent)
        evaluate.set_goals(real_pos=env.maze.food_pos[0],
                           fake_pos=env.maze.food_pos[1])
        for step in range(100):

            action = RL.choose_action(str(pre_maps), train)

            reward, done, action_ = env.step(action)

            path.append(action_)

            step += 1
            if done:
                succ += 1
                cost, density, num_find_target, opt_cost = evaluation(
                    evaluate, cost, density, num_find_target, opt_cost, path)
                path = []  # start a fresh path for the next episode
                break
            pre_maps = env.get_maps()
    print('This is ', episode, 'cost:', step, 'succ', succ)
    # Bug fix: guard the deceptive-extent ratio -- the old code divided by
    # zero when no episode ever reached the target.
    extent = num_find_target / succ if succ else 0
    print('average cost:', np.mean(cost), ' average density:',
          np.mean(density), ' deceptive extent:', extent)
    print('optimal cost:', np.mean(opt_cost))
    print()
Exemplo n.º 21
0
 def recombinate(self, gen_no, evaluated_num, pop_size):
     """Produce one generation of offspring via tournament selection,
     crossover and mutation, evaluate them, and save the extended population.

     gen_no        -- current generation number (used in save-file names)
     evaluated_num -- count of individuals already evaluated
     pop_size      -- number of parents; pop_size offspring are produced
     """
     print("mutation and crossover...")
     offspring_list = []
     for _ in range(int(pop_size / 2)):
         # Two tournament winners parent each crossover pair.
         p1 = self.tournament_selection()
         p2 = self.tournament_selection()
         # crossover
         offset1, offset2 = self.crossover(p1, p2)
         # mutation
         offset1.mutation()
         offset2.mutation()
         offspring_list.append(offset1)
         offspring_list.append(offset2)
     offspring_pops = Population(0)
     offspring_pops.set_populations(offspring_list)
     # Parents and offspring coexist in self.pops until selection.
     self.pops.pops.extend(offspring_pops.pops)
     save_offspring(gen_no, offspring_pops)
     # evaluate these individuals
     evaluate = Evaluate(self.pops, self.batch_size)
     evaluate.parse_population(gen_no, evaluated_num)
     # NOTE(review): this slice assignment re-writes the offspring block that
     # extend() appended above -- presumably to replace them with their
     # evaluated copies; confirm offspring_pops.pops is updated in place by
     # parse_population before relying on this.
     self.pops.pops[pop_size:2 * pop_size] = offspring_pops.pops
     save_populations(gen_no=gen_no, pops=self.pops)
     save_each_gen_population(gen_no=gen_no, pops=self.pops)
Exemplo n.º 22
0
def train(use_train=False):
    """Train (or load) a logistic-regression model on TRAIN_DATA_PATH and
    print accuracy/recall/F1 plus the project-specific score report.

    use_train -- when True, fit a fresh model and dump it to model.pkl;
                 otherwise load the previously pickled model.
    """
    data = np.load(TRAIN_DATA_PATH)
    # Last column is the label; everything before it is the feature matrix.
    features = data[:, :-1]
    labels = data[:, -1]
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.3, random_state=42)

    if use_train:
        # Hyper-parameters mirror a previously tuned configuration.
        model = LogisticRegression(C=5,
                                   class_weight=None,
                                   dual=False,
                                   fit_intercept=True,
                                   intercept_scaling=1,
                                   l1_ratio=None,
                                   max_iter=100,
                                   multi_class='ovr',
                                   penalty='l2',
                                   random_state=None,
                                   solver='lbfgs',
                                   tol=0.0001,
                                   verbose=3,
                                   warm_start=False)
        model.fit(X_train, y_train)
        joblib.dump(model, 'model.pkl')
    else:
        model = joblib.load('model.pkl')

    y_pred = model.predict(X_test)
    print("acc\t:{}\nrecall\t:{}\nf1\t:{}".format(
        accuracy_score(y_test, y_pred),
        recall_score(y_test, y_pred, average='weighted'),
        f1_score(y_test, y_pred, average='weighted')))
    e = Evaluate()
    e.show_need_scores(y_test, y_pred)
Exemplo n.º 23
0
from board import Board, PieceStack, Turn, get_piece_text, EMPTY
from evaluate import Evaluate, WIN
from random import randint
from search import RootOfAlphaBetaSearch

# Shared game state for the Quarto-style game below: the stack of remaining
# pieces, whose turn it is, the board itself, and the position evaluator
# used by the search.
piecestack = PieceStack()

turn = Turn()

board = Board()

evaluate = Evaluate()


def UserTurn(piecestack, board, piece):
    """Show the board, take `piece` from the stack, and keep prompting the
    user for 1-based x/y coordinates until an empty square is chosen
    (coordinates are stored 0-based)."""
    board.show()
    piecestack.show()

    piecestack.TakePiece(piece)

    print('Piece: {0}'.format(get_piece_text(piece)))
    while True:
        entered = raw_input("Enter x y coordinates to place piece: ").split()
        x, y = [int(coord) - 1 for coord in entered]
        if board.pieces[x][y] is EMPTY:
            break
        print('Square is not empty. Try another one.')
Exemplo n.º 24
0
def main():
    """Train a grid of policy-gradient networks (neuron count x depth) on a
    wolf/sheep pursuit task, evaluate each trained model's mean episode
    reward, and plot the results.

    NOTE(review): relies on module-level names defined elsewhere in the file
    (ag, env, reward, PG, pg, np, it, GeneratePolicyNet, Evaluate, draw) --
    confirm the imports at the top of the file.
    """
    # action space
    # Eight discrete movement vectors (4 axis-aligned + 4 diagonal).
    actionSpace = [[10, 0], [7, 7], [0, 10], [-7, 7], [-10, 0], [-7, -7], [0, -10], [7, -7]]
    numActionSpace = len(actionSpace)

    # state space
    numStateSpace = 4
    xBoundary = [0, 360]
    yBoundary = [0, 360]
    checkBoundaryAndAdjust = ag.CheckBoundaryAndAdjust(xBoundary, yBoundary)

    # Episode resets sample each agent's start around the arena centre with
    # per-agent noise (sheep spreads wider than the wolf).
    initSheepPositionMean = np.array([180, 180])
    initWolfPositionMean = np.array([180, 180])
    initSheepPositionNoise = np.array([120, 120])
    initWolfPositionNoise = np.array([60, 60])
    sheepPositionReset = ag.SheepPositionReset(initSheepPositionMean, initSheepPositionNoise, checkBoundaryAndAdjust)
    wolfPositionReset = ag.WolfPositionReset(initWolfPositionMean, initWolfPositionNoise, checkBoundaryAndAdjust)

    # Each agent contributes 2 state entries (its x/y position).
    numOneAgentState = 2
    positionIndex = [0, 1]

    sheepPositionTransition = ag.SheepPositionTransition(numOneAgentState, positionIndex, checkBoundaryAndAdjust)
    wolfPositionTransition = ag.WolfPositionTransition(numOneAgentState, positionIndex, checkBoundaryAndAdjust)

    numAgent = 2
    sheepId = 0
    wolfId = 1
    transitionFunction = env.TransitionFunction(sheepId, wolfId, sheepPositionReset, wolfPositionReset,
                                                sheepPositionTransition, wolfPositionTransition)
    # An episode terminates when the wolf gets within minDistance of the sheep.
    minDistance = 15
    isTerminal = env.IsTerminal(sheepId, wolfId, numOneAgentState, positionIndex, minDistance)

    # Rendering setup (pygame window sized to the arena boundaries).
    screen = pg.display.set_mode([xBoundary[1], yBoundary[1]])
    screenColor = [255, 255, 255]
    circleColorList = [[50, 255, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50], [50, 50, 50],
                       [50, 50, 50], [50, 50, 50], [50, 50, 50]]
    circleSize = 8
    saveImage = False
    saveImageFile = 'image'
    render = env.Render(numAgent, numOneAgentState, positionIndex, screen, screenColor, circleColorList, circleSize,
                        saveImage, saveImageFile)

    # Reward: per-step penalty plus a terminal bonus/penalty, discounted by
    # rewardDecay when accumulated over a trajectory.
    aliveBouns = -1
    deathPenalty = 20
    rewardDecay = 0.99
    rewardFunction = reward.TerminalPenalty(sheepId, wolfId, numOneAgentState, positionIndex, aliveBouns, deathPenalty, isTerminal)
    accumulateRewards = PG.AccumulateRewards(rewardDecay, rewardFunction)

    maxTimeStep = 150
    sampleTrajectory = PG.SampleTrajectory(maxTimeStep, transitionFunction, isTerminal)

    approximatePolicy = PG.ApproximatePolicy(actionSpace)
    trainPG = PG.TrainTensorflow(actionSpace)

    numTrajectory = 20
    maxEpisode = 1000

    # Generate models.
    learningRate = 1e-4
    hiddenNeuronNumbers = [128, 256, 512, 1024]
    hiddenDepths = [2, 4, 8]
    # hiddenNeuronNumbers = [128]
    # hiddenDepths = [2]
    # One model per (total neurons n, depth d) pair; each layer gets n/d neurons.
    generateModel = GeneratePolicyNet(numStateSpace, numActionSpace, learningRate)
    models = {(n, d): generateModel(d, round(n / d)) for n, d in it.product(hiddenNeuronNumbers, hiddenDepths)}
    print("Models generated")

    # Train.
    policyGradient = PG.PolicyGradient(numTrajectory, maxEpisode, render)
    trainModel = lambda model: policyGradient(model, approximatePolicy,
                                                             sampleTrajectory,
                                                             accumulateRewards,
                                                             trainPG)
    trainedModels = {key: trainModel(model) for key, model in models.items()}
    print("Finished training")

    # Evaluate
    modelEvaluate = Evaluate(numTrajectory, approximatePolicy, sampleTrajectory, rewardFunction)
    meanEpisodeRewards = {key: modelEvaluate(model) for key, model in trainedModels.items()}
    print("Finished evaluating")
    # print(meanEpisodeRewards)

    # Visualize
    independentVariableNames = ['NeuroTotalNumber', 'layerNumber']
    draw(meanEpisodeRewards, independentVariableNames)
    print("Finished visualizing", meanEpisodeRewards)
            optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)

        print(model)
        print(model_fname)

        # # train the model
        # for param in model.parameters():
        #     param.requires_grad = True

        model.train()
        train_loop()

    # evaluate the model
    if args.evaluate:
        if args.evaluate_on_cpu:
            device = "cpu"

        model = model.to(device)
        model.eval()

        if args.train:
            Evaluate(model, test_loader, outpath, args.target, device, args.n_epochs)
        elif args.load:
            Evaluate(model, test_loader, outpath, args.target, device, args.load_epoch)

## -----------------------------------------------------------
# # to retrieve a stored variable in pkl file
# import pickle
# with open('../../test_tmp_delphes/experiments/PFNet7_gen_ntrain_2_nepochs_3_batch_size_3_lr_0.0001/confusion_matrix_plots/cmT_normed_epoch_0.pkl', 'rb') as f:  # Python 3: open(..., 'rb')
#     a = pickle.load(f)
Exemplo n.º 26
0
 def playit(self):
     """Compute the AI's possible moves for the current board and print the
     first coordinate of the first candidate move.

     NOTE(review): looks like a stub/debug entry point -- Evaluate and
     checkPossibleMoves are project classes whose contract is not visible
     here; confirm the intended final behavior.
     """
     # Evaluate(self.board, True): evaluator for the AI side ("True"
     # presumably selects the AI player -- TODO confirm).
     evaluation_ai = Evaluate(self.board, True)
     aiMoves = evaluation_ai.checkPossibleMoves()
     # Cleanup: removed the unused locals `point` and `depth` from the
     # original; they were assigned but never read.
     print(aiMoves[0][0][0])
Exemplo n.º 27
0
        axs[0].grid()
        axs[0].set_title('Loss')
        axs[1].plot(history['Train_dice'], label='Train Dice')
        axs[1].plot(history['Valid_dice'], label='Valid Dice')
        axs[1].legend()
        axs[1].grid()
        axs[1].set_title('Dice')
        plt.savefig('../output/loss_dice.png')

    ########################################################################
    # Evaluate the network
    # get all predictions of the validation set: maybe a memory error here.
    if args.load_mod:
        # load the best model
        net.load_state_dict(torch.load(MODEL_FILE))
        eva = Evaluate(net, device, validloader, args, isTest=False)
        eva.search_parameter()
        dice, dicPred, dicSubmit = eva.predict_dataloader()
        # eva.plot_sampled_predict()

        # evaluate the prediction
        sout = '\nFinal Dice {:.3f}\n'.format(dice) +\
         '==============Predict===============\n' + \
         analyze_labels(pd.DataFrame(dicPred)) # +\
        #	'==============True===============\n' + \
        #	analyze_labels(stat_df_valid)
        # print(sout)
        # print2file(sout, LOG_FILE)
        # print2file(' '.join(str(key)+':'+str(val) for key,val in eva.dicPara.items()), LOG_FILE)

        # load swa model
Exemplo n.º 28
0
            p2 = os.path.join(path, "a-" + file)
            al = align.face_features(p, p2)
            ev = utils.parse_evaluate(al, args.parsing_checkpoint, cuda=cuda)
            p = os.path.join(path, "b-" + file)
            cv2.imwrite(p, ev)
            ev = 255 - utils.img_edge(ev)
            p = os.path.join(path, "c-" + file)
            cv2.imwrite(p, ev)
    elif args.phase == "dataset":
        dataset = FaceDataset(args, "test")
        dataset.pre_process(cuda)
    elif args.phase == "preview":
        log.info("preview picture")
        path = "../export/regular/model.jpg"
        img = cv2.imread(path)
        img2 = utils.parse_evaluate(img, args.parsing_checkpoint, cuda)
        img3 = utils.img_edge(img2)
        img3_ = ops.fill_grey(img3)
        img4 = align.face_features(path)
        log.info("{0} {1} {2} {3}".format(img.shape, img2.shape, img3_.shape,
                                          img4.shape))
        ops.merge_4image(img, img2, img3_, img4, show=True)
    elif args.phase == "evaluate":
        log.info("evaluation mode start")
        evl = Evaluate(args, cuda=cuda)
        img = cv2.imread(args.eval_image).astype(np.float32)
        x_ = evl.itr_train(img)
        evl.output(x_, img)
    else:
        log.error("not known phase %s", args.phase)
Exemplo n.º 29
0
class DataHandler:
    """Load pickled ``{name: score}`` records for one class of students and
    compute summary statistics, memoizing each result in ``self.cache``.

    NOTE(review): relies on module-level ``pickle``, ``math``, ``reduce``
    and the project ``Evaluate`` class imported elsewhere in this file.
    """

    # Class member: one shared calculator instance for all handlers.
    evaluator = Evaluate()

    @classmethod
    def GetRawdataInDic(cls, filename):
        """Read every pickled dict appended to *filename* and merge them
        into a single ``{name: score}`` dict."""
        rawdata = {}
        with open(filename, 'rb') as f:
            # The file may hold several back-to-back pickle dumps; keep
            # loading until EOFError signals the end.
            while 1:
                try:
                    data = pickle.load(f)
                except EOFError:
                    break

                rawdata.update(data)

        return rawdata

    def __init__(self, filename, clsname):
        # Raw {name: score} data and the class's display name.
        self.rawdata = DataHandler.GetRawdataInDic(filename)
        self.clsname = clsname

        # Memoization store: compute each statistic on first request and
        # return the cached value on subsequent calls.
        self.cache = {}

    def get_scores(self):
        """Return all scores as a list (cached)."""
        if 'scores' not in self.cache:
            self.cache['scores'] = list(self.rawdata.values())
        return self.cache.get('scores')

    def get_average(self):
        """Return the mean score (cached)."""
        if 'average' not in self.cache:
            self.cache['average'] = self.evaluator.average(self.get_scores())
        return self.cache.get('average')

    def get_variance(self):
        """Return the (rounded) score variance (cached)."""
        # Bug fix: the original tested the misspelled key 'variace' but
        # stored under 'variance', so the memoization never took effect
        # and the variance was recomputed on every call.
        if 'variance' not in self.cache:
            vari = round(
                self.evaluator.variance(self.get_scores(), self.get_average()))
            self.cache['variance'] = vari
        return self.cache.get('variance')

    def get_standard_deviation(self):
        """Return the standard deviation, rounded to 1 decimal (cached)."""
        if "standard_deviation" not in self.cache:
            std_dev = round(math.sqrt(self.get_variance()), 1)
            self.cache["standard_deviation"] = std_dev
        # Bug fix: the return was nested inside the `if`, so every call
        # after the first (cache hit) returned None.
        return self.cache.get("standard_deviation")

    def WhoIsHighest(self):
        """Return the name with the highest score (cached)."""
        if 'highest' not in self.cache:
            self.cache['highest'] = reduce(
                lambda a, b: a
                if self.rawdata.get(a) > self.rawdata.get(b) else b,
                self.rawdata.keys())
        return self.cache.get('highest')

    def GetHighestScore(self):
        """Return the highest score value."""
        return self.rawdata[self.WhoIsHighest()]

    def WhoIsLowest(self):
        """Return the name with the lowest score (cached)."""
        if "lowest" not in self.cache:
            self.cache['lowest'] = reduce(
                lambda a, b: a
                if self.rawdata.get(a) < self.rawdata.get(b) else b,
                self.rawdata.keys())
        return self.cache.get('lowest')

    def GetLowestScore(self):
        """Return the lowest score value."""
        return self.rawdata[self.WhoIsLowest()]

    def get_evaluation(self):
        """Print the class's statistics summary and qualitative verdict."""
        print('*' * 50)
        print("%s 반 성적 분석 결과" % self.clsname)
        print("{0}반의 평균은 {1}점이고 분산은 {2}이며,따라서 표준편차는{3}이다".\
              format(self.clsname, self.get_average(), self.get_variance()\
                     , self.get_standard_deviation()))
        print('*' * 50)
        print("%s 반 종합 평가" % self.clsname)
        print('*' * 50)
        # Bug fix: the original called self.evaluateClass(), which does not
        # exist (the method is spelled evaluateclass) -> AttributeError.
        self.evaluateclass()

    def evaluateclass(self):
        """Print a qualitative verdict from average score and spread.

        NOTE(review): the exact boundary values (avrg == 50, std_dev == 20)
        print nothing -- confirm whether that is intended.
        """
        avrg = self.get_average()
        std_dev = self.get_standard_deviation()

        if avrg < 50 and std_dev > 20:
            print("성적이 너무 저조하고 학생들의 실력 차이가 너무 크다.")
        elif avrg > 50 and std_dev > 20:
            print("성적은 평균이상이지만 학생들 실력 차이가 크다. 주의 요망!")
        elif avrg < 50 and std_dev < 20:
            print("학생들간 실력차는 나지 않으나 성적이 너무 저조하다. 주의 요망!")
        elif avrg > 50 and std_dev < 20:
            print("성적도 평균 이상이고 학생들의 실력차도 크지 않다.")
Exemplo n.º 30
0
def trainer(epochs, model, optimizer, scheduler, train_dataloader,
            test_dataloader, batch_train, batch_test, device):
    """Fine-tune a BERT+GCN(+MWE) model for *epochs* epochs, then collect
    predictions on the test set and score them.

    Parameters:
        epochs: number of training epochs.
        model: the network; its forward returns a loss when `labels` is
            passed and logits otherwise (see the two call sites below).
        optimizer, scheduler: torch optimizer and LR scheduler, stepped
            once per batch.
        train_dataloader, test_dataloader: yield 7-tuples of
            (input_ids, input_mask, adj, adj_mwe, labels, target_idx, idx).
        batch_train, batch_test: batch sizes forwarded to the model --
            TODO confirm their exact meaning inside the model.
        device: torch device the batches and model run on.

    Returns:
        (scores, all_preds, all_labels, test_indices) where scores is a
        project `Evaluate` object over the final epoch's test predictions.

    NOTE(review): relies on module-level imports (torch, trange, gc,
    Evaluate) defined elsewhere in the file.
    """

    max_grad_norm = 1.0
    # Per-batch training losses, recorded for later inspection.
    train_loss_set = []

    for e in trange(epochs, desc="Epoch"):

        # Force a full garbage-collection sweep before each epoch
        # (gc.collect returns the number of objects collected).
        while gc.collect() > 0:
            pass

        # Training
        # Set our model to training mode (as opposed to evaluation mode)
        model.train()

        # if e > 8:
        #     model.freeze_bert()

        # Tracking variables
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0

        # Train the data for one epoch
        for step, batch in enumerate(train_dataloader):
            # Add batch to GPU
            batch = tuple(t.to(device) for t in batch)
            # Unpack the inputs from our dataloader
            b_input_ids, b_input_mask, b_adj, b_adj_mwe, b_labels, b_target_idx, _ = batch

            # Clear out the gradients (by default they accumulate)
            optimizer.zero_grad()
            # Forward pass
            ### For BERT + GCN and MWE
            # Passing `labels` makes the model return the loss directly.
            loss = model(b_input_ids.to(device), adj=b_adj, adj_mwe=b_adj_mwe ,attention_mask=b_input_mask.to(device), \
                        labels=b_labels, batch=batch_train, target_token_idx=b_target_idx.to(device))

            train_loss_set.append(loss.item())
            # Backward pass
            loss.backward(retain_graph=True)
            # Gradient clipping to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            # Update parameters and take a step using the computed gradient
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            # Update tracking variables
            tr_loss += loss.item()
            nb_tr_examples += b_input_ids.size(0)
            nb_tr_steps += 1

        print("Train loss: {}".format(tr_loss / nb_tr_steps))

        # Validation

        # Put model in evaluation mode to evaluate loss on the validation set
        model.eval()

        # Accumulators for the whole test set; re-initialized each epoch,
        # so only the final epoch's predictions survive the loop.
        all_preds = torch.FloatTensor()
        all_labels = torch.LongTensor()
        test_indices = torch.LongTensor()

        # Evaluate data for one epoch
        for batch in test_dataloader:
            # Add batch to GPU
            batch = tuple(t.to(device) for t in batch)
            # Unpack the inputs from our dataloader
            b_input_ids, b_input_mask, b_adj, b_adj_mwe, b_labels, b_target_idx, test_idx = batch
            # Telling the model not to compute or store gradients, saving memory and speeding up validation
            with torch.no_grad():
                # Forward pass, calculate logit predictions
                ### For BERT + GCN and MWE
                logits = model(b_input_ids.to(device), adj=b_adj, adj_mwe=b_adj_mwe, attention_mask=b_input_mask.to(device), \
                               batch=batch_test, target_token_idx=b_target_idx.to(device))

                # Move logits and labels to CPU
                logits = logits.detach().cpu()
                label_ids = b_labels.cpu()
                test_idx = test_idx.cpu()

                all_preds = torch.cat([all_preds, logits])
                all_labels = torch.cat([all_labels, label_ids])
                test_indices = torch.cat([test_indices, test_idx])

    # Score the final epoch's predictions with the project Evaluate helper.
    scores = Evaluate(all_preds, all_labels)
    print('scores.accuracy()={}\nscores.precision_recall_fscore()={}'.format(
        scores.accuracy(), scores.precision_recall_fscore()))

    return scores, all_preds, all_labels, test_indices