Example no. 1
import os

import main      # self-play / evaluation driver referenced below
import qmeas     # metrics recorder referenced below
import goparams  # hyperparameter module referenced below

# MODELS_DIR, SELFPLAY_DIR, HOLDOUT_DIR and SGF_DIR are assumed to be
# module-level constants defined elsewhere in the original project.


def evaluate(prev_model,
             cur_model,
             readouts=200,
             verbose=1,
             resign_threshold=0.95):
    ''' returns True if cur model should be used in future games '''
    prev_model_save_path = os.path.join(MODELS_DIR, prev_model)
    cur_model_save_path = os.path.join(MODELS_DIR, cur_model)
    game_output_dir = os.path.join(SELFPLAY_DIR, cur_model)
    game_holdout_dir = os.path.join(HOLDOUT_DIR, cur_model)
    sgf_dir = os.path.join(SGF_DIR, cur_model)
    cur_win_pct = main.evaluate_evenly(prev_model_save_path,
                                       cur_model_save_path,
                                       game_output_dir,
                                       readouts=readouts,
                                       games=goparams.EVAL_GAMES_PER_SIDE)

    print('Evaluate Win Pct = ', cur_win_pct)

    qmeas.record('evaluate_win_pct', cur_win_pct)
    keep = False
    if cur_win_pct >= goparams.EVAL_WIN_PCT_FOR_NEW_MODEL:
        qmeas.record('evaluate_choice', 'new')
        keep = True
    else:
        qmeas.record('evaluate_choice', 'old')
        keep = False
    qmeas.record('eval_summary', {
        'win_pct': cur_win_pct,
        'model': cur_model,
        'keep': keep
    })
    return keep
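
A brief usage sketch follows; the calling code and the model names are illustrative assumptions, not part of the original snippet:

# Hypothetical caller: gate model promotion on the evaluation outcome.
prev_model = '000041-model'   # placeholder model name
cur_model = '000042-model'    # placeholder model name
if evaluate(prev_model, cur_model, readouts=200):
    best_model = cur_model    # the new model won often enough: promote it
else:
    best_model = prev_model   # keep self-playing with the previous model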
Example no. 2
def evaluate(prev_model, cur_model, readouts=200, verbose=1, resign_threshold=0.95):
    ''' returns True if cur model should be used in future games '''
    prev_model_save_path = os.path.join(MODELS_DIR, prev_model)
    cur_model_save_path = os.path.join(MODELS_DIR, cur_model)
    game_output_dir = os.path.join(SELFPLAY_DIR, cur_model)
    game_holdout_dir = os.path.join(HOLDOUT_DIR, cur_model)
    sgf_dir = os.path.join(SGF_DIR, cur_model)
    cur_win_pct = main.evaluate_evenly(prev_model_save_path, cur_model_save_path, game_output_dir, readouts=readouts, games=goparams.EVAL_GAMES_PER_SIDE)

    print('Evaluate Win Pct = ', cur_win_pct)

    qmeas.record('evaluate_win_pct', cur_win_pct)
    keep = False
    if cur_win_pct >= goparams.EVAL_WIN_PCT_FOR_NEW_MODEL:
        qmeas.record('evaluate_choice', 'new')
        keep = True
    else:
        qmeas.record('evaluate_choice', 'old')
        keep = False
    qmeas.record('eval_summary', {'win_pct': cur_win_pct, 'model': cur_model, 'keep': keep})
    return keep
Example no. 3

import os

import main  # evaluation driver referenced below

if __name__ == '__main__':
    #qmeas.start()
    #qmeas.create_main_profiler()
    #white_model = sys.argv[1]
    #black_model = sys.argv[2]
    #print('white = ', white_model)
    #print('black = ', black_model)

    models = get_models_from_argv()

    curve = []

    output_dir = '/tmp/play_models'
    os.makedirs(output_dir, exist_ok=True)  # create once, outside the loop
    for i in range(len(models)):
        # evaluate_evenly returns the second model's win percentage (cf.
        # Example no. 1), so curve holds each older model's win rate
        # against the latest model.
        last_win = main.evaluate_evenly(models[i], models[-1],
                                        output_dir=output_dir, games=10)
        curve.append(1 - last_win)
        print('CURVE:')
        for j, c in enumerate(curve):  # 'j' avoids shadowing the outer index
            print(j, c)

    for i, v in enumerate(curve):
        print('{}\t{}'.format(i, v))

    #qmeas.record_profiler()
    #qmeas.end()
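
get_models_from_argv() is called above but never defined in these snippets; a minimal sketch of the assumed behavior (each command-line argument is a model save path, ordered oldest to newest) could be:

import sys

def get_models_from_argv():
    # Hypothetical reconstruction, not the original helper: treat every
    # argv entry as a model save path, so models[-1] is the latest model.
    return sys.argv[1:]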


Example no. 4
import os
import sys

from utils import timer
from tensorflow import gfile
import logging

import main   # evaluation driver referenced below
import qmeas

if __name__ == '__main__':
    #qmeas.start()
    #qmeas.create_main_profiler()
    white_model = sys.argv[1]
    black_model = sys.argv[2]
    print('white = ', white_model)
    print('black = ', black_model)

    output_dir = '/tmp/play_models'
    os.makedirs(output_dir, exist_ok=True)
    winners = main.evaluate_evenly(black_model,
                                   white_model,
                                   output_dir=output_dir,
                                   games=10)

    print()
    print()
    print()
    print('white = ', white_model)
    print('black = ', black_model)
    print('white win pct = ', winners)
    #qmeas.record_profiler()
    #qmeas.end()


Example no. 5
if __name__ == '__main__':
    # qmeas.start()
    # qmeas.create_main_profiler()
    #white_model = sys.argv[1]
    #black_model = sys.argv[2]
    #print('white = ', white_model)
    #print('black = ', black_model)

    models = get_models_from_argv()

    curve = []

    for i in range(len(models)):
        output_dir = '/tmp/play_models'
        os.makedirs(output_dir, exist_ok=True)
        last_win = main.evaluate_evenly(
            models[i], models[-1], output_dir=output_dir, games=10)
        curve.append(1 - last_win)
        print('CURVE:')
        for j, c in enumerate(curve):  # 'j' avoids shadowing the outer index
            print(j, c)

    for i, v in enumerate(curve):
        print('{}\t{}'.format(i, v))

    # qmeas.record_profiler()
    # qmeas.end()
Example no. 6
import os
import sys
import shutil

from utils import timer
from tensorflow import gfile
import logging

import main   # evaluation driver referenced below
import qmeas

if __name__ == '__main__':
    #qmeas.start()
    #qmeas.create_main_profiler()
    white_model = sys.argv[1]
    black_model = sys.argv[2]
    print('white = ', white_model)
    print('black = ', black_model)

    output_dir = '/tmp/play_models'
    os.makedirs(output_dir, exist_ok=True)
    # evaluate_evenly returns the second model's win percentage,
    # i.e. white's win rate here.
    winners = main.evaluate_evenly(black_model, white_model,
                                   output_dir=output_dir, games=10)

    print()
    print()
    print()
    print('white = ', white_model)
    print('black = ', black_model)
    print('white win pct = ', winners)
    #qmeas.record_profiler()
    #qmeas.end()
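
For reference, the head-to-head script above would be invoked with two model save paths on the command line, e.g. "python play_two_models.py <white_model> <black_model>"; the actual script filename is not given in these snippets.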