Example #1
File: utils.py Project: bearrundr/ELF
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addStrOption(
         'record_dir',
         'directory to record in',
         './record')
     spec.addStrOption(
         'save_prefix',
         'prefix of savefiles',
         'save')
     spec.addStrOption(
         'save_dir',
         'directory for savefiles',
         os.environ.get('save', './'))
     spec.addStrOption(
         'latest_symlink',
         'name for latest model symlink',
         'latest')
     spec.addIntOption(
         'num_games',
         'number of games',
         1024)
     spec.addIntOption(
         'batchsize',
         'batch size',
         128)
     return spec
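Note: a minimal sketch of how a spec built this way is typically consumed. The parse() call and the owning class name are assumptions based on common ELF usage, not part of the example above.

    spec = ModelSaver.get_option_spec()  # assumed owner of this method
    option_map = spec.parse()            # assumed: parses sys.argv against the spec
    saver = ModelSaver(option_map)       # assumed: the class reads its options from the map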
Example #2
File: model.py Project: qucheng/ELF-1
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addBoolOption('bn', 'toggles batch norm', True)
        spec.addBoolOption('leaky_relu', 'toggles leaky ReLU', False)
        spec.addIntOption('gpu', 'which gpu to use', -1)

        return spec
Example #3
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addIntOption(
            'freq_update',
            'frequency of model update',
            1)
        spec.addIntOption(
            'num_games',
            'number of games',
            1024)
        spec.addIntOption(
            'batchsize',
            'batch size',
            128)
        spec.addIntOption(
            'gpu',
            'which GPU to use',
            -1)
        spec.addIntOption(
            'T',
            'number of timesteps',
            6)
        spec.addStrOption(
            'parsed_args',
            'dummy option',
            '')

        spec.merge(Stats.get_option_spec('trainer'))
        spec.merge(ModelSaver.get_option_spec())

        return spec
Example #4
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addBoolOption('bn', 'toggles batch norm', True)
     spec.addBoolOption('leaky_relu', 'toggles leaky ReLU', True)
     spec.addIntOption('num_layer', 'number of layers', 39)
     spec.addIntOption('dim', 'model dimension', 128)
     return spec
Example #5
    def get_option_spec(cls, model_class=None, model_idx=None):
        spec = PyOptionSpec()
        spec.addStrOption('load', 'load model', '')
        spec.addStrListOption(
            'onload',
            ('functions to call after loading. e.g., reset,zero_first_layer. '
             'These functions are specified in the model'), [])
        spec.addStrListOption('omit_keys', 'omitted keys when loading', [])
        spec.addStrListOption('replace_prefix', 'replace prefix', [])
        spec.addIntOption('gpu', 'which GPU to use', -1)
        spec.addBoolOption(
            'check_loaded_options',
            'Toggles consistency check of loaded vs. current model options.',
            True)
        spec.addBoolOption('use_fp16', 'use_fp16', False)
        spec.addFloatOption(
            'load_model_sleep_interval',
            ('If zero, has no effect. If positive, then before loading the '
             'model, we will sleep for an interval of '
             'duration (secs) ~ Uniform[0, load_model_sleep_interval]'), 0.0)

        if model_class is not None and hasattr(model_class, 'get_option_spec'):
            spec.merge(model_class.get_option_spec())

        idx_suffix = '' if model_idx is None else str(model_idx)
        spec.addPrefixSuffixToOptionNames('', idx_suffix)

        return spec
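Note: the last two lines above rename every option when model_idx is given, so several loaders can share one command line. A small illustration; the owning class and model class names are assumed.

    spec = ModelLoader.get_option_spec(model_class=MyModel, model_idx=1)
    # addPrefixSuffixToOptionNames('', '1') exposes 'load' as 'load1',
    # 'gpu' as 'gpu1', and so on.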
Example #6
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addBoolOption('bn', 'toggles batch norm', True)
     spec.addBoolOption('leaky_relu', 'toggles leaky ReLU', False)
     spec.addFloatOption('bn_momentum',
                         'batch norm momentum (pytorch style)', 0.1)
     spec.addIntOption('dim', 'model dimension', 128)
     return spec
Example #7
File: df_model3.py Project: bearrundr/ELF
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addIntOption(
            'num_block',
            'number of resnet blocks',
            20)
        spec.merge(Block.get_option_spec())

        return spec
Example #8
File: trainer.py Project: liudengfeng/ELF
    def get_option_spec(cls, name='eval'):
        spec = PyOptionSpec()
        spec.addStrListOption('keys_in_reply', 'keys in reply', [])
        spec.addIntOption('num_minibatch', 'number of minibatches', 5000)
        spec.addStrListOption('parsed_args', 'dummy option', [])

        spec.merge(Stats.get_option_spec(name))

        return spec
Example #9
	def get_option_spec(cls):
		spec = PyOptionSpec()
		spec.addIntOption(
			'num_block',
			'number of resnet blocks',
			20)
		spec.merge(Block.get_option_spec())

		return spec
Example #10
File: eval_iters.py Project: bearrundr/ELF
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addIntOption(
         'num_eval',
         'number of games to evaluate',
         500)
     spec.addBoolOption(
         'tqdm',
         'toggle tqdm visualization',
         False)
     return spec
Example #11
File: utils.py Project: alatyshe/ELF
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addStrOption('record_dir', 'directory to record in', './record')
     spec.addStrOption('save_prefix', 'prefix of savefiles', 'save')
     spec.addStrOption('save_dir', 'directory for savefiles',
                       os.environ.get('save', './'))
     spec.addStrOption('latest_symlink', 'name for latest model symlink',
                       'latest')
     spec.addIntOption('num_games', 'number of games', 1024)
     spec.addIntOption('batchsize', 'batch size', 128)
     return spec
Example #12
File: trainer.py Project: ARVILab/ELF
    def get_option_spec(cls, name='eval'):
        # print("\u001b[31;1m|py|\u001b[0m\u001b[37m", "Evaluator::", inspect.currentframe().f_code.co_name)
        # print("\u001b[31;1m", os.path.dirname(os.path.abspath(__file__)), " - ", os.path.basename(__file__), "\u001b[0m")

        spec = PyOptionSpec()
        spec.addStrListOption('keys_in_reply', 'keys in reply', [])
        spec.addIntOption('num_minibatch', 'number of minibatches', 5000)
        spec.addStrListOption('parsed_args', 'dummy option', [])

        spec.merge(Stats.get_option_spec(name))

        return spec
Example #13
File: server.py Project: qucheng/ELF-1
    def get_option_spec(cls):
        spec = PyOptionSpec()
        elf.saveDefaultOptionsToArgs("", spec)
        elf.saveDefaultNetOptionsToArgs("", spec)
        spec.addIntOption(
            'gpu',
            'GPU id to use',
            -1)
        spec.addStrListOption(
            "parsed_args",
            "dummy option",
            [])

        return spec
Example #14
File: df_model3.py Project: bearrundr/ELF
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addBoolOption(
            'bn',
            'toggles batch norm',
            True)
        spec.addBoolOption(
            'leaky_relu',
            'toggles leaky ReLU',
            False)
        spec.addFloatOption(
            'bn_momentum',
            'batch norm momentum (pytorch style)',
            0.1)
        spec.addIntOption(
            'num_block',
            'number of blocks',
            20)
        spec.addIntOption(
            'dim',
            'model dimension',
            128)
        spec.addBoolOption(
            'use_data_parallel',
            'TODO: fill this in',
            False)
        spec.addBoolOption(
            'use_data_parallel_distributed',
            'TODO: fill this in',
            False)
        spec.addIntOption(
            'dist_rank',
            'TODO: fill this in',
            -1)
        spec.addIntOption(
            'dist_world_size',
            'TODO: fill this in',
            -1)
        spec.addStrOption(
            'dist_url',
            'TODO: fill this in',
            '')
        spec.addIntOption(
            'gpu',
            'which gpu to use',
            -1)

        spec.merge(GoResNet.get_option_spec())

        return spec
Example #15
	def get_option_spec(cls):
		spec = PyOptionSpec()
		spec.addBoolOption(
			'bn',
			'toggles batch norm',
			True)
		spec.addBoolOption(
			'leaky_relu',
			'toggles leaky ReLU',
			False)
		spec.addFloatOption(
			'bn_momentum',
			'batch norm momentum (pytorch style)',
			0.1)
		spec.addIntOption(
			'num_block',
			'number of blocks',
			20)
		spec.addIntOption(
			'dim',
			'model dimension',
			128)
		spec.addBoolOption(
			'use_data_parallel',
			'TODO: fill this in',
			False)
		spec.addBoolOption(
			'use_data_parallel_distributed',
			'TODO: fill this in',
			False)
		spec.addIntOption(
			'dist_rank',
			'TODO: fill this in',
			-1)
		spec.addIntOption(
			'dist_world_size',
			'TODO: fill this in',
			-1)
		spec.addStrOption(
			'dist_url',
			'TODO: fill this in',
			'')
		spec.addIntOption(
			'gpu',
			'which gpu to use',
			-1)

		spec.merge(GoResNet.get_option_spec())
		return spec
Example #16
File: server.py Project: qucheng/ELF-1
    def get_option_spec(cls):
        spec = PyOptionSpec()
        go.getServerPredefined(spec.getOptionSpec())

        spec.addIntOption('gpu', 'GPU id to use', -1)
        spec.addIntOption(
            'eval_old_model',
            ('If specified, then we directly switch to evaluation mode '
             'between the loaded model and the old model specified by this '
             'switch'), -1)
        spec.addStrOption('comment', 'Comment for this run', '')
        spec.addBoolOption("parameter_print", "Print parameters", True)

        spec.merge(PyOptionSpec.fromClasses((MoreLabels, )))
        return spec
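Note: PyOptionSpec.fromClasses, used in the merge above, appears to gather the get_option_spec() of each listed class into a single spec. A hedged sketch of the assumed equivalence:

    spec.merge(PyOptionSpec.fromClasses((MoreLabels,)))
    # assumed to behave like:
    tmp = PyOptionSpec()
    tmp.merge(MoreLabels.get_option_spec())
    spec.merge(tmp)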
Example #17
    def get_option_spec(cls):
        spec = PyOptionSpec()
        go.getClientPredefined(spec.getOptionSpec())

        spec.addIntOption('gpu', 'GPU id to use', -1)
        spec.addStrOption(
            'eval_model_pair',
            ('If specified for df_selfplay.py, then the two models will be '
             'evaluated on this client'), '')
        spec.addStrOption('comment', 'Comment for this run', '')

        spec.addIntOption('selfplay_timeout_usec', 'Timeout used for MCTS', 10)

        spec.addBoolOption("parameter_print", "Print parameters", True)

        spec.merge(PyOptionSpec.fromClasses((MoreLabels, )))
        return spec
Example #18
 def get_option_spec(cls):
     spec = PyOptionSpec()
     elf_C.setSpecELFOptions(spec.getOptionSpec())
     test.setSpecTSOptions(spec.getOptionSpec())
     spec.addIntOption(
         'gpu',
         'GPU id to use',
         0)
     spec.addStrOption(
         'load',
         'Load old model',
         "")
     spec.addStrListOption(
         'parsed_args',
         'dummy option',
         [])
     return spec
Example #19
File: trainer.py Project: bearrundr/ELF
    def get_option_spec(cls, name='eval'):
        spec = PyOptionSpec()
        spec.addStrListOption(
            'keys_in_reply',
            'keys in reply',
            [])
        spec.addIntOption(
            'num_minibatch',
            'number of minibatches',
            5000)
        spec.addStrListOption(
            'parsed_args',
            'dummy option',
            [])

        spec.merge(Stats.get_option_spec(name))

        return spec
Example #20
File: df_model3.py Project: bearrundr/ELF
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addBoolOption(
         'bn',
         'toggles batch norm',
         True)
     spec.addBoolOption(
         'leaky_relu',
         'toggles leaky ReLU',
         False)
     spec.addFloatOption(
         'bn_momentum',
         'batch norm momentum (pytorch style)',
         0.1)
     spec.addIntOption(
         'dim',
         'model dimension',
         128)
     return spec
Example #21
File: df_model2.py Project: bearrundr/ELF
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addBoolOption(
         'bn',
         'toggles batch norm',
         True)
     spec.addBoolOption(
         'leaky_relu',
         'toggles leaky ReLU',
         True)
     spec.addIntOption(
         'num_block',
         'number of blocks',
         20)
     spec.addIntOption(
         'dim',
         'model dimension',
         128)
     return spec
Example #22
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addIntOption('num_minibatch', 'number of minibatches', 5000)
     spec.addIntOption('num_episode', 'number of episodes', 10000)
     spec.addIntOption('num_process', 'number of processes', 2)
     spec.addBoolOption('tqdm', 'toggle tqdm visualization', False)
     return spec
Example #23
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addIntOption(
            'freq_update',
            'frequency of model update',
            1)
        spec.addBoolOption(
            'save_first',
            'save first model',
            False)
        spec.addIntOption(
            'num_games',
            'number of games',
            1024)
        spec.addIntOption(
            'batchsize',
            'batch size',
            128)

        # print("\u001b[31;1m|py|\u001b[0m\u001b[37m", "Trainer::", inspect.currentframe().f_code.co_name)
        # print("\u001b[31;1m", os.path.dirname(os.path.abspath(__file__)), " - ", os.path.basename(__file__), "\u001b[0m")

        spec.merge(Evaluator.get_option_spec('trainer'))
        spec.merge(ModelSaver.get_option_spec())

        return spec
Example #24
File: ppo.py Project: qucheng/ELF-1
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addIntOption(
         'anneal_entropy',
         '',
         0)
     spec.addFloatOption(
         'entropy_ratio',
         '',
         0.01)
     spec.addFloatOption(
         'init_entropy_ratio',
         'the entropy ratio we put on PolicyGradient',
         0.01)
     spec.addFloatOption(
         'final_entropy_ratio',
         '',
         0.0)
     spec.addIntOption(
         'entropy_decay_epoch',
         'decay the entropy linearly during the first k epochs',
         100)
     spec.addFloatOption(
         'min_prob',
         'minimal probability used in training',
         1e-6)
     spec.addFloatOption(
         'ratio_clamp',
         'maximum importance sampling ratio',
         0.1)
     spec.addFloatOption(
         'max_grad_norm',
         'maximum norm of gradient',
         0.5)
     spec.addFloatOption(
         'discount',
         'exponential discount rate',
         0.99)
     return spec
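Note: a hedged reading of how the annealing options above fit together, based only on the help strings; the actual PPO code may differ.

    def entropy_ratio_at(epoch, init=0.01, final=0.0, decay_epoch=100):
        # linear decay from init_entropy_ratio to final_entropy_ratio
        # over the first entropy_decay_epoch epochs
        if epoch >= decay_epoch:
            return final
        return init + (final - init) * epoch / decay_epoch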
Example #25
    def get_option_spec(cls):
        spec = PyOptionSpec()
        tutorial.getPredefined(spec.getOptionSpec())
        
        spec.addIntOption(
            'client_dummy',
            'Some dummy arguments',
            -1)
        spec.addStrOption(
            'client_dummy2',
            'some string dummy arguments',
            '')
        spec.addBoolOption(
            "client_dummy3",
            "Some boolean dummy arguments",
            True)
        spec.addIntOption(
            'gpu',
            'GPU id to use',
            -1)

        return spec
Example #26
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addIntOption('num_minibatch', 'number of minibatches', 5000)
     spec.addIntOption(
         'num_cooldown',
         'Last #minibatches to refresh running mean/std for batchnorm '
         'in addition to the training stage', 0)
     spec.addIntOption('num_episode', 'number of episodes', 10000)
     spec.addBoolOption('tqdm', 'toggle tqdm visualization', False)
     return spec
Example #27
File: trainer.py Project: liudengfeng/ELF
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addIntOption('freq_update', 'frequency of model update', 1)
        spec.addBoolOption('save_first', 'save first model', False)
        spec.addIntOption('num_games', 'number of games', 1024)
        spec.addIntOption('batchsize', 'batch size', 128)

        spec.merge(Evaluator.get_option_spec('trainer'))
        spec.merge(ModelSaver.get_option_spec())

        return spec
Example #28
 def get_option_spec(cls):
     spec = PyOptionSpec()
     test.setSpecOptions(spec.getOptionSpec())
     elf_C.setSpecELFOptions(spec.getOptionSpec())
     spec.addIntOption('gpu', 'GPU id to use', 0)
     spec.addIntOption('freq_update',
                       'How many updates before updating the acting model',
                       50)
     spec.addStrOption('distri_mode', 'server or client', "")
     spec.addIntOption('num_recv', '', 2)
     spec.addStrListOption('parsed_args', 'dummy option', [])
     spec.merge(PyOptionSpec.fromClasses((PPO, )))
     return spec
Example #29
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addIntOption(
         'num_minibatch',
         'number of minibatches',
         5000)
     spec.addIntOption(
         'num_episode',
         'number of episodes',
         10000)
     spec.addIntOption(
         'num_process',
         'number of processes',
         2)
     spec.addBoolOption(
         'tqdm',
         'toggle tqdm visualization',
         False)
     return spec
Example #30
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addIntOption(
         'num_minibatch',
         'number of minibatches',
         5000)
     spec.addIntOption(
         'num_cooldown',
         'Last #minibatches to refresh running mean/std for batchnorm '
         'in addition to the training stage',
         0)
     spec.addIntOption(
         'num_episode',
         'number of episodes',
         10000)
     spec.addBoolOption(
         'tqdm',
         'toggle tqdm visualization',
         False)
     return spec
Example #31
File: trainer.py Project: bearrundr/ELF
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addIntOption(
            'freq_update',
            'frequency of model update',
            1)
        spec.addBoolOption(
            'save_first',
            'save first model',
            False)
        spec.addIntOption(
            'num_games',
            'number of games',
            1024)
        spec.addIntOption(
            'batchsize',
            'batch size',
            128)

        spec.merge(Evaluator.get_option_spec('trainer'))
        spec.merge(ModelSaver.get_option_spec())

        return spec
Example #32
File: game.py Project: bearrundr/ELF
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addStrOption(
            'preload_sgf',
            'TODO: fill this help message in',
            '')
        spec.addIntOption(
            'preload_sgf_move_to',
            'TODO: fill this help message in',
            -1)
        spec.addBoolOption(
            'actor_only',
            'TODO: fill this help message in',
            False)
        spec.addStrListOption(
            'list_files',
            'Provide a list of json files for offline training',
            [])
        spec.addIntOption(
            'port',
            'TODO: fill this help message in',
            5556)
        spec.addStrOption(
            'server_addr',
            'TODO: fill this help message in',
            '')
        spec.addStrOption(
            'server_id',
            'TODO: fill this help message in',
            '')
        spec.addIntOption(
            'q_min_size',
            'TODO: fill this help message in',
            10)
        spec.addIntOption(
            'q_max_size',
            'TODO: fill this help message in',
            1000)
        spec.addIntOption(
            'num_reader',
            'TODO: fill this help message in',
            50)
        spec.addIntOption(
            'num_reset_ranking',
            'TODO: fill this help message in',
            5000)
        spec.addIntOption(
            'client_max_delay_sec',
            'Maximum allowed delay in seconds. If the client has not '
            'responded after that, we consider it dead.',
            1200)
        spec.addBoolOption(
            'verbose',
            'TODO: fill this help message in',
            False)
        spec.addBoolOption(
            'keep_prev_selfplay',
            'TODO: fill this help message in',
            False)
        spec.addBoolOption(
            'print_result',
            'TODO: fill this help message in',
            False)
        spec.addIntOption(
            'data_aug',
            'specify data augmentation, 0-7, -1 means random',
            -1)
        spec.addIntOption(
            'ratio_pre_moves',
            ('how many moves to perform in each thread, before we use the '
             'data to train the model'),
            0)
        spec.addFloatOption(
            'start_ratio_pre_moves',
            ('how many moves to perform in each thread, before we use the '
             'first sgf file to train the model'),
            0.5)
        spec.addIntOption(
            'num_games_per_thread',
            ('For offline mode, it is the number of concurrent games per '
             'thread, used to increase diversity of games; for selfplay mode, '
             'it is the number of games played at each thread, and after that '
             'we need to call restartAllGames() to resume.'),
            -1)
        spec.addIntOption(
            'expected_num_clients',
            'Expected number of clients',
            -1
        )
        spec.addIntOption(
            'num_future_actions',
            'TODO: fill this help message in',
            1)
        spec.addIntOption(
            'move_cutoff',
            'Cutoff ply in replay',
            -1)
        spec.addStrOption(
            'mode',
            'TODO: fill this help message in',
            'online')
        spec.addBoolOption(
            'black_use_policy_network_only',
            'TODO: fill this help message in',
            False)
        spec.addBoolOption(
            'white_use_policy_network_only',
            'TODO: fill this help message in',
            False)
        spec.addIntOption(
            'ply_pass_enabled',
            'TODO: fill this help message in',
            0)
        spec.addBoolOption(
            'use_mcts',
            'TODO: fill this help message in',
            False)
        spec.addBoolOption(
            'use_mcts_ai2',
            'TODO: fill this help message in',
            False)
        spec.addFloatOption(
            'white_puct',
            'PUCT for white when it is > 0.0. If it is -1 then we use '
            'the same puct for both sides (specified by mcts_options). '
            'A HACK to use different puct for different models. Should '
            'be replaced by a more systematic approach.',
            -1.0)
        spec.addIntOption(
            'white_mcts_rollout_per_batch',
            'white mcts rollout per batch',
            -1)
        spec.addIntOption(
            'white_mcts_rollout_per_thread',
            'white mcts rollout per thread',
            -1)
        spec.addBoolOption(
            'use_df_feature',
            'TODO: fill this help message in',
            False)
        spec.addStrOption(
            'dump_record_prefix',
            'TODO: fill this help message in',
            '')
        spec.addIntOption(
            'policy_distri_cutoff',
            'TODO: fill this help message in',
            0)
        spec.addFloatOption(
            'resign_thres',
            'TODO: fill this help message in',
            0.0)
        spec.addBoolOption(
            'following_pass',
            'TODO: fill this help message in',
            False)
        spec.addIntOption(
            'selfplay_timeout_usec',
            'TODO: fill this help message in',
            0)
        spec.addIntOption(
            'gpu',
            'TODO: fill this help message in',
            -1)
        spec.addBoolOption(
            'policy_distri_training_for_all',
            'TODO: fill this help message in',
            False)
        spec.addBoolOption(
            'parameter_print',
            'TODO: fill this help message in',
            True)
        spec.addIntOption(
            'batchsize',
            'batch size',
            128)
        spec.addIntOption(
            'batchsize2',
            'batch size',
            -1)
        spec.addIntOption(
            'T',
            'number of timesteps',
            6)
        spec.addIntOption(
            'selfplay_init_num',
            ('Initial number of selfplay games to generate before training a '
             'new model'),
            2000)
        spec.addIntOption(
            'selfplay_update_num',
            ('Additional number of selfplay games to generate after a model '
             'is updated'),
            1000)
        spec.addBoolOption(
            'selfplay_async',
            ('Whether to use async mode in selfplay'),
            False)
        spec.addIntOption(
            'eval_num_games',
            ('number of evaluation to be performed to decide whether a model '
             'is better than the other'),
            400)
        spec.addFloatOption(
            'eval_winrate_thres',
            'Win rate threshold for evaluation',
            0.55)
        spec.addIntOption(
            'eval_old_model',
            ('If specified, then we directly switch to evaluation mode '
             'between the loaded model and the old model specified by this '
             'switch'),
            -1)
        spec.addStrOption(
            'eval_model_pair',
            ('If specified for df_selfplay.py, then the two models will be '
             'evaluated on this client'),
            '')
        spec.addStrOption(
            'comment',
            'Comment for this run',
            '')
        spec.addBoolOption(
            'cheat_eval_new_model_wins_half',
            'When enabled, in evaluation mode, when the game '
            'finishes, the player with the most recent model gets 100% '
            'win rate half of the time. '
            'This is used to test the framework',
            False)
        spec.addBoolOption(
            'cheat_selfplay_random_result',
            'When enabled, in selfplay mode the result of the game is random. '
            'This is used to test the framework',
            False)
        spec.addIntOption(
            'suicide_after_n_games',
            'return after n games have finished, -1 means it never ends',
            -1)

        spec.merge(PyOptionSpec.fromClasses((ContextArgs, MoreLabels)))

        return spec
Example #33
 def get_option_spec(cls):
     spec = PyOptionSpec()
     spec.addIntOption('num_eval', 'number of games to evaluate', 500)
     spec.addBoolOption('tqdm', 'toggle tqdm visualization', False)
     return spec
Example #34
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addIntOption('num_games', 'number of games', 1024)
        spec.addIntOption('batchsize', 'batch size', 128)
        spec.addIntOption('T', 'number of timesteps', 6)
        spec.addIntOption('mcts_threads', 'number of MCTS threads', 0)
        spec.addIntOption('mcts_rollout_per_batch',
                          'Batch size for mcts rollout', 1)
        spec.addIntOption('mcts_rollout_per_thread',
                          'number of rollouts per MCTS thread', 1)
        spec.addBoolOption('mcts_verbose', 'enables mcts verbosity', False)
        spec.addBoolOption('mcts_verbose_time',
                           'enables mcts verbosity for time stats', False)
        spec.addBoolOption('mcts_persistent_tree',
                           'use persistent tree in MCTS', False)
        spec.addBoolOption('mcts_use_prior', 'use prior in MCTS', False)
        spec.addIntOption('mcts_virtual_loss',
                          '"virtual" number of losses for MCTS edges', 0)
        spec.addStrOption('mcts_pick_method',
                          'criterion for mcts node selection', 'most_visited')
        spec.addFloatOption('mcts_puct', 'prior weight', 1.0)
        spec.addFloatOption(
            'mcts_epsilon',
            'for exploration enhancement, weight of randomization', 0.0)
        spec.addFloatOption(
            'mcts_alpha',
            'for exploration enhancement, alpha term in gamma distribution',
            0.0)
        spec.addBoolOption("mcts_unexplored_q_zero",
                           'set all unexplored node to have Q value zero',
                           False)
        spec.addBoolOption(
            "mcts_root_unexplored_q_zero",
            'set unexplored child of root node to have Q value zero', False)

        return spec
Example #35
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addIntOption(
            'num_games',
            'number of games',
            1024)
        spec.addIntOption(
            'batchsize',
            'batch size',
            128)
        spec.addIntOption(
            'T',
            'number of timesteps',
            6)
        spec.addBoolOption(
            'verbose_comm',
            'enables verbose comm',
            False)
        spec.addIntOption(
            'mcts_threads',
            'number of MCTS threads',
            0)
        spec.addIntOption(
            'mcts_rollout_per_batch',
            'Batch size for mcts rollout',
            1)
        spec.addIntOption(
            'mcts_rollout_per_thread',
            'number of rollouts per MCTS thread',
            1)
        spec.addBoolOption(
            'mcts_verbose',
            'enables mcts verbosity',
            False)
        spec.addBoolOption(
            'mcts_verbose_time',
            'enables mcts verbosity for time stats',
            False)
        spec.addBoolOption(
            'mcts_persistent_tree',
            'use persistent tree in MCTS',
            False)
        spec.addBoolOption(
            'mcts_use_prior',
            'use prior in MCTS',
            False)
        spec.addIntOption(
            'mcts_virtual_loss',
            '"virtual" number of losses for MCTS edges',
            0)
        spec.addStrOption(
            'mcts_pick_method',
            'criterion for mcts node selection',
            'most_visited')
        spec.addFloatOption(
            'mcts_puct',
            'prior weight',
            1.0)
        spec.addFloatOption(
            'mcts_epsilon',
            'for exploration enhancement, weight of randomization',
            0.0)
        spec.addFloatOption(
            'mcts_alpha',
            'for exploration enhancement, alpha term in gamma distribution',
            0.0)
        spec.addBoolOption(
            "mcts_unexplored_q_zero",
            'set all unexplored node to have Q value zero',
            False)
        spec.addBoolOption(
            "mcts_root_unexplored_q_zero",
            'set unexplored child of root node to have Q value zero',
            False)

        return spec
Example #36
File: game.py Project: alatyshe/ELF
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addBoolOption('actor_only', 'TODO: fill this help message in',
                           False)
        spec.addStrListOption(
            'list_files', 'Provide a list of json files for offline training',
            [])
        spec.addIntOption('port', 'TODO: fill this help message in', 5556)
        spec.addStrOption('server_addr', 'TODO: fill this help message in', '')
        spec.addStrOption('server_id', 'TODO: fill this help message in', '')
        spec.addIntOption('q_min_size', 'TODO: fill this help message in', 10)
        spec.addIntOption('q_max_size', 'TODO: fill this help message in',
                          1000)
        spec.addIntOption('num_reader', 'TODO: fill this help message in', 50)
        spec.addIntOption('num_reset_ranking',
                          'TODO: fill this help message in', 5000)
        spec.addIntOption(
            'client_max_delay_sec',
            'Maximum allowed delay in seconds. If the client has not '
            'responded after that, we consider it dead.', 1200)
        spec.addBoolOption('verbose', 'TODO: fill this help message in', False)
        spec.addBoolOption('keep_prev_selfplay',
                           'TODO: fill this help message in', False)
        spec.addIntOption(
            'num_games_per_thread',
            ('For offline mode, it is the number of concurrent games per '
             'thread, used to increase diversity of games; for selfplay mode, '
             'it is the number of games played at each thread, and after that '
             'we need to call restartAllGames() to resume.'), -1)
        spec.addIntOption('expected_num_clients', 'Expected number of clients',
                          -1)
        spec.addIntOption('checkers_num_future_actions',
                          'TODO: fill this help message in', 1)
        spec.addStrOption('mode', 'TODO: fill this help message in', 'play')
        spec.addBoolOption('black_use_policy_network_only',
                           'TODO: fill this help message in', False)
        spec.addBoolOption('white_use_policy_network_only',
                           'TODO: fill this help message in', False)
        spec.addBoolOption('use_mcts', 'TODO: fill this help message in',
                           False)
        spec.addBoolOption('use_mcts_ai2', 'TODO: fill this help message in',
                           False)
        spec.addFloatOption(
            'white_puct',
            'PUCT for white when it is > 0.0. If it is -1 then we use '
            'the same puct for both sides (specified by mcts_options). '
            'A HACK to use different puct for different models. Should '
            'be replaced by a more systematic approach.', -1.0)
        spec.addIntOption('white_mcts_rollout_per_batch',
                          'white mcts rollout per batch', -1)
        spec.addIntOption('white_mcts_rollout_per_thread',
                          'white mcts rollout per thread', -1)
        spec.addStrOption('dump_record_prefix',
                          'TODO: fill this help message in', '')
        spec.addStrOption('selfplay_records_directory',
                          'TODO: fill this help message in', '')
        spec.addStrOption('eval_records_directory',
                          'TODO: fill this help message in', '')
        spec.addStrOption('records_buffer_directory',
                          'TODO: fill this help message in', '')
        spec.addIntOption('policy_distri_cutoff',
                          'first N moves will be played randomly', 0)
        spec.addIntOption('selfplay_timeout_usec',
                          'TODO: fill this help message in', 0)
        spec.addIntOption('gpu', 'TODO: fill this help message in', -1)
        spec.addBoolOption('policy_distri_training_for_all',
                           'TODO: fill this help message in', False)
        spec.addBoolOption('parameter_print',
                           'TODO: fill this help message in', True)
        spec.addIntOption('batchsize', 'batch size', 128)
        spec.addIntOption('batchsize2', 'batch size', -1)
        spec.addIntOption('T', 'number of timesteps', 6)
        spec.addIntOption(
            'selfplay_init_num',
            ('Initial number of selfplay games to generate before training a '
             'new model'), 2000)
        spec.addIntOption(
            'selfplay_update_num',
            ('Additional number of selfplay games to generate after a model '
             'is updated'), 1000)
        spec.addBoolOption('selfplay_async',
                           ('Whether to use async mode in selfplay'), False)
        spec.addIntOption(
            'eval_num_games',
            ('number of evaluation to be performed to decide whether a model '
             'is better than the other'), 400)
        spec.addFloatOption('eval_winrate_thres',
                            'Win rate threshold for evaluation', 0.55)
        spec.addIntOption(
            'eval_old_model',
            ('If specified, then we directly switch to evaluation mode '
             'between the loaded model and the old model specified by this '
             'switch'), -1)
        spec.addStrOption(
            'eval_model_pair',
            ('If specified for df_selfplay.py, then the two models will be '
             'evaluated on this client'), '')
        spec.addBoolOption(
            'cheat_eval_new_model_wins_half',
            'When enabled, in evaluation mode, when the game '
            'finishes, the player with the most recent model gets 100% '
            'win rate half of the time. '
            'This is used to test the framework', False)
        spec.addBoolOption(
            'cheat_selfplay_random_result',
            'When enabled, in selfplay mode the result of the game is random. '
            'This is used to test the framework', False)
        spec.addBoolOption('human_plays_for_black', '', False)
        spec.addIntOption(
            'suicide_after_n_games',
            'return after n games have finished, -1 means it never ends', -1)

        spec.merge(PyOptionSpec.fromClasses((ContextArgs, MoreLabels)))
        return spec
Example #37
    def get_option_spec(cls):
        spec = PyOptionSpec()
        spec.addStrOption('preload_sgf', 'TODO: fill this help message in', '')
        spec.addIntOption('preload_sgf_move_to',
                          'TODO: fill this help message in', -1)
        spec.addStrOption('mode', 'TODO: fill this help message in', "online")
        spec.addBoolOption('actor_only', 'TODO: fill this help message in',
                           False)
        spec.addIntOption('num_reset_ranking',
                          'TODO: fill this help message in', 5000)
        spec.addBoolOption('verbose', 'TODO: fill this help message in', False)
        spec.addBoolOption('print_result', 'TODO: fill this help message in',
                           False)
        spec.addIntOption('data_aug',
                          'specify data augmentation, 0-7, -1 means random',
                          -1)
        spec.addIntOption(
            'num_games_per_thread',
            ('For offline mode, it is the number of concurrent games per '
             'thread, used to increase diversity of games; for selfplay mode, '
             'it is the number of games played at each thread, and after that '
             'we need to call restartAllGames() to resume.'), -1)
        spec.addIntOption('num_future_actions',
                          'TODO: fill this help message in', 1)
        spec.addIntOption('move_cutoff', 'Cutoff ply in replay', -1)
        spec.addBoolOption('black_use_policy_network_only',
                           'TODO: fill this help message in', False)
        spec.addIntOption('ply_pass_enabled',
                          'TODO: fill this help message in', 0)
        spec.addBoolOption('use_mcts', 'TODO: fill this help message in',
                           False)
        spec.addBoolOption('use_df_feature', 'TODO: fill this help message in',
                           False)
        spec.addStrOption('dump_record_prefix',
                          'TODO: fill this help message in', '')
        spec.addFloatOption('resign_thres', 'TODO: fill this help message in',
                            0.0)
        spec.addBoolOption('following_pass', 'TODO: fill this help message in',
                           False)
        spec.addIntOption('gpu', 'TODO: fill this help message in', -1)
        spec.addBoolOption('parameter_print',
                           'TODO: fill this help message in', True)
        spec.addIntOption('batchsize', 'batch size', 128)
        spec.addIntOption('batchsize2', 'batch size', -1)
        spec.addFloatOption('eval_winrate_thres',
                            'Win rate threshold for evaluation', 0.55)
        spec.addIntOption(
            'suicide_after_n_games',
            'return after n games have finished, -1 means it never ends', -1)

        spec.merge(PyOptionSpec.fromClasses((ContextArgs, MoreLabels)))

        return spec