コード例 #1
0
 def _trigger_epoch(self):
     """Rebuild the offline density-network predictor at the end of an epoch.

     Only active when the FEATURE global is set; the predictor reuses the
     weights of the current training session (JustCurrentSession).
     """
     if not FEATURE:
         return
     # NOTE(review): `% 1 == 0` is always true, so this runs every epoch;
     # it looks like a placeholder for a configurable update period -- confirm.
     if self.epoch_num % 1 != 0:
         return
     logger.info("update density network at epoch %d." % (self.epoch_num))
     predict_cfg = PredictConfig(
         session_init=JustCurrentSession(),
         model=Model(),
         input_var_names=['state'],
         output_var_names=[FEATURE])
     self.offline_predictor = get_predict_func(predict_cfg)
コード例 #2
0
def run_atari_neptune_experiment(yamlFile=None, modelToLaod=None, epoch=None):
    """Run one Atari/Neptune evaluation experiment described by a YAML file.

    Parameters
    ----------
    yamlFile : path to the experiment YAML; its ``parameters`` list of
        ``{name, default}`` entries is flattened into an attribute dict.
    modelToLaod : checkpoint path restored via ``SaverRestore``.
        NOTE(review): parameter name is a typo for ``modelToLoad``; kept
        unchanged so keyword callers keep working.
    epoch : only used to name the dump sub-directory under dump_dir_root.

    Side effects: mutates module globals ``ENV_NAME``, ``EXPERIMENT_MODEL``
    and ``FRAME_HISTORY``, and may set ``CUDA_VISIBLE_DEVICES``.
    """
    global ENV_NAME, EXPERIMENT_MODEL, FRAME_HISTORY

    with open(yamlFile, 'r') as stream:
        try:
            # SECURITY: yaml.load can construct arbitrary Python objects;
            # prefer yaml.safe_load if the config never needs custom tags.
            yamlData = yaml.load(stream)
        except yaml.YAMLError as exc:
            # Fail fast: the original swallowed the error here and then
            # crashed later with a NameError on yamlData.
            print(exc)
            raise

    # Flatten the "parameters" list into a name -> default mapping.
    argsDict = {}
    for v in yamlData["parameters"]:
        argsDict[v["name"]] = v["default"]

    args = edict(argsDict)
    ENV_NAME = args.env
    assert ENV_NAME

    # Optional knob; falls back to a 4-frame history.
    FRAME_HISTORY = getattr(args, "frame_history", 4)

    logger.info("Environment Name: {}".format(ENV_NAME))

    # "pkg.mod.Class" -> import pkg.mod and pick Class off the module.
    module_name, _, class_name = args.experimentModelClass.rpartition('.')
    experiment_model_class = importlib.import_module(
        module_name).__dict__[class_name]
    EXPERIMENT_MODEL = experiment_model_class(args.experimentModelParameters)

    # Instantiating a player populates the NUM_ACTIONS global as a side
    # effect (acknowledged hack in the original).
    p = get_player()
    del p  # set NUM_ACTIONS. Bloody hack!
    EXPERIMENT_MODEL.set_number_of_actions(NUM_ACTIONS)

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)

    cfg = PredictConfig(model=EXPERIMENT_MODEL,
                        session_init=SaverRestore(modelToLaod),
                        input_var_names=['state'],
                        output_var_names=['logits'])
    dump_dir = os.path.join(dump_dir_root, str(epoch))
    # Parenthesized print works under both Py2 and Py3 (original used the
    # Py2-only statement form).
    print("Writing to:{}".format(dump_dir))
    run_submission(cfg, dump_dir)
コード例 #3
0
 def _setup_graph(self):
     """Build the async policy/value predictor (and, when FEATURE is set,
     an offline density-network predictor), then start its worker threads."""
     self.sess = self.trainer.sess
     predict_funcs = self.trainer.get_predict_funcs(
         ['state'], ['logitsT', 'pred_value'], PREDICTOR_THREAD)
     self.async_predictor = MultiThreadAsyncPredictor(
         predict_funcs, batch_size=15)
     if FEATURE:
         logger.info("Initialize density network")
         density_cfg = PredictConfig(
             session_init=NewSession(),
             model=Model(),
             input_var_names=['state'],
             output_var_names=[FEATURE])
         self.offline_predictor = get_predict_func(density_cfg)
     self.async_predictor.run()
コード例 #4
0
            "Conv2DBackpropFilter": "MKL",
            "Conv2DBackpropInput": "MKL"
        }
    else:
        # NOTE(review): Py2-only print statement; this file predates Py3.
        print "using tensorflow convolution"
        label_map = {}  # empty map -> TF picks its default kernels
    with ops.Graph().as_default() as g:
        # Seed TF and numpy per worker task so each task is reproducible.
        tf.set_random_seed(my_task_index)
        np.random.seed(my_task_index)
        # _kernel_label_map is a private TF API used to force the MKL
        # kernels chosen above -- confirm it still exists in this TF version.
        with g._kernel_label_map(label_map):
            with tf.device('/job:worker/task:{}/cpu:0'.format(my_task_index)):
                with tf.variable_scope(tf.get_variable_scope(), reuse=None):
                    if args.task != 'train':
                        # Inference path: restore the checkpoint and expose
                        # logits for play/eval.
                        cfg = PredictConfig(
                            model=Model('/job:ps/task:0/cpu:0'),
                            session_init=SaverRestore(args.load),
                            input_var_names=['state'],
                            output_var_names=['logits:0'])
                        if args.task == 'play':
                            play_model(cfg)
                        elif args.task == 'eval':
                            eval_model_multithread(cfg, EVAL_EPISODE)
                    else:
                        # Training path: tower placement setup (continues
                        # past the end of this view).
                        nr_towers = args.nr_towers
                        predict_towers = args.nr_predict_towers * [
                            0,
                        ]

                        if args.cpu != 1:
                            nr_gpu = get_nr_gpu()
                            if nr_gpu > 1:
コード例 #5
0
        DUELING = False
    else:
        # Invalid flag value: only logged, not raised -- execution continues
        # with whatever DUELING was previously.
        logger.error("dueling argument must be t or f")

    if DOUBLE:
        logger.info("Using Double")
    if DUELING:
        logger.info("Using Dueling")

    assert ENV_NAME
    logger.info("Environment Name: {}".format(ENV_NAME))
    # Instantiating a player populates the NUM_ACTIONS global as a side
    # effect; the instance itself is not needed.
    p = get_player()
    del p  # set NUM_ACTIONS

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # Predictor for the Q-value head.
    cfg = PredictConfig(model=Model(),
                        session_init=SaverRestore(args.load),
                        input_var_names=['state'],
                        output_var_names=['Qvalue'])

    # Second predictor over the same checkpoint for the saliency output.
    s_cfg = PredictConfig(model=Model(),
                          session_init=SaverRestore(args.load),
                          input_var_names=['state'],
                          output_var_names=['saliency'])

    run(cfg, s_cfg, args.output)
    #run_submission(cfg, args.output, args.episode)
    #do_submit(args.output, args.api)
コード例 #6
0
            break

    # NOTE(review): Py2-only print statement; reports the detection rate
    # det_cnt/cnt accumulated by the (truncated) loop above.
    print float(det_cnt) / cnt


if __name__ == '__main__':
    # CLI: checkpoint, environment and dataset split are all required.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model', required=True)
    parser.add_argument('--env', help='environment name', required=True)
    parser.add_argument('--split', help='split name', required=True)
    args = parser.parse_args()

    ENV_NAME = args.env
    assert ENV_NAME
    SPLIT_NAME = args.split
    assert SPLIT_NAME
    logger.info("Environment Name: {}".format(ENV_NAME))
    logger.info("Split Name: {}".format(SPLIT_NAME))

    # Creating (and immediately discarding) a player populates the
    # NUM_ACTIONS global as a side effect.
    player = get_player()
    del player

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    cfg = PredictConfig(
        model=Model(),
        session_init=SaverRestore(args.load),
        input_names=['state'],
        output_names=['logits', 'rnn_state'])
    run_submission(cfg)
コード例 #7
0
    parser.add_argument('-o','--output', help='output directory', default='gym-submit')
    parser.add_argument('-k','--key', help='api key')
    args=parser.parse_args()
    ENV_NAME = args.env
    assert ENV_NAME
    # Instantiating a player populates NUM_ACTIONS as a side effect.
    p = get_player(); del p     # set NUM_ACTIONS

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Any non-training task needs a checkpoint to restore.
    if args.task != 'train':
        assert args.load is not None

    if args.task != 'train':
        # Inference: restore the checkpoint and expose the policy output.
        cfg = PredictConfig(
                model=Model(),
                session_init=SaverRestore(args.load),
                input_var_names=['state'],
                output_var_names=['fct/output:0'])
        if args.task == 'play':
            play_model(cfg)
        elif args.task == 'eval':
            eval_model_multithread(cfg, EVAL_EPISODE)
        elif args.task == 'run':
            # Record episodes to args.output, then upload with the API key.
            run_submission(cfg, args.output, args.episode)
            do_submit(args.output, args.key)
    else:
        # Training: optionally resume from a checkpoint.
        config = get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        QueueInputTrainer(config).train()