Example 1
def play_model(model_path, romfile):
    player = AtariPlayer(AtariDriver(romfile, viz=0.01),
                         action_repeat=ACTION_REPEAT)
    global NUM_ACTIONS
    NUM_ACTIONS = player.driver.get_num_actions()

    M = Model()
    cfg = PredictConfig(model=M,
                        input_data_mapping=[0],
                        session_init=SaverRestore(model_path),
                        output_var_names=['fct/output:0'])
    predfunc = get_predict_func(cfg)
    tot_reward = 0
    que = deque(maxlen=30)
    while True:
        s = player.current_state()
        outputs = predfunc([[s]])
        action_value = outputs[0][0]
        act = action_value.argmax()
        print(action_value, act)
        if random.random() < 0.01:
            act = random.choice(range(player.driver.get_num_actions()))
        if len(que) == que.maxlen \
                and que.count(que[0]) == que.maxlen:
            act = 1
        que.append(act)
        print(act)
        reward, isOver = player.action(act)
        tot_reward += reward
        if isOver:
            print("Total:", tot_reward)
            tot_reward = 0
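
A minimal sketch of how play_model could be invoked from the command line; the argparse wrapper and the flag names are assumptions, not part of the original example.

import argparse

if __name__ == '__main__':
    # Hypothetical entry point; '--load' and '--rom' are illustrative names.
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', required=True, help='path to the trained checkpoint')
    parser.add_argument('--rom', required=True, help='path to the Atari ROM file')
    args = parser.parse_args()
    play_model(args.load, args.rom)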
Example 2
def run_test(path, input):
    param_dict = np.load(path).item()

    pred_config = PredictConfig(
        model=Model(),
        input_data_mapping=[0],
        session_init=ParamRestore(param_dict),
        output_var_names=['output:0']   # output:0 is the probability distribution
    )
    predict_func = get_predict_func(pred_config)

    import cv2
    im = cv2.imread(input)
    assert im is not None
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    im = cv2.resize(im, (227, 227))
    im = np.reshape(im, (1, 227, 227, 3)).astype('float32')
    im = im - 110
    outputs = predict_func([im])[0]
    prob = outputs[0]
    ret = prob.argsort()[-10:][::-1]
    print(ret)

    meta = ILSVRCMeta().get_synset_words_1000()
    print([meta[k] for k in ret])
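
The preprocessing in this example (BGR to RGB, resize to the 227x227 input, add a batch dimension, subtract a rough mean of 110) can be factored into a small helper; the sketch below just restates those steps and is not part of the original project.

import cv2
import numpy as np

def preprocess(path, size=227, mean=110.0):
    # Read an image and build a (1, size, size, 3) float32 batch.
    im = cv2.imread(path)
    assert im is not None, path
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # OpenCV loads images as BGR
    im = cv2.resize(im, (size, size)).astype('float32')
    return np.reshape(im, (1, size, size, 3)) - mean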
Example 3
def play_model(model_path, romfile):
    player = AtariPlayer(AtariDriver(romfile, viz=0.01),
            action_repeat=ACTION_REPEAT)
    global NUM_ACTIONS
    NUM_ACTIONS = player.driver.get_num_actions()

    M = Model()
    cfg = PredictConfig(
            model=M,
            input_data_mapping=[0],
            session_init=SaverRestore(model_path),
            output_var_names=['fct/output:0'])
    predfunc = get_predict_func(cfg)
    tot_reward = 0
    que = deque(maxlen=30)
    while True:
        s = player.current_state()
        outputs = predfunc([[s]])
        action_value = outputs[0][0]
        act = action_value.argmax()
        print(action_value, act)
        if random.random() < 0.01:
            act = random.choice(range(player.driver.get_num_actions()))
        if len(que) == que.maxlen \
                and que.count(que[0]) == que.maxlen:
            act = 1
        que.append(act)
        print(act)
        reward, isOver = player.action(act)
        tot_reward += reward
        if isOver:
            print("Total:", tot_reward)
            tot_reward = 0
Example 4
def run_test(path, input):
    param_dict = np.load(path).item()

    pred_config = PredictConfig(
        model=Model(),
        input_data_mapping=[0],
        session_init=ParamRestore(param_dict),
        output_var_names=['output:0', 'pool5/MaxPool:0'])
    predict_func = get_predict_func(pred_config)

    im = cv2.imread(input)
    assert im is not None
    im = im.astype('float32')
    im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3))
    im = im - 110
    raw_out = predict_func([im])
    tfout = raw_out[1][0]

    from tensorio import read_value
    dumpf = 'dump.tensortxt'
    with open(dumpf) as f:
        name, arr = read_value(f)
    os.unlink(dumpf)
    hout = arr[:, :, :, 0]
    diff = hout - tfout
    maxdiff = np.abs(diff).max()
    print("Diff:", maxdiff)
    assert maxdiff < 1e-3
    return  # early return: the classification printout below is skipped

    prob = raw_out[0][0]
    ret = prob.argsort()[-10:][::-1]
    print(ret)
    meta = ILSVRCMeta().get_synset_words_1000()
    print([meta[k] for k in ret])
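
The max-abs-diff check in this example could equally be written with numpy's allclose; a tiny equivalent sketch, with rtol and atol chosen to mirror the 1e-3 threshold above.

# Equivalent comparison via numpy; rtol=0 makes atol the only tolerance.
assert np.allclose(hout, tfout, rtol=0, atol=1e-3), np.abs(hout - tfout).max()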
Example 5
def run_test(path, input):
    param_dict = np.load(path).item()

    pred_config = PredictConfig(
        model=Model(),
        input_var_names=['input'],
        session_init=ParamRestore(param_dict),
        session_config=get_default_sess_config(0.9),
        output_var_names=['output']  # 'output' is the probability distribution
    )
    predict_func = get_predict_func(pred_config)

    import cv2
    im = cv2.imread(input)
    assert im is not None
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    im = cv2.resize(im, (224, 224))
    im = np.reshape(im, (1, 224, 224, 3)).astype('float32')
    im = im - 110
    outputs = predict_func([im])[0]
    prob = outputs[0]
    ret = prob.argsort()[-10:][::-1]
    print(ret)

    meta = ILSVRCMeta().get_synset_words_1000()
    print([meta[k] for k in ret])
Example 6
def eval_model_singlethread(cfg, nr_eval):
    func = get_predict_func(cfg)
    player = get_player(train=False)
    scores = []
    for _ in tqdm(range(nr_eval)):
        score = play_one_episode(player, func, cfg.model.predict_value)
        scores.append(score)
    logger.info("Average Score: {}; Max Score: {}".format(
        sum(scores) / float(len(scores)), max(scores)))
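
The cfg argument is a PredictConfig like the ones built in the other examples; a plausible construction is sketched below, where the checkpoint path and output variable name are placeholders and the project's Model is assumed to expose the predict_value tensor referenced above.

# Illustrative only -- the path and the output name are placeholders.
cfg = PredictConfig(
    model=Model(),
    input_data_mapping=[0],
    session_init=SaverRestore('/path/to/checkpoint'),
    output_var_names=['fct/output:0'])
eval_model_singlethread(cfg, nr_eval=50)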
Example 7
def assemble_func(config_module, checkpoint_path):
    model = config_module.Model()
    pred_config = PredictConfig(
        model=model,
        input_data_mapping=[0, 1],
        session_init=SaverRestore(checkpoint_path),
        output_var_names=['pred'],
    )
    predict_func = get_predict_func(pred_config)
    return predict_func
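
With input_data_mapping=[0, 1] the returned function expects two input arrays per call and yields one array per requested output variable; the call below is a hedged sketch, with images and labels standing in for whatever the model's first two inputs actually are.

# Hypothetical usage of the assembled function; 'images' and 'labels' are
# placeholders for the model's two input components.
predict = assemble_func(config_module, '/path/to/checkpoint')
pred = predict([images, labels])[0]  # 'pred' is the only requested output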
Example 8
def play_model(model_path):
    player = get_player(viz=0.01)
    cfg = PredictConfig(
            model=Model(),
            input_data_mapping=[0],
            session_init=SaverRestore(model_path),
            output_var_names=['fct/output:0'])
    predfunc = get_predict_func(cfg)
    while True:
        score = play_one_episode(player, predfunc)
        print("Total:", score)
Example 9
def eval_model_multithread(model_path):
    cfg = PredictConfig(
            model=Model(),
            input_data_mapping=[0],
            session_init=SaverRestore(model_path),
            output_var_names=['fct/output:0'])
    p = get_player(); del p # set NUM_ACTIONS
    func = get_predict_func(cfg)
    NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
    mean, max = eval_with_funcs([func] * NR_PROC)
    logger.info("Average Score: {}; Max Score: {}".format(mean, max))
Example 10
def assemble_func(config_module, checkpoint_path):
    model = config_module.Model()
    pred_config = PredictConfig(
        model=model,
        input_var_names=['input'],
        session_init=SaverRestore(checkpoint_path),
        session_config=get_default_sess_config(0.5),
        output_var_names=['score', 'boxes'],
    )
    predict_func = get_predict_func(pred_config)
    return predict_func
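
Two output variables are requested here, so the predict function returns a list of two arrays; a sketch of unpacking them, with im_batch as an assumed image batch shaped the way the model expects.

# Hypothetical usage: one array comes back per entry in output_var_names.
predict = assemble_func(config_module, '/path/to/checkpoint')
scores, boxes = predict([im_batch])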
Example 11
def run_test(path, input):
    param_dict = np.load(path).item()

    pred_config = PredictConfig(
        model=Model(),
        input_data_mapping=[0],
        session_init=ParamRestore(param_dict),
        output_var_names=['output:0', 'pool5/MaxPool:0']
    )
    predict_func = get_predict_func(pred_config)

    im = cv2.imread(input)
    assert im is not None
    im = im.astype('float32')
    im = cv2.resize(im, (224, 224)).reshape((1,224,224,3))
    im = im - 110
    raw_out = predict_func([im])
    tfout = raw_out[1][0]

    from tensorio import read_value
    dumpf = 'dump.tensortxt'
    with open(dumpf) as f:
        name, arr = read_value(f)
    os.unlink(dumpf)
    hout = arr[:,:,:,0]
    diff = hout - tfout
    maxdiff = np.abs(diff).max()
    print("Diff:", maxdiff)
    assert maxdiff < 1e-3
    return  # early return: the classification printout below is skipped

    prob = raw_out[0][0]
    ret = prob.argsort()[-10:][::-1]
    print(ret)
    meta = ILSVRCMeta().get_synset_words_1000()
    print([meta[k] for k in ret])
Example 12
def eval_model_multithread(cfg, nr_eval):
    func = get_predict_func(cfg)
    NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
    mean, max = eval_with_funcs([func] * NR_PROC, nr_eval)
    logger.info("Average Score: {}; Max Score: {}".format(mean, max))
Example 13
                config.nr_tower = len(args.gpu.split(','))
            QueueInputTrainer(config).train()

        with tf.Graph().as_default():
            with tf.device('/cpu:0'):
                pred_config = PredictConfig(
                    model=Model(n=5),
                    input_data_mapping=[0],
                    session_init=SaverRestore(
                        "/home/ubuntu/tensorpack/examples/ResNet/train2_log{iteration}/Kaggle-Resnet-Strat/model-{mod}"
                        .format(iteration=iteration, mod=str(680))),
                    output_var_names=[
                        'output:0'
                    ]  # output:0 is the probability distribution
                )
            if args.gpu:
                config.nr_tower = len(args.gpu.split(','))

            predict_func = get_predict_func(pred_config)

            preds = []

            for i in range(0, len(cm.ds.X_test), 1000):
                x = cm.ds.X_test[i:i + 1000]
                preds.append(predict_func([x])[0])

            preds = np.vstack(preds)
            results.append(preds)

    results = np.mean(results, axis=0)
    write_submission(results, cm.ds.X_test_ids, "sub2.csv")
Example 14
def play_model(cfg):
    player = get_player(viz=0.01)
    predfunc = get_predict_func(cfg)
    while True:
        score = play_one_episode(player, predfunc)
        print("Total:", score)
Example 15
            if args.load:
                config.session_init = SaverRestore(args.load)
            if args.gpu:
                config.nr_tower = len(args.gpu.split(','))
            QueueInputTrainer(config).train()
                
        with tf.Graph().as_default():
            with tf.device('/cpu:0'):
                pred_config = PredictConfig(
                    model=Model(n=5),
                    input_data_mapping=[0],
                    session_init=SaverRestore(
                        "/home/ubuntu/tensorpack/examples/ResNet/train2_log{iteration}/Kaggle-Resnet-Strat/model-{mod}"
                        .format(iteration=iteration, mod=str(680))),
                    output_var_names=['output:0']  # output:0 is the probability distribution
                )
            if args.gpu:
                config.nr_tower = len(args.gpu.split(','))

            predict_func = get_predict_func(pred_config)
            
            preds = []

            for i in range(0, len(cm.ds.X_test), 1000):
                x = cm.ds.X_test[i:i + 1000]
                preds.append(predict_func([x])[0])

            preds = np.vstack(preds)
            results.append(preds)

    results = np.mean(results, axis=0)
    write_submission(results, cm.ds.X_test_ids, "sub2.csv")