def thread_function(self):
        """
        Run on a secondary thread.
        """
        pred = OfflinePredictor(
            PredictConfig(model=Model(IMAGE_SIZE, FRAME_HISTORY, self.METHOD,
                                      self.NUM_ACTIONS, GAMMA, ""),
                          session_init=get_model_loader(self.fname_model.name),
                          input_names=['state'],
                          output_names=['Qvalue']))
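        # `pred` is now a plain callable: pred(state_batch) returns a list with
        # one array per output name, here the Q-values for every action.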

        # demo pretrained model one episode at a time
        if self.task_value == 'Play':
            play_n_episodes(get_player(files_list=self.selected_list,
                                       viz=0.01,
                                       data_type=self.window.usecase,
                                       saveGif=self.GIF_value,
                                       saveVideo=self.video_value,
                                       task='play'),
                            pred,
                            self.num_files,
                            viewer=self.window)
        # run episodes in parallel and evaluate pretrained model
        elif self.task_value == 'Evaluation':
            play_n_episodes(get_player(files_list=self.selected_list,
                                       viz=0.01,
                                       data_type=self.window.usecase,
                                       saveGif=self.GIF_value,
                                       saveVideo=self.video_value,
                                       task='eval'),
                            pred,
                            self.num_files,
                            viewer=self.window)
Example #2
def test_only(args):
    from imageio import imsave
    data_folder = args.get("data_folder")
    test_ckpt = args.get("test_ckpt")
    test_folder = args.get("test_folder")
    if not os.path.exists(test_folder):
        os.makedirs(test_folder)
    image_size = 224
    pred_config = PredictConfig(
        model=ProgressiveSynTex(args),
        session_init=SmartInit(test_ckpt),
        input_names=["pre_image_input", "image_target"],
        output_names=['stages-target/viz', 'loss_output']
    )
    predictor = OfflinePredictor(pred_config)
    test_ds = get_data(data_folder, image_size, isTrain=False)
    test_ds.reset_state()
    idx = 0
    losses = list()
    print("------------------ predict --------------")
    for pii, it in test_ds:
        output_array, loss_output = predictor(pii, it)
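        # outputs come back in output_names order: the 'stages-target/viz'
        # visualization first, then the scalar 'loss_output'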
        if output_array.ndim == 4:
            for i in range(output_array.shape[0]):
                imsave(os.path.join(test_folder, "test-{}.jpg".format(idx)), output_array[i])
                idx += 1
        else:
            imsave(os.path.join(test_folder, "test-{}.jpg".format(idx)), output_array)
            idx += 1
        losses.append(loss_output)
        print("loss #", idx, "=", loss_output)
    print("Test and save", idx, "images to", test_folder, "avg loss =", np.mean(losses))
Example #3
def init(args=None, is_running=0, pt=None):
    global ckpt2
    # network
    model = Net2()
    if is_running == 1:
        if pt == "":
            ckpt2 = tf.train.latest_checkpoint(logdir2)
        else:
            ckpt2 = '{}/{}'.format(logdir2, pt)
    else:
        ckpt2 = '{}/{}'.format(
            logdir2,
            args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir2)
    session_inits = []
    if ckpt2:
        session_inits.append(SaverRestore(ckpt2))
    pred_conf = PredictConfig(
        model=model,
        input_names=['x_ppgs', 'x_mfccs', 'y_spec', 'y_mel'],
        output_names=['pred_spec', "ppgs"],
        session_init=ChainInit(session_inits))
    global predictor
    predictor = OfflinePredictor(pred_conf)
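    # the module-level predictor is now ready: it takes the four arrays named
    # in input_names and returns [pred_spec, ppgs]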
    if is_running == 1:
        return jsonify({"code": 0, "ckpt": ckpt2})
    def _make_pred_func(self, load):
        from train import ResNetFPNTrackModel
        pred_model = ResNetFPNTrackModel()
        predcfg = PredictConfig(
            model=pred_model,
            session_init=get_model_loader(load),
            input_names=pred_model.get_inference_tensor_names()[0],
            output_names=pred_model.get_inference_tensor_names()[1])
        return OfflinePredictor(predcfg)
    def load_model(self):
        print('Loading Model...')
        model_path = self.model_path
        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(self.nr_types, self.input_shape,
                                    self.mask_shape, self.input_norm),
            session_init=get_model_loader(model_path),
            input_names=self.input_tensor_names,
            output_names=self.output_tensor_names)
        self.predictor = OfflinePredictor(pred_config)
Example #6
def get_model(model, ckpt_name, option):
    model_path = ospj('train_log', option.log_dir, ckpt_name)
    ds = get_data('val', option)
    pred_config = PredictConfig(
        model=model,
        session_init=get_model_loader(model_path),
        input_names=['input', 'label', 'bbox'],
        output_names=['wrong-top1', 'top5', 'actmap', 'grad'],
        return_input=True)
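    # return_input=True makes the dataset predictor yield (input, output) pairs,
    # so each prediction can be paired with its input/label/bbox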

    return SimpleDatasetPredictor(pred_config, ds)
Example #7
    def __init__(self, game_name, config, global_model_data_path, local_test_flag):
        if not local_test_flag:
            mkdirs(global_model_data_path+config.DRL.Learn.data_save_path)
        self.game_name = game_name
        self.data_save_path = global_model_data_path+config.DRL.Learn.data_save_path
        self.config = config
        self.global_iter = 0
        self.ckpt_dir = global_model_data_path+self.config.DRL.Learn.ckpt_dir
        self.ckpt_save_iter = self.config.DRL.Learn.ckpt_save_iter
        if not local_test_flag:
            mkdirs(self.ckpt_dir)

        self.apply_prioritize_memory = False
        if self.apply_prioritize_memory:
            self.memory = PrioritizedReplay(capacity=self.config.DRL.Learn.replay_memory_size)
        else:
            # store the previous observations in replay memory
            self.memory = deque()

        if self.game_name == 'flappybird':  # FlappyBird uses PyTorch
            use_cuda = config.DRL.Learn.cuda and torch.cuda.is_available()
            self.device = 'cuda' if use_cuda else 'cpu'
            self.actions_number = self.config.DRL.Learn.actions
            # self.nn = DeepQNetwork().to(self.device)
            self.nn = FlappyBirdDQN().to(self.device)
            self.optim = optim.Adam(self.nn.parameters(), lr=self.config.DRL.Learn.learning_rate)
            if config.DRL.Learn.ckpt_load:
                self.load_checkpoint(model_name='flappy_bird_model')
            self.game_state = FlappyBird()
            if torch.cuda.is_available():
                torch.cuda.manual_seed(123)
            else:
                torch.manual_seed(123)
        elif self.game_name == 'Assault-v0' or self.game_name == 'Breakout-v0' or self.game_name == 'SpaceInvaders-v0':
            if self.game_name == 'Assault-v0':
                game_model_name = 'Assault-v0.tfmodel'
            elif self.game_name == 'Breakout-v0':
                game_model_name = 'Breakout-v0.npz'
            elif self.game_name == 'SpaceInvaders-v0':
                game_model_name = 'SpaceInvaders-v0.tfmodel'
            else:
                raise ValueError('Unknown game name {0}'.format(self.game_name))

            self.env = self.get_player_atari(train=False)
            num_actions = self.env.action_space.n
            self.nn = OfflinePredictor(PredictConfig(
                # model=Model(),
                model=Model(num_actions=num_actions, image_size=(84, 84)),
                session_init=SmartInit(self.ckpt_dir+game_model_name),
                input_names=['state'],
                output_names=['policy', 'pred_value']))
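            # calling self.nn(state) returns [policy, pred_value] for a batch
            # of preprocessed game frames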

            self.config.DRL.Learn.actions = num_actions
Example #8
    def __init__(self, name, need_network=True, need_img=True, model="best"):
        super().__init__(name=name, is_deterministic=True)
        self._resizer = CustomResize(cfg.PREPROC.TEST_SHORT_EDGE_SIZE,
                                     cfg.PREPROC.MAX_SIZE)
        self._prev_box = None
        self._ff_gt_feats = None
        self._need_network = need_network
        self._need_img = need_img
        self._rotated_bbox = None

        if need_network:
            logger.set_logger_dir(
                "/tmp/test_log_/" + str(random.randint(0, 10000)), 'd')
            if model == "best":
                load = "train_log/hard_mining3/model-1360500"
            elif model == "nohardexamples":
                load = "train_log/condrcnn_all_2gpu_lrreduce2/model-1200500"
            elif model == "newrpn":
                load = "train_log/newrpn1/model"
            elif model == "resnet50_nohardexamples":
                load = "train_log/condrcnn_all_resnet50/model-1200500"
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
            elif model == "resnet50":
                load = "train_log/hard_mining3_resnet50/model-1360500"
                cfg.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3]
            elif model == "gotonly":
                load = "train_log/hard_mining3_onlygot/model-1361000"
            elif model.startswith("checkpoint:"):
                load = model.replace("checkpoint:", "")
            else:
                assert False, ("unknown model", model)
            from dataset import DetectionDataset
            # init tensorpack model
            # cfg.freeze(False)
            DetectionDataset()  # initialize the config with information from our dataset

            cfg.EXTRACT_GT_FEATURES = True
            cfg.MODE_TRACK = False
            extract_model = ResNetFPNModel()
            extract_ff_feats_cfg = PredictConfig(
                model=extract_model,
                session_init=get_model_loader(load),
                input_names=['image', 'roi_boxes'],
                output_names=['rpn/feature'])
            finalize_configs(is_training=False)
            self._extract_func = OfflinePredictor(extract_ff_feats_cfg)

            cfg.EXTRACT_GT_FEATURES = False
            cfg.MODE_TRACK = True
            cfg.USE_PRECOMPUTED_REF_FEATURES = True
            self._pred_func = self._make_pred_func(load)
    def __init__(self):
        super().__init__(name='ArgmaxTracker', is_deterministic=True)
        self._ref_img = None
        self._ref_bbox = None
        self._prev_box = None
        model = self._init_model()
        load = "train_log/condrcnn_onlygot/model-460000"
        predcfg = PredictConfig(
            model=model,
            session_init=get_model_loader(load),
            input_names=model.get_inference_tensor_names()[0],
            output_names=model.get_inference_tensor_names()[1])
        self._pred_func = OfflinePredictor(predcfg)
Example #10
def _test():
    import numpy as np
    # from tensorpack.tfutils import TowerContext
    from tensorpack import PredictConfig, OfflinePredictor

    pretrained = False

    models = [
        resnet10,
        resnet12,
        resnet14,
        resnet16,
        resnet18_wd4,
        resnet18_wd2,
        resnet18_w3d4,
        resnet18,
        resnet34,
        resnet50,
        resnet50b,
        resnet101,
        resnet101b,
        resnet152,
        resnet152b,
        resnet200,
        resnet200b,
        seresnet18,
        seresnet34,
        seresnet50,
        seresnet50b,
        seresnet101,
        seresnet101b,
        seresnet152,
        seresnet152b,
        seresnet200,
        seresnet200b,
    ]

    for model in models:

        net = model(pretrained=pretrained)

        pred_config = PredictConfig(session_init=None,
                                    model=net,
                                    input_names=['input'],
                                    output_names=['label'])

        pred = OfflinePredictor(pred_config)
        img = np.zeros((224, 224, 3), np.uint8)
        prediction = pred([img])[0]
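        # pred takes one positional argument per input name; [img] is a batch
        # of one image and [0] selects the first (and only) output tensor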
        print(prediction)
        pass
Example #11
    def prepare_sampling(self):
        """Prepare model for generate samples."""
        if self.model is None:
            self.model = self.get_model(training=False)
        else:
            self.model.training = False

        predict_config = PredictConfig(
            session_init=SaverRestore(self.restore_path),
            model=self.model,
            input_names=['z'],
            output_names=['gen/gen', 'z'],
        )

        self.simple_dataset_predictor = SimpleDatasetPredictor(
            predict_config, RandomZData((self.batch_size, self.z_dim)))
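        # iterating this predictor's get_result() draws batches of latent
        # vectors from RandomZData and yields the generated samples together
        # with the z that produced them ('gen/gen' and 'z')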
Example #12
def test(args):
    from imageio import imsave
    from tictoc import Timer
    data_folder = args.get("data_folder")
    image_size = args.get("image_size")
    batch_size = args.get("batch_size") or BATCH
    test_ckpt = args.get("test_ckpt")
    test_folder = args.get("test_folder")
    if not os.path.exists(test_folder):
        os.makedirs(test_folder)
    pred_config = PredictConfig(
        model=Style2PO(args),
        session_init=SmartInit(test_ckpt),
        input_names=["image_input", "image_target"],
        output_names=['stages-target/viz', 'loss_output'])
    predictor = OfflinePredictor(pred_config)
    zmin, zmax = (0, 1) if args.get("act_input") == "identity" else (-1, 1)
    test_ds = get_data(data_folder, image_size, False, zmin, zmax, batch_size)
    test_ds.reset_state()
    idx = 0
    losses = list()
    print("------------------ predict --------------")
    timer = Timer("predict", tic=True, show=Timer.STDOUT)
    for rz, it in test_ds:
        output_array, loss_output = predictor(rz, it)
        if output_array.ndim == 4:
            for i in range(output_array.shape[0]):
                imsave(os.path.join(test_folder, "test-{}.jpg".format(idx)),
                       output_array[i])
                idx += 1
        else:
            imsave(os.path.join(test_folder, "test-{}.jpg".format(idx)),
                   output_array)
            idx += 1
        losses.append(loss_output)
        print("loss #", idx, "=", loss_output)
    timer.toc(Timer.STDOUT)
    print("Test and save", idx, "images to", test_folder, "avg loss =",
          np.mean(losses))
Example #13
def export_eval_protobuf_model(checkpoint_dir, model_name, dataset, quant_type,
                               output_file, batch_size):
    _, test_data, (img_shape, label_shape) = datasets.DATASETS[dataset]()

    model_func, input_spec, output_spec = get_model_func(
        "eval", model_name, quant_type, img_shape, label_shape[0])
    input_names = [i.name for i in input_spec]
    output_names = [o.name for o in output_spec]
    predictor_config = PredictConfig(session_init=SaverRestore(checkpoint_dir +
                                                               "/checkpoint"),
                                     tower_func=model_func,
                                     input_signature=input_spec,
                                     input_names=input_names,
                                     output_names=output_names,
                                     create_graph=False)

    print("Exporting optimised protobuf graph...")
    K.set_learning_phase(False)
    ModelExporter(predictor_config).export_compact(output_file, optimize=False)
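    # export_compact freezes the graph (variables folded into constants) and
    # writes it to output_file; optimize=False skips the optional optimisation pass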

    K.clear_session()
    pred = OfflinePredictor(predictor_config)

    test_data = BatchData(test_data, batch_size, remainder=True)
    test_data.reset_state()

    num_correct = 0
    num_processed = 0
    for img, label in tqdm(test_data):
        num_correct += sum(pred(img)[0].argmax(axis=1) == label.argmax(axis=1))
        num_processed += img.shape[0]

    print("Exported model has accuracy {:.4f}".format(num_correct /
                                                      num_processed))

    return input_names, output_names, {i.name: i.shape for i in input_spec}
Example #14
    def gen_pred_config(self):
        if self.inf_auto_find_chkpt:
            self.inf_model_path = os.path.join(
                self.save_dir,
                str(
                    max([
                        int(x) for x in [
                            name for name in os.listdir(self.save_dir)
                            if os.path.isdir(os.path.join(self.save_dir, name))
                        ]
                    ])),
            )
            print(f"Inference model path: <{self.inf_model_path}>")
            print(
                '-----Auto Selecting Checkpoint Based On "%s" Through "%s" Comparison'
                % (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.inf_model_path,
                                               self.inf_auto_metric,
                                               self.inf_auto_comparator)
            print("Selecting: %s" % model_path)
            print("Having Following Statistics:")
            for key, value in stat.items():
                print("\t%s: %s" % (key, value))
            sess = get_model_loader(model_path)
        else:
            model_path = self.inf_model_path
            sess = SaverRestoreRelaxed(self.inf_model_path)

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model=model_constructor(),
            session_init=sess,
            input_names=self.eval_inf_input_tensor_names,
            output_names=self.eval_inf_output_tensor_names,
        )
        return pred_config
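
The config returned by gen_pred_config() is normally handed straight to an OfflinePredictor. A minimal usage sketch, assuming `infer` is an instance of the class defining the method above and `input_batches` holds one array per name in eval_inf_input_tensor_names (both names are placeholders, not from the original code):

from tensorpack import OfflinePredictor

predictor = OfflinePredictor(infer.gen_pred_config())  # builds the inference graph and restores the checkpoint
outputs = predictor(*input_batches)  # returned arrays follow eval_inf_output_tensor_names order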
Example #15
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # ROM_FILE = args.rom
    METHOD = args.algo
    # set num_actions
    init_player = MedicalPlayer(directory=data_dir,
                                files_list=test_list,
                                screen_dims=IMAGE_SIZE,
                                spacing=SPACING)
    NUM_ACTIONS = init_player.action_space.n
    num_validation_files = init_player.files.num_files

    if args.task != 'train':
        assert args.load is not None
        pred = OfflinePredictor(PredictConfig(
            model=Model(),
            session_init=get_model_loader(args.load),
            input_names=['state'],
            output_names=['Qvalue']))
        if args.task == 'play':
            t0 = time.time()
            play_n_episodes(get_player(directory=data_dir,
                                       files_list=test_list, viz=0.01,
                                       saveGif=args.saveGif,
                                       saveVideo=args.saveVideo),
                            pred, num_validation_files)

            t1 = time.time()
            print(t1-t0)
        elif args.task == 'eval':
            eval_model_multithread(pred, EVAL_EPISODE, get_player)
    else:
Example #16
    ##########################################################
    # initialize states and Qvalues for the various agents
    state_names = []
    qvalue_names = []
    for i in range(0, args.agents):
        state_names.append('state_{}'.format(i))
        qvalue_names.append('Qvalue_{}'.format(i))

############################################################

    if args.task != 'train':
        assert args.load is not None
        pred = OfflinePredictor(
            PredictConfig(model=Model(agents=args.agents),
                          session_init=get_model_loader(args.load),
                          input_names=state_names,
                          output_names=qvalue_names))
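        # with N agents the predictor consumes N state arrays and returns N
        # Q-value arrays, matching the state_names / qvalue_names built above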
        # demo pretrained model one episode at a time
        if args.task == 'play':
            play_n_episodes(get_player(files_list=args.files,
                                       viz=0.01,
                                       saveGif=args.saveGif,
                                       saveVideo=args.saveVideo,
                                       task='play',
                                       agents=args.agents),
                            pred,
                            num_files,
                            agents=args.agents,
                            reward_strategy=args.reward_strategy)
        # run episodes in parallel and evaluate pretrained model
        elif args.task == 'eval':
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('image_path')
    parser.add_argument('--model_path', default='log/checkpoint')
    parser.add_argument('--output_path', default='figures/')
    parser.add_argument('--size', type=int, default=32)
    args = parser.parse_args()

    np.random.seed(0)
    # initialize the model
    predict_func = OfflinePredictor(
        PredictConfig(inputs_desc=[
            InputDesc(tf.float32, [None, INPUT_SIZE, INPUT_SIZE, 2],
                      'input_image')
        ],
                      tower_func=model.feedforward,
                      session_init=SaverRestore(args.model_path),
                      input_names=['input_image'],
                      output_names=['prob']))
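    # this predictor is built from a raw tower function plus an explicit
    # InputDesc rather than a ModelDesc; predict_func(image_batch) returns 'prob'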

    # simulate suda's gridworld input
    image = cv2.imread(
        args.image_path,
        cv2.IMREAD_GRAYSCALE)  # 0 if obstacle, 255 if free space
    h, w = image.shape[:2]
    obj = img2obj(image)  # list containing row major indices of objects

    # specify position in recent memory
    radius = 6
    #s = [340/2, 110/2]  # needs to be a list
    s = [131, 147, 162]
Example #18
    METHOD = args.algo
    # load files into env to set num_actions, num_validation_files
    init_player = MedicalPlayer(
        files_list=args.files,  #files_list=files_list,
        data_type=args.type,
        screen_dims=IMAGE_SIZE,
        task='play')
    NUM_ACTIONS = init_player.action_space.n
    num_files = init_player.files.num_files

    if args.task != 'train':
        assert args.load is not None
        pred = OfflinePredictor(
            PredictConfig(model=Model(IMAGE_SIZE, FRAME_HISTORY, METHOD,
                                      NUM_ACTIONS, GAMMA, args.trainable),
                          session_init=get_model_loader(args.load),
                          input_names=['state'],
                          output_names=['Qvalue']))
        # demo pretrained model one episode at a time
        if args.task == 'play':
            play_n_episodes(get_player(files_list=args.files,
                                       data_type=args.type,
                                       viz=0,
                                       saveGif=args.saveGif,
                                       saveVideo=args.saveVideo,
                                       task='play'),
                            pred,
                            num_files,
                            viewer=None)

        # run episodes in parallel and evaluate pretrained model