def fit(self, data):
        """Fit the model to the given data.

        Args:
            data (pandas.DataFrame): Dataset to fit the model to.

        Returns:
            None

        """

        # Fit the preprocessor and transform the training data.
        self.preprocessor = Preprocessor(
            continuous_columns=self.continuous_columns)
        data = self.preprocessor.fit_transform(data)
        self.metadata = self.preprocessor.metadata

        # Wrap the transformed data in a batched, queued tensorpack dataflow.
        dataflow = TGANDataFlow(data, self.metadata)
        batch_data = BatchData(dataflow, self.batch_size)
        input_queue = QueueInput(batch_data)

        self.model = self.get_model(training=True)

        from tensorpack.callbacks import CometMLMonitor

        trainer = SeparateGANTrainer(
            model=self.model,
            input_queue=input_queue,
            g_period=6,
        )

        self.restore_path = os.path.join(self.model_dir, 'checkpoint')

        # Resume from the last checkpoint if one exists and restoring is enabled.
        if os.path.isfile(self.restore_path) and self.restore_session:
            session_init = SaverRestore(self.restore_path)
            with open(os.path.join(self.log_dir, 'stats.json')) as f:
                starting_epoch = json.load(f)[-1]['epoch_num'] + 1

        else:
            session_init = None
            starting_epoch = 1

        action = 'k' if self.restore_session else 'd'
        # logger.set_logger_dir(self.log_dir, action=action)

        callbacks = []
        monitors = []
        if self.save_checkpoints:
            callbacks.append(ModelSaver(checkpoint_dir=self.model_dir))
        callbacks.append(MergeAllSummaries(period=10))

        if self.experiment is not None:
            monitors.append(CometMLMonitor(experiment=self.experiment))

        trainer.train_with_defaults(callbacks=callbacks,
                                    monitors=monitors,
                                    steps_per_epoch=self.steps_per_epoch,
                                    max_epoch=self.max_epoch,
                                    session_init=session_init,
                                    starting_epoch=starting_epoch)

        self.prepare_sampling()
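
For context, a minimal usage sketch of the method above. The TGANModel class name, the import path, and the constructor arguments follow the public tgan package and are assumptions here, since the snippet only shows the fit() body.

import pandas as pd

from tgan.model import TGANModel   # assumed import path for the wrapper exposing fit()

data = pd.read_csv('census.csv')   # hypothetical training table
continuous_columns = [0, 5, 16]    # hypothetical indices of continuous columns

model = TGANModel(
    continuous_columns=continuous_columns,
    batch_size=200,
    steps_per_epoch=1000,
    max_epoch=5,
)
model.fit(data)                    # preprocess, build the GAN, train, then prepare sampling
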
Example #2
def init(args=None, is_running=0, pt=None):
    global ckpt2
    # Build the network.
    model = Net2()
    # Pick the checkpoint: an explicit name if given, otherwise the latest one in logdir2.
    if is_running == 1:
        if pt == "":
            ckpt2 = tf.train.latest_checkpoint(logdir2)
        else:
            ckpt2 = '{}/{}'.format(logdir2, pt)
    else:
        ckpt2 = '{}/{}'.format(
            logdir2,
            args.ckpt) if args.ckpt else tf.train.latest_checkpoint(logdir2)
    session_inits = []
    if ckpt2:
        session_inits.append(SaverRestore(ckpt2))
    pred_conf = PredictConfig(
        model=model,
        input_names=['x_ppgs', 'x_mfccs', 'y_spec', 'y_mel'],
        output_names=['pred_spec', "ppgs"],
        session_init=ChainInit(session_inits))
    global predictor
    predictor = OfflinePredictor(pred_conf)
    if is_running == 1:
        return jsonify({"code": 0, "ckpt": ckpt2})
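
Usage note (not part of the example): the OfflinePredictor built above is called with one array per entry in input_names, in that order, and returns one array per entry in output_names. A hedged sketch with placeholder shapes, since the real shapes depend on Net2 and its hyperparameters:

import numpy as np

# Placeholder batches of size 1; the trailing dimensions are assumptions.
x_ppgs = np.zeros((1, 100, 61), dtype=np.float32)
x_mfccs = np.zeros((1, 100, 40), dtype=np.float32)
y_spec = np.zeros((1, 100, 513), dtype=np.float32)
y_mel = np.zeros((1, 100, 80), dtype=np.float32)

# After init() has built the global predictor:
pred_spec, ppgs = predictor(x_ppgs, x_mfccs, y_spec, y_mel)  # outputs follow output_names order
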
Example #3
    def __init__(self, config):
        """Create a quality that uses nomagic_net as a quality function. """
        from nomagic_submission import ConvNetModel
        from tensorpack.predict.config import PredictConfig
        from tensorpack import SaverRestore
        from tensorpack.predict import OfflinePredictor

        # store parameters
        self._model_path = config['gqcnn_model']
        self._batch_size = config['batch_size']
        self._crop_height = config['crop_height']
        self._crop_width = config['crop_width']
        self._im_height = config['im_height']
        self._im_width = config['im_width']
        self._num_channels = config['num_channels']
        self._pose_dim = config['pose_dim']
        self._gripper_mode = config['gripper_mode']

        # init config
        model = ConvNetModel()
        self._config = PredictConfig(model=model,
                                     session_init=SaverRestore(
                                         self._model_path),
                                     output_names=['prob'])
        self._predictor = OfflinePredictor(self._config)
Example #4
    def __init__(self, config):
        """Create a quality that uses `nomagic_net` as a quality function."""
        from nomagic_submission import ConvNetModel
        from tensorpack import SaverRestore
        from tensorpack.predict import OfflinePredictor
        from tensorpack.predict.config import PredictConfig

        GraspQualityFunction.__init__(self)

        # Store parameters.
        self._model_path = config["gqcnn_model"]
        self._batch_size = config["batch_size"]
        self._crop_height = config["crop_height"]
        self._crop_width = config["crop_width"]
        self._im_height = config["im_height"]
        self._im_width = config["im_width"]
        self._num_channels = config["num_channels"]
        self._pose_dim = config["pose_dim"]
        self._gripper_mode = config["gripper_mode"]
        self._data_mean = config["data_mean"]
        self._data_std = config["data_std"]

        # Init config.
        model = ConvNetModel()
        self._config = PredictConfig(model=model,
                                     session_init=SaverRestore(
                                         self._model_path),
                                     output_names=["prob"])
        self._predictor = OfflinePredictor(self._config)
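
A hedged sketch of how such a predictor might be queried from another method of the same class; the input ordering (image batch, then pose batch) is an assumption, because the ConvNetModel input names are not shown in the example:

    def _predict_quality(self, image_batch, pose_batch):
        """Hypothetical helper: return the 'prob' output for a batch of grasps."""
        # OfflinePredictor is called positionally in the order of the model's
        # inputs and returns outputs in output_names order, so [0] is 'prob'.
        return self._predictor(image_batch, pose_batch)[0]
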
Example #5
    def fit(self, data):
        """Fit the model to the given data.

        Args:
            data (pandas.DataFrame): Dataset to fit the model to.

        Returns:
            None

        """
        self.preprocessor = Preprocessor(
            continuous_columns=self.continuous_columns)

        data = self.preprocessor.fit_transform(data)
        self.metadata = self.preprocessor.metadata
        dataflow = TGANDataFlow(data, self.metadata)
        batch_data = BatchData(dataflow, self.batch_size)
        input_queue = QueueInput(batch_data)

        self.model = self.get_model(training=True)

        # Select the trainer implementation by name.
        if self.trainer == 'GANTrainer':
            trainer = GANTrainer(model=self.model, input_queue=input_queue)
        elif self.trainer == 'SeparateGANTrainer':
            trainer = SeparateGANTrainer(model=self.model,
                                         input_queue=input_queue)
        else:
            raise ValueError(
                'Incorrect trainer name. Use GANTrainer or SeparateGANTrainer')

        # trainer = SeparateGANTrainer(model=self.model, input_queue=input_queue)

        self.restore_path = os.path.join(self.model_dir, 'checkpoint')

        if os.path.isfile(self.restore_path) and self.restore_session:
            session_init = SaverRestore(self.restore_path)
            with open(os.path.join(self.log_dir, 'stats.json')) as f:
                starting_epoch = json.load(f)[-1]['epoch_num'] + 1
        else:
            session_init = None
            starting_epoch = 1

        action = 'k' if self.restore_session else None
        logger.set_logger_dir(self.log_dir, action=action)

        callbacks = []
        if self.save_checkpoints:
            callbacks.append(ModelSaver(checkpoint_dir=self.model_dir))

        trainer.train_with_defaults(callbacks=callbacks,
                                    steps_per_epoch=self.steps_per_epoch,
                                    max_epoch=self.max_epoch,
                                    session_init=session_init,
                                    starting_epoch=starting_epoch)

        self.prepare_sampling()
Example #6
    def prepare_sampling(self):
        """Prepare model for generate samples."""
        if self.model is None:
            self.model = self.get_model(training=False)
        else:
            self.model.training = False

        predict_config = PredictConfig(
            session_init=SaverRestore(self.restore_path),
            model=self.model,
            input_names=['z'],
            output_names=['gen/gen', 'z'],
        )

        self.simple_dataset_predictor = SimpleDatasetPredictor(
            predict_config, RandomZData((self.batch_size, self.z_dim)))
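
For completeness, a hedged sketch of a sample() companion to prepare_sampling(), modeled on how tensorpack's SimpleDatasetPredictor.get_result() is typically consumed; the preprocessor's reverse_transform() call is an assumption not shown in these snippets:

    def sample(self, num_samples):
        """Hypothetical: draw num_samples generated rows after prepare_sampling()."""
        import numpy as np

        # Enough z batches to cover the requested number of rows.
        max_iters = (num_samples + self.batch_size - 1) // self.batch_size
        results = []
        # get_result() yields one tuple of outputs ('gen/gen', 'z') per z batch.
        for idx, output in enumerate(self.simple_dataset_predictor.get_result()):
            results.append(output[0])
            if idx + 1 == max_iters:
                break

        generated = np.concatenate(results, axis=0)
        # Map the generated matrix back to the original table format (assumed API).
        return self.preprocessor.reverse_transform(generated)[:num_samples]
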
    parser = argparse.ArgumentParser()
    parser.add_argument('image_path')
    parser.add_argument('--model_path', default='log/checkpoint')
    parser.add_argument('--output_path', default='figures/')
    parser.add_argument('--size', type=int, default=32)
    args = parser.parse_args()

    np.random.seed(0)
    # initialize the model
    predict_func = OfflinePredictor(
        PredictConfig(inputs_desc=[
            InputDesc(tf.float32, [None, INPUT_SIZE, INPUT_SIZE, 2],
                      'input_image')
        ],
                      tower_func=model.feedforward,
                      session_init=SaverRestore(args.model_path),
                      input_names=['input_image'],
                      output_names=['prob']))

    # simulate suda's gridworld input
    image = cv2.imread(
        args.image_path,
        cv2.IMREAD_GRAYSCALE)  # 0 if obstacle, 255 if free space
    h, w = image.shape[:2]
    obj = img2obj(image)  # list containing row major indices of objects

    # specify positions in recent memory
    radius = 6
    #s = [340/2, 110/2]  # needs to be a list
    s = [131, 147, 162]
    min_distance = 1  # minimum distance between each station (smaller means more locations are returned)