Example #1
    def __init__(self):
        """Wire up ROS pub/sub channels and load model configs and tokenizer.

        NOTE(review): relies on module-level constants (SEGMENTATION_TOPIC,
        FEATURES_TOPIC, OBJECT_TOPIC, CATEGORY_TOPIC, STATUS_TOPIC,
        *_CONF_PATH, TOKENS_PATH), message types, and helpers (`ros_utils`,
        `GWTokenizer`, `self._load_config`, the two callbacks) that are
        defined outside this snippet.
        """
        # Single-slot queues: callbacks hold at most the latest
        # segmentation / feature message (older ones block or are dropped
        # by the callback — presumably; confirm against the callbacks).
        self.segmentations = Queue(1)
        self.features = Queue(1)

        rospy.Subscriber(SEGMENTATION_TOPIC, SegmentedImage,
                         self._segmentation_callback)
        rospy.Subscriber(FEATURES_TOPIC, VGG16Features,
                         self._features_callback)
        self.object_found = rospy.Publisher(OBJECT_TOPIC,
                                            PointStamped,
                                            queue_size=1)
        self.category = rospy.Publisher(CATEGORY_TOPIC, String, queue_size=1)
        # latch=True: late subscribers immediately receive the last
        # published status message.
        self.status = rospy.Publisher(STATUS_TOPIC,
                                      String,
                                      queue_size=1,
                                      latch=True)

        # One config per model component (evaluator, guesser, question
        # generator).
        self.eval_config = self._load_config(EVAL_CONF_PATH)
        self.guesser_config = self._load_config(GUESS_CONF_PATH)
        self.qgen_config = self._load_config(QGEN_CONF_PATH)

        self.tokenizer = GWTokenizer(TOKENS_PATH)

        # allow_growth: let TensorFlow grab GPU memory on demand instead of
        # preallocating the whole device.
        self.tf_config = tf.ConfigProto(log_device_placement=False)
        self.tf_config.gpu_options.allow_growth = True

        self.image_dim = ros_utils.get_image_dim()
Example #2
    # NOTE(review): fragment of a larger training script — the enclosing
    # function and the definitions of `args`, `config`, `logger`, and
    # `crop_builder` are outside this snippet.
    if 'image' in config['model']:
        logger.info('Loading images..')
        image_builder = get_img_builder(config['model']['image'], args.img_dir)
        use_resnet = image_builder.is_raw_image()

        # Image input is deliberately unsupported for the guesser; abort
        # before building datasets. NOTE(review): `assert` is stripped under
        # `python -O` — a raise would be safer, but left as-is here.
        assert False, "Guesser + Image is not yet available"

    # Load data
    # NOTE(review): if 'image' is NOT in config, `image_builder` (and
    # possibly `crop_builder`) are never assigned in this fragment —
    # they must be initialized above the visible cut, otherwise this is
    # a NameError. Verify against the full script.
    logger.info('Loading data..')
    trainset = Dataset(args.data_dir, "train", image_builder, crop_builder)
    validset = Dataset(args.data_dir, "valid", image_builder, crop_builder)
    testset = Dataset(args.data_dir, "test", image_builder, crop_builder)

    # Load dictionary
    logger.info('Loading dictionary..')
    tokenizer = GWTokenizer(os.path.join(args.data_dir, args.dict_file))

    # Build Network
    logger.info('Building network..')
    network = GuesserNetwork(config['model'], num_words=tokenizer.no_words)

    # Build Optimizer
    logger.info('Building optimizer..')
    optimizer, outputs = create_optimizer(network, config["optimizer"])

    ###############################
    #  START  TRAINING
    #############################

    # Load config
    batch_size = config['model']['batch_size']
Example #3
        # NOTE(review): fragment starting inside an `if` whose condition is
        # above the visible cut (presumably `if 'image' in config['model']:`
        # as in the sibling scripts — confirm).
        logger.info('Loading images..')
        image_builder = get_img_builder(config['model']['image'], args.img_dir)
        use_resnet = image_builder.is_raw_image()

    # Load data
    # NOTE(review): `rcnn`, `crop_builder`, and `args.no_games_to_load` are
    # defined outside this snippet.
    logger.info('Loading data..')
    trainset = Dataset(args.data_dir, "train", image_builder, crop_builder,
                       rcnn, args.no_games_to_load)
    validset = Dataset(args.data_dir, "valid", image_builder, crop_builder,
                       rcnn, args.no_games_to_load)
    testset = Dataset(args.data_dir, "test", image_builder, crop_builder, rcnn,
                      args.no_games_to_load)

    # Load dictionary
    # NOTE(review): unlike the guesser script, the dict path is taken
    # verbatim from args (no join with data_dir) — confirm this asymmetry
    # is intentional.
    logger.info('Loading dictionary..')
    tokenizer = GWTokenizer(args.dict_file)

    # Build Network: factory returns the network plus the batchifier
    # constructor matched to the chosen model variant.
    logger.info('Building network..')
    network, batchifier_cstor = create_qgen(config["model"],
                                            num_words=tokenizer.no_words)

    # Build Optimizer
    logger.info('Building optimizer..')
    optimizer, outputs = create_optimizer(
        network, config["optimizer"])  # output:[loss, accuracy]

    ###############################
    #  START  TRAINING
    #############################
    # Load image
    logger.info('Loading images..')
    use_resnet = False
    if 'image' in config['model']:
        logger.info('Loading images..')
        image_builder = get_img_builder(config['model']['image'], args.img_dir)

    # Load data
    # NOTE(review): when 'image' is absent from config, `image_builder`
    # must come from outside this fragment (or default to None above the
    # cut) — otherwise this raises NameError. Verify in the full script.
    logger.info('Loading data..')
    trainset = Dataset(args.data_dir, "train", "guesswhat", image_builder, crop_builder)
    validset = Dataset(args.data_dir, "valid", "guesswhat", image_builder, crop_builder)
    testset = Dataset(args.data_dir, "test", "guesswhat", image_builder, crop_builder)

    # Load dictionary
    logger.info('Loading dictionary..')
    tokenizer = GWTokenizer(args.dict_file)

    # Build Network
    logger.info('Building network..')
    if args.rl_model:
        # RL fine-tuning: enable the policy-gradient head and restore ALL
        # qgen variables, including the rl_baseline ones.
        network = QGenNetworkLSTM(config["model"], num_words=tokenizer.no_words, policy_gradient=True)
        qgen_var = [v for v in tf.global_variables() if "qgen" in v.name]  # and 'rl_baseline' not in v.name
    else:
        # Supervised checkpoint: exclude rl_baseline variables, which are
        # absent from non-RL checkpoints and would make restore fail.
        network = QGenNetworkLSTM(config["model"], num_words=tokenizer.no_words, policy_gradient=False)
        qgen_var = [v for v in tf.global_variables() if "qgen" in v.name and 'rl_baseline' not in v.name]
    # Saver restricted to qgen variables only (other graph variables are
    # not saved/restored here).
    saver = tf.train.Saver(var_list=qgen_var)

    # Build Optimizer
    logger.info('Building optimizer..')
    # optimizer, outputs = create_optimizer(network, config)
    # NOTE(review): optimizer construction is commented out — this script
    # only fetches the decoder state (inference/generation, not training).
    outputs = [network.decoder_state]