def __init__(self, rare_words_handling=None, words_min_occur=None):
    """
    If an arg is None, it will get its value from config.active_config.

    Args:
      rare_words_handling: {'nothing'|'discard'|'change'}
      words_min_occur: words that occur fewer times than this are
                       considered rare words
    """
    self._tokenizer = Tokenizer(num_words=100000)
    self._rare_words_handling = (rare_words_handling or
                                 active_config().rare_words_handling)
    self._words_min_occur = (words_min_occur or
                             active_config().words_min_occur)
    self._word_of = {}
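
A minimal usage sketch of the fallback pattern above. The class name CaptionPreprocessor is an assumption inferred from the snippet, not stated in it.

# Hypothetical usage; 'CaptionPreprocessor' is an assumed name for the class
# whose constructor appears above.
preprocessor = CaptionPreprocessor(rare_words_handling='discard',
                                   words_min_occur=5)    # explicit overrides
default_preprocessor = CaptionPreprocessor()             # falls back to active_config()
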
def get_dataset_instance(dataset_name=None,
                         lemmatize_caption=None,
                         single_caption=False):
    """
    If an arg is None, it will get its value from config.active_config.
    """
    dataset_name = dataset_name or active_config().dataset_name
    lemmatize_caption = lemmatize_caption or active_config().lemmatize_caption

    for dataset_class in [Flickr8kDataset]:
        if dataset_class.DATASET_NAME == dataset_name:
            return dataset_class(lemmatize_caption=lemmatize_caption,
                                 single_caption=single_caption)

    raise ValueError('Cannot find {} dataset!'.format(dataset_name))
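
A short usage sketch for the factory above; the 'flickr8k' string is only a guess at Flickr8kDataset.DATASET_NAME and the 'mscoco' name is purely illustrative.

# Illustrative calls; the dataset name strings are assumptions.
dataset = get_dataset_instance()                        # name taken from active_config()
flickr8k = get_dataset_instance(dataset_name='flickr8k',
                                single_caption=True)    # explicit override
get_dataset_instance(dataset_name='mscoco')             # raises ValueError: only Flickr8kDataset is registered
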
Example #3
def main(training_dir,
         dataset_type='validation',
         method='beam_search',
         beam_size=3,
         max_caption_length=20):
    if method != 'beam_search':
        raise NotImplementedError('inference method = {} is not implemented '
                                  'yet!'.format(method))
    if dataset_type not in ['validation', 'test']:
        raise ValueError('dataset_type={} is not recognized!'.format(
                                                                dataset_type))

    hyperparam_path = os.path.join(training_dir, 'hyperparams-config.yaml')
    model_weights_path = os.path.join(training_dir, 'model-weights.hdf5')

    logging('Loading hyperparams config..')
    config = FileConfigBuilder(hyperparam_path).build_config()
    config = config._replace(word_vector_init=None)  # word vectors are not needed for inference
    active_config(config)
    model = ImageCaptioningModel()
    logging('Building model..')
    model.build()
    keras_model = model.keras_model
    logging('Loading model weights..')
    keras_model.load_weights(model_weights_path)

    inference = BeamSearchInference(keras_model,
                                    beam_size=beam_size,
                                    max_caption_length=max_caption_length)
    logging('Evaluating {} set..'.format(dataset_type))
    if dataset_type == 'test':
        metrics, predictions = inference.evaluate_test_set(
                                                    include_prediction=True)
    else:
        metrics, predictions = inference.evaluate_validation_set(
                                                    include_prediction=True)

    logging('Writing results to files..')
    metrics_path = os.path.join(training_dir,
            '{}-metrics-{}-{}.yaml'.format(dataset_type, beam_size,
                                           max_caption_length))
    predictions_path = os.path.join(training_dir,
            '{}-predictions-{}-{}.yaml'.format(dataset_type, beam_size,
                                               max_caption_length))
    write_yaml_file(metrics, metrics_path)
    write_yaml_file(predictions, predictions_path)

    logging('Done!')
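
A hypothetical invocation of main; the training directory path below is a placeholder and assumes a completed run that already wrote hyperparams-config.yaml and model-weights.hdf5.

# Illustrative call; the path is a placeholder, not a real run.
main(training_dir='results/flickr8k-run-01',
     dataset_type='test',
     method='beam_search',
     beam_size=5,
     max_caption_length=20)
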
    def __init__(self,
                 training_label_prefix,
                 dataset_name=None,
                 epochs=None,
                 time_limit=None,
                 num_gpus=None):
        if not ((epochs is None) ^ (time_limit is None)):
            raise ValueError('Either epochs or time_limit must be present, '
                             'but not both!')

        self._training_label_prefix = training_label_prefix
        self._dataset_name = dataset_name or active_config().dataset_name
        self._validate_training_label_prefix()

        self._epochs = epochs
        self._time_limit = time_limit
        fixed_config_keys = dict(dataset_name=self._dataset_name,
                                 epochs=self._epochs,
                                 time_limit=self._time_limit)
        self._config_builder = Embed300FineRandomConfigBuilder(
                                                            fixed_config_keys)

        try:
            self._num_gpus = len(sh.nvidia_smi('-L').split('\n')) - 1
        except sh.CommandNotFound:
            self._num_gpus = 1
        self._num_gpus = num_gpus or self._num_gpus

        # TODO: Replace set with a thread-safe set
        self._available_gpus = set(range(self.num_gpus))
        self._semaphore = Semaphore(self.num_gpus)
        self._running_commands = []  # a list of (index, sh.RunningCommand)
        self._stop_search = False
        self._lock = Lock()
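
The XOR guard at the top of this constructor accepts exactly one stopping criterion; the small check below spells out the four cases with illustrative values.

# Exactly one of epochs / time_limit may be set (values are illustrative).
for epochs, time_limit in [(10, None), (None, 3600), (None, None), (10, 3600)]:
    valid = (epochs is None) ^ (time_limit is None)
    print(epochs, time_limit, 'accepted' if valid else 'rejected')
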
def __init__(self, image_augmentation=None):
    self._image_data_generator = ImageDataGenerator(rotation_range=40,
                                                    width_shift_range=0.2,
                                                    height_shift_range=0.2,
                                                    shear_range=0.2,
                                                    zoom_range=0.2,
                                                    horizontal_flip=True,
                                                    fill_mode='nearest')
    if image_augmentation is None:
        self._image_augmentation_switch = active_config().image_augmentation
    else:
        self._image_augmentation_switch = image_augmentation
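
The explicit is None test here (rather than the image_augmentation or active_config().image_augmentation shorthand used elsewhere) matters because a caller may pass False on purpose. The toy comparison below, with True standing in for the config value, shows what the shorthand would lose.

# Toy illustration: `or` drops an explicit False, `is None` keeps it.
image_augmentation = False
config_value = True                            # stand-in for active_config().image_augmentation
via_or = image_augmentation or config_value    # True  -- the explicit override is silently lost
via_is_none = (config_value if image_augmentation is None
               else image_augmentation)        # False -- the explicit override is respected
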
Example #6
    def __init__(self,
                 training_label,
                 conf=None,
                 model_weights_path=None,
                 min_delta=1e-4,
                 min_lr=1e-7,
                 log_metrics_period=4,
                 explode_ratio=0.25,
                 explode_patience=2,
                 max_q_size=10,
                 workers=1,
                 verbose=1):
        """
        Args:
          conf: an instance of config.Config; its properties:
            epochs
            time_limit
            reduce_lr_factor
            reduce_lr_patience
            early_stopping_patience
        """
        self._training_label = training_label
        self._config = conf or config.active_config()
        self._epochs = self._config.epochs
        self._time_limit = self._config.time_limit
        self._reduce_lr_factor = self._config.reduce_lr_factor
        self._reduce_lr_patience = self._config.reduce_lr_patience
        self._early_stopping_patience = self._config.early_stopping_patience
        self._model_weights_path = model_weights_path
        self._min_delta = min_delta
        self._min_lr = min_lr
        self._log_metrics_period = log_metrics_period
        self._explode_ratio = explode_ratio
        self._explode_patience = explode_patience
        self._max_q_size = max_q_size
        self._workers = workers
        self._verbose = verbose

        if not ((self._epochs is None) ^ (self._time_limit is None)):
            raise ValueError('Either conf.epochs or conf.time_limit must be '
                             'set, but not both!')

        if self._time_limit:
            self._epochs = sys.maxsize

        self._activate_config_and_init_dataset_provider()
        self._init_result_dir()
        self._init_callbacks()
        self._model = ImageCaptioningModel()
        self._write_active_config()

        self._stop_training = False
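
A hypothetical construction of the trainer whose constructor is shown above; 'Training' is an assumed class name, and the _replace call assumes the config object is a namedtuple, as the other snippets here treat it.

import config  # the project's config module, as referenced above

conf = config.active_config()._replace(epochs=2, time_limit=None)
trainer = Training('my-run-01', conf=conf)     # 'Training' is an assumed name
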
Example #7
def __init__(self,
             batch_size=None,
             dataset=None,
             image_preprocessor=None,
             caption_preprocessor=None,
             single_caption=False):
    """
    If an arg is None, it will get its value from config.active_config.
    """
    self._batch_size = batch_size or active_config().batch_size
    self._dataset = (dataset or
                     get_dataset_instance(single_caption=single_caption))
    self._image_preprocessor = image_preprocessor or ImagePreprocessor()
    self._caption_preprocessor = (caption_preprocessor
                                  or CaptionPreprocessor())
    self._single_caption = single_caption
    self._build()
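
A brief usage sketch of this provider; 'DatasetProvider' is an assumed name for the class whose constructor appears above, and every omitted argument falls back to active_config().

# Hypothetical usage; the class name is assumed.
provider = DatasetProvider(batch_size=32, single_caption=True)  # explicit overrides
default_provider = DatasetProvider()                            # everything from active_config()
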
Example #8
    def __init__(self,
                 learning_rate=None,
                 vocab_size=None,
                 embedding_size=None,
                 rnn_output_size=None,
                 dropout_rate=None,
                 bidirectional_rnn=None,
                 rnn_type=None,
                 rnn_layers=None,
                 l1_reg=None,
                 l2_reg=None,
                 initializer=None,
                 word_vector_init=None):
        """
        If an arg is None, it will get its value from config.active_config.
        """
        self._learning_rate = learning_rate or active_config().learning_rate
        self._vocab_size = vocab_size or active_config().vocab_size
        self._embedding_size = embedding_size or active_config().embedding_size
        self._rnn_output_size = (rnn_output_size
                                 or active_config().rnn_output_size)
        self._dropout_rate = dropout_rate or active_config().dropout_rate
        self._rnn_type = rnn_type or active_config().rnn_type
        self._rnn_layers = rnn_layers or active_config().rnn_layers
        self._word_vector_init = (word_vector_init
                                  or active_config().word_vector_init)

        self._initializer = initializer or active_config().initializer
        if self._initializer == 'vinyals_uniform':
            self._initializer = RandomUniform(-0.08, 0.08)

        if bidirectional_rnn is None:
            self._bidirectional_rnn = active_config().bidirectional_rnn
        else:
            self._bidirectional_rnn = bidirectional_rnn

        l1_reg = l1_reg or active_config().l1_reg
        l2_reg = l2_reg or active_config().l2_reg
        self._regularizer = l1_l2(l1_reg, l2_reg)

        self._keras_model = None

        if self._vocab_size is None:
            raise ValueError('config.active_config().vocab_size cannot be '
                             'None! You should check your config or you can '
                             'explicitly pass the vocab_size argument.')

        if self._rnn_type not in ('lstm', 'gru'):
            raise ValueError('rnn_type must be either "lstm" or "gru"!')

        if self._rnn_layers < 1:
            raise ValueError('rnn_layers must be >= 1!')

        if self._word_vector_init is not None and self._embedding_size != 300:
            raise ValueError('If word_vector_init is not None, embedding_size '
                             'must be 300')
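
A sketch of how the validation rules above play out. Argument values are illustrative, 'glove' merely stands for any non-None word_vector_init, and the uncommented call assumes an active config supplies the remaining fields consistently.

# Illustrative constructions mirroring the checks above.
ImageCaptioningModel(vocab_size=10000, rnn_type='lstm', rnn_layers=1)   # OK
# ImageCaptioningModel(vocab_size=10000, rnn_type='rnn')                # ValueError: rnn_type
# ImageCaptioningModel(vocab_size=10000, rnn_layers=0)                  # ValueError: rnn_layers
# ImageCaptioningModel(word_vector_init='glove', embedding_size=128)    # ValueError: embedding_size must be 300
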
Example #9
def _write_active_config(self):
    CONFIG_FILENAME = 'hyperparams-config.yaml'
    self._config_filepath = self._path_from_result_dir(CONFIG_FILENAME)
    config.write_to_file(config.active_config(), self._config_filepath)
Example #10
def _activate_config_and_init_dataset_provider(self):
    config.active_config(self._config)
    self._dataset_provider = DatasetProvider()
    config.init_vocab_size(self._dataset_provider.vocab_size)

def config_used(self):
    config = active_config()
    config = config._replace(epochs=2, time_limit=None, batch_size=2)
    return config