Example #1
    def _build_model(self,
                     dataset: Dataset,
                     n_user_dim: int = 1,
                     n_item_dim: int = 1,
                     n_factors: int = 50,
                     **kwargs: Any) -> Model:
        """
        Build a Keras model, in this case a GeneralizedMatrixFactorizationModel (GMF)
        model. See [1] for more info. The original code released with [1] can be
        found at [2].

        Parameters
        ----------
        dataset: Dataset
            The input dataset. This is used to specify the 'vocab' size of each of the
            'embedding blocks' (of which there are two in this architecture).
        n_user_dim: int
            The dimensionality of the user input vector. When using metadata, you should
            make sure to set this to the size of each of these vectors.
        n_item_dim: int
            The dimensionality of the item input vector. When using metadata, you should
            make sure to set this to the size of each of these vectors.
        n_factors: int
            The dimensionality of the latent feature space _for both users and items_
            for the GMF component of the architecture.

        Returns
        -------
        output: Model
            The 'complete' Keras Model object.

        References
        ----------
        [1] He et al. https://dl.acm.org/doi/10.1145/3038912.3052569
        [2] https://github.com/hexiangnan/neural_collaborative_filtering
        """

        n_user_vocab = dataset.all_users.shape[0]
        n_item_vocab = dataset.all_items.shape[0]

        if dataset.user_meta is not None:
            n_user_vocab += dataset.user_meta.shape[1]
        if dataset.item_meta is not None:
            n_item_vocab += dataset.item_meta.shape[1]

        user_input, user_bias, user_factors = utils.get_embedding_block(
            n_user_vocab, n_user_dim, n_factors, **kwargs)
        item_input, item_bias, item_factors = utils.get_embedding_block(
            n_item_vocab, n_item_dim, n_factors, **kwargs)

        body = Multiply()([user_factors, item_factors])
        output = Dense(1,
                       activation="sigmoid",
                       kernel_initializer=lecun_uniform())(body)

        return Model(inputs=[user_input, item_input], outputs=output)
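
`utils.get_embedding_block` is project-specific and not shown on this page. Below is a minimal sketch of what such a helper plausibly looks like, assuming standalone Keras; the name and return convention (input tensor, bias embedding, factor embedding) are inferred from the call sites above, so treat it as a hypothetical reconstruction:

from keras.layers import Input, Embedding, Flatten

def get_embedding_block(n_vocab, n_dim, n_factors, **kwargs):
    # Hypothetical reconstruction: one ID input per entity, a scalar bias
    # embedding, and an n_factors-dimensional latent embedding.
    inputs = Input(shape=(n_dim,))
    bias = Flatten()(Embedding(n_vocab, 1)(inputs))
    factors = Flatten()(Embedding(n_vocab, n_factors)(inputs))
    return inputs, bias, factors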
Example #2
def build_initializer(type, kerasDefaults, seed=None, constant=0.):
    """ Set the initializer to the appropriate Keras initializer function
        based on the input string and learning rate. Other required values
        are set to the Keras default values

        Parameters
        ----------
        type : string
            String to choose the initializer

            Options recognized: 'constant', 'uniform', 'normal',
            'glorot_uniform', 'lecun_uniform', 'he_normal'

            See the Keras documentation for a full description of the options

        kerasDefaults : dict
            Dictionary of default parameter values to ensure consistency between frameworks

        seed : integer
            Random number seed

        constant : float
            Constant value (for the constant initializer only)

        Returns
        -------
        The appropriate Keras initializer function
    """

    if type == 'constant':
        return initializers.Constant(value=constant)

    elif type == 'uniform':
        return initializers.RandomUniform(minval=kerasDefaults['minval_uniform'],
                                          maxval=kerasDefaults['maxval_uniform'],
                                          seed=seed)

    elif type == 'normal':
        return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
                                         stddev=kerasDefaults['stddev_normal'],
                                         seed=seed)

    elif type == 'glorot_normal':
        # aka Xavier normal initializer. keras default
        return initializers.glorot_normal(seed=seed)

    elif type == 'glorot_uniform':
        return initializers.glorot_uniform(seed=seed)

    elif type == 'lecun_uniform':
        return initializers.lecun_uniform(seed=seed)

    elif type == 'he_normal':
        return initializers.he_normal(seed=seed)
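
A usage sketch follows; the kerasDefaults keys are taken from the calls above, while the concrete values here are illustrative only. Note that an unrecognized name falls through every branch, so the function implicitly returns None.

kerasDefaults = {
    'minval_uniform': -0.05, 'maxval_uniform': 0.05,  # illustrative values
    'mean_normal': 0.0, 'stddev_normal': 0.05,
}
init = build_initializer('lecun_uniform', kerasDefaults, seed=42)
# 'init' can now be passed as kernel_initializer to any Keras layer.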
Example #3
def create_prm_initializer(prm):

    if prm['initializer'] is None:
        prm['initializer_func'] = None
    elif prm['initializer'] == 'glorot_normal':
        prm['initializer_func'] = glorot_normal()
    elif prm['initializer'] == 'lecun_uniform':
        prm['initializer_func'] = lecun_uniform()
    elif prm['initializer'] == 'lecun_normal':
        prm['initializer_func'] = lecun_normal()
    else:
        # Unrecognised name: set the key to None rather than leaving it unset.
        prm['initializer_func'] = None

    return prm
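
A short usage sketch; the shape of the prm dict is inferred from the function body:

prm = {'initializer': 'lecun_uniform'}
prm = create_prm_initializer(prm)
# prm['initializer_func'] is now an initializer instance suitable for
# e.g. Dense(64, kernel_initializer=prm['initializer_func']).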
Example #4
    def create_str_to_initialiser_converter(self):
        """Creates a dictionary which converts strings to initialisers.

        Every value is an uninstantiated factory, so callers can pass a
        seed (or other arguments) when instantiating."""
        str_to_initialiser_converter = {
            "glorot_normal": initializers.glorot_normal,
            "glorot_uniform": initializers.glorot_uniform,
            "xavier_normal": initializers.glorot_normal,
            "xavier_uniform": initializers.glorot_uniform,
            "xavier": initializers.glorot_uniform,
            "he_normal": initializers.he_normal,
            "he_uniform": initializers.he_uniform,
            "lecun_normal": initializers.lecun_normal,
            "lecun_uniform": initializers.lecun_uniform,
            "truncated_normal": initializers.TruncatedNormal,
            "variance_scaling": initializers.VarianceScaling,
            "default": initializers.glorot_uniform
        }
        return str_to_initialiser_converter
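
Since every entry in the fixed dictionary is an uninstantiated factory, a seed can be supplied at lookup time. A usage sketch, assuming `agent` is an instance of the enclosing class:

from keras.layers import Dense

converter = agent.create_str_to_initialiser_converter()
init = converter["he_normal"](seed=0)  # instantiate with a fixed seed
layer = Dense(32, kernel_initializer=init)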
Example #5
def build_initializer(type, kerasDefaults, seed=None, constant=0.):
    """ Set the initializer to the appropriate Keras initializer function
        based on the input string and learning rate. Other required values
        are set to the Keras default values

        Parameters
        ----------
        type : string
            String to choose the initializer

            Options recognized: 'constant', 'uniform', 'normal',
            'glorot_uniform', 'lecun_uniform', 'he_normal'

            See the Keras documentation for a full description of the options

        Returns
        -------
        The appropriate Keras initializer function
    """

    if type == 'constant':
        return initializers.Constant(value=constant)

    elif type == 'uniform':
        return initializers.RandomUniform(minval=kerasDefaults['minval_uniform'],
                                          maxval=kerasDefaults['maxval_uniform'],
                                          seed=seed)

    elif type == 'normal':
        return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
                                         stddev=kerasDefaults['stddev_normal'],
                                         seed=seed)

# Not generally available
#    elif type == 'glorot_normal':
#        return initializers.glorot_normal(seed=seed)

    elif type == 'glorot_uniform':
        return initializers.glorot_uniform(seed=seed)

    elif type == 'lecun_uniform':
        return initializers.lecun_uniform(seed=seed)

    elif type == 'he_normal':
        return initializers.he_normal(seed=seed)
Example #6
    def vgg16lu(self):
        # Build a VGG16-style network with self.num_classes outputs, heavy
        # dropout and weight decay as described in the VGG paper, using the
        # LeCun uniform initializer throughout.
        model = Sequential()
        weight_decay = 0.0005

        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   input_shape=(self.size, self.size, self.num_channels),
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.3, seed=1))

        model.add(
            Conv2D(64, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.4, seed=1))

        model.add(
            Conv2D(128, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.4, seed=1))

        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.4, seed=1))

        model.add(
            Conv2D(256, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.4, seed=1))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.4, seed=1))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.4, seed=1))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())
        model.add(Dropout(0.4, seed=1))

        model.add(
            Conv2D(512, (3, 3),
                   padding='same',
                   kernel_initializer=lecun_uniform(seed=1),
                   kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(self.__activation())
        model.add(BatchNormalization())

        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5, seed=1))

        model.add(Flatten())
        model.add(
            Dense(512,
                  kernel_regularizer=regularizers.l2(weight_decay),
                  kernel_initializer=lecun_uniform(seed=1)))
        model.add(self.__activation())
        model.add(BatchNormalization())

        model.add(Dropout(0.5, seed=1))
        model.add(
            Dense(self.num_classes, kernel_initializer=lecun_uniform(seed=1)))
        model.add(Activation('softmax'))

        return model
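
The repeated Conv2D → activation → BatchNormalization (→ Dropout) pattern above invites a helper. Below is a hypothetical, behavior-preserving refactor of one stage, using the same imports and assuming it is defined on the same class as vgg16lu (so the name-mangled self.__activation call resolves):

def _conv_block(self, model, filters, weight_decay=0.0005, dropout=None):
    # One VGG stage: 3x3 convolution, activation, batch norm, optional dropout.
    model.add(Conv2D(filters, (3, 3),
                     padding='same',
                     kernel_initializer=lecun_uniform(seed=1),
                     kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(self.__activation())
    model.add(BatchNormalization())
    if dropout is not None:
        model.add(Dropout(dropout, seed=1))

Each stage of vgg16lu then collapses to calls like self._conv_block(model, 256, dropout=0.4).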
Example #7
    def __init__(
        self,
        # Data
        generator_text: Optional[Union[str, List[str]]] = None,
        responder_text: Optional[List[str]] = None,
        command_text: Optional[List[str]] = None,
        grammar: Optional[Union[Dict[str, str], Grammar]] = None,
        # Models
        chain: Optional[Union[Dict[str, Any], MarkovText]] = None,
        phraser: Optional[Union[Dict[str, Any], Phraser]] = None,
        word_vectors: Optional[Union[Dict[str, Any], KeyedVectors]] = None,
        nn: Optional[Union[Dict[str, Any], Model]] = None,
        # Chatterbot
        commander: Optional[ChatBot] = None,
        **kwargs: int,
    ):
        # Defaults (setdefault, so caller-supplied values take precedence)
        for key, value in {
                "word_vector_size": 256,
                "min_count": 5,
                "max_vocab_size": 40000000,
        }.items():
            kwargs.setdefault(key, value)

        self.nlp = spacy.load("en")

        corpus = list(map(self.word_split, responder_text))

        # Chain
        if (not chain) or isinstance(chain, dict):
            chain = chain or {}
            for Key, Value in {
                    "state_size": 2,
                    "retain_original": True
            }.items():
                chain.setdefault(Key, Value)

            MarkovText.__init__(
                self,
                None,
                state_size=chain["state_size"],
                parsed_sentences=corpus +
                list(self.generate_corpus(generator_text)),
                retain_original=chain["retain_original"],
            )
        else:
            MarkovText.__init__(
                self,
                None,
                state_size=chain.state_size,
                chain=chain,
                parsed_sentences=chain.parsed_sentences,
                retain_original=chain.retain_original,
            )

        corpus = [[word.split(self.separator)[0] for word in sentence]
                  for sentence in corpus]

        # Phraser
        if (not phraser) or isinstance(phraser, dict):
            phraser = phraser or {}
            for Key, Value in {"gram_size": 3, "scoring": "default"}.items():
                phraser.setdefault(Key, Value)

            for _ in range(phraser["gram_size"]):
                self.phraser = Phraser(
                    Phrases(
                        corpus,
                        min_count=kwargs["min_count"],
                        max_vocab_size=kwargs["max_vocab_size"],
                        scoring=phraser["scoring"],
                    ))
                corpus = self.phraser[corpus]
        else:
            self.phraser = phraser
            corpus = self.phraser[corpus]

        # Word Vectors
        if (not word_vectors) or isinstance(word_vectors, dict):
            word_vectors = word_vectors or {}
            for Key, Value in {
                    "embedding_model": "fasttext",
                    "window": 5,
                    "workers": 3,
            }.items():
                word_vectors.setdefault(Key, Value)

            self.word_vectors = {
                "fasttext": FastText,
                "word2vec": Word2Vec
            }[word_vectors["embedding_model"].lower()](
                corpus,
                size=kwargs["word_vector_size"],
                window=word_vectors["window"],
                min_count=1,  # kwargs["min_count"],
                workers=word_vectors["workers"],
                max_vocab_size=kwargs["max_vocab_size"],
            ).wv
        else:
            self.word_vectors = word_vectors

        # LSTM RNN
        if (not nn) or isinstance(nn, dict):
            nn = nn or {}
            for Key, Value in {
                    "cell_type": "LSTM",
                    # "num_layers": 3, Perhaps later
                    "max_words": 100,
                    "sentence_vector_size": 300,
                    "activation": "tanh",
                    "dropout_rate": .2,
                    "loss": "categorical_crossentropy",
                    "learning_rate": .0005,
                    "metrics": ["accuracy"],
            }.items():
                nn.setdefault(Key, Value)

            input_statement = Input(
                shape=(nn["max_words"], kwargs["word_vector_size"]),
                name="input_statement",
            )
            input_response = Input(
                shape=(nn["max_words"], kwargs["word_vector_size"]),
                name="input_response",
            )

            self.nn = Model(
                inputs=[input_statement, input_response],
                outputs=[
                    Dense(kwargs["max_vocab_size"], activation="softmax")(
                        Dense(kwargs["max_vocab_size"] / 2,
                              activation="relu")(concatenate(
                                  [
                                      Bidirectional({
                                          "LSTM": LSTM,
                                          "GRU": GRU
                                      }[nn["cell_type"]](
                                          units=nn["sentence_vector_size"],
                                          input_shape=(
                                              nn["max_words"],
                                              kwargs["word_vector_size"],
                                          ),
                                          activation=nn["activation"],
                                          dropout=nn["dropout_rate"],
                                          kernel_initializer=lecun_uniform(),
                                      ))(input_statement),
                                      Bidirectional({
                                          "LSTM": LSTM,
                                          "GRU": GRU
                                      }[nn["cell_type"]](
                                          units=nn["sentence_vector_size"],
                                          input_shape=(
                                              nn["max_words"],
                                              kwargs["word_vector_size"],
                                          ),
                                          activation=nn["activation"],
                                          dropout=nn["dropout_rate"],
                                          kernel_initializer=lecun_uniform(),
                                      ))(input_response),
                                  ],
                                  axis=1,
                              )))
                ],
            )
            self.nn.compile(
                loss=nn["loss"],
                optimizer=Adam(lr=nn["learning_rate"]),
                metrics=nn["metrics"],
            )
        else:
            self.nn = nn

        # Commander
        self.commander = commander or ChatBot(
            "Commander",
            preprocessors=[
                "chatterbot.preprocessors.clean_whitespace",
                "chatterbot.preprocessors.convert_to_ascii",
            ],
            trainer="chatterbot.trainers.ListTrainer",
            logic_adapters=[
                {
                    "import_path": "chatterbot.logic.BestMatch"
                },
                {
                    "import_path": "chatterbot.logic.LowConfidenceAdapter",
                    "threshold": 0.65,
                    "default_response": "FAIL",
                },
            ],
        )
        if command_text:
            self.commander.train(command_text)

        # Grammar
        if (not grammar) or isinstance(grammar, dict):
            grammar = grammar or {}
            # No grammar defaults yet.

            self.grammar = Grammar(grammar)
            self.grammar.add_modifiers(base_english)
        else:
            self.grammar = grammar
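
The nested Model construction in the LSTM RNN branch is hard to read inline. Here is a flattened, functionally equivalent sketch of just that network, with standalone Keras imports; the vocabulary size is shrunk from the original 40,000,000 for practicality, and compilation is omitted:

from keras.layers import Input, LSTM, Bidirectional, Dense, concatenate
from keras.models import Model
from keras.initializers import lecun_uniform

max_words, vec_size, sent_size, vocab = 100, 256, 300, 40000

def make_encoder():
    # One bidirectional LSTM sentence encoder per input branch.
    return Bidirectional(LSTM(units=sent_size, activation="tanh",
                              dropout=0.2,
                              kernel_initializer=lecun_uniform()))

statement = Input(shape=(max_words, vec_size), name="input_statement")
response = Input(shape=(max_words, vec_size), name="input_response")
merged = concatenate([make_encoder()(statement), make_encoder()(response)],
                     axis=1)
hidden = Dense(vocab // 2, activation="relu")(merged)
output = Dense(vocab, activation="softmax")(hidden)
model = Model(inputs=[statement, response], outputs=output)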
Example #8
    # (tail of a plotting helper whose definition is not shown)
    plt.suptitle('Weight matrices variation')

    plt.show()


# Imports assumed by this snippet (Keras 2.x lowercase initializer aliases):
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.activations import relu, softmax
from keras.initializers import random_uniform, lecun_uniform, ones, zeros

model = Sequential([
    Dense(
        units=4,
        input_shape=(4, ),
        activation=relu,
        trainable=False,  # to freeze the layer
        kernel_initializer=random_uniform(),
        bias_initializer=ones()),
    Dense(units=2,
          activation=relu,
          kernel_initializer=lecun_uniform(),
          bias_initializer=zeros()),
    Dense(units=4, activation=softmax)
])

model.summary()

# get_weights / get_biases are user-defined helpers (not shown here).
W0_layers = get_weights(model)
b0_layers = get_biases(model)

X_train = np.random.random((100, 4))
y_train = X_train

X_test = np.random.random((20, 4))
y_test = X_test
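
The snippet ends before training. A plausible continuation (loss, optimizer, and epoch count are assumptions, not from the source), fitting the network to reproduce its input and then re-reading the weights to see which layers moved:

model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.fit(X_train, y_train, epochs=50, batch_size=16,
          validation_data=(X_test, y_test), verbose=0)

W_layers = get_weights(model)  # compare with W0_layers: the frozen first
b_layers = get_biases(model)   # layer should be unchanged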