Example 1
def _add_conv_2d(model: tf.keras.Sequential, filters: int):
    # Strided 4x4 convolution without bias, DCGAN-style N(0, 0.02) weight init.
    model.add(tf.keras.layers.Conv2D(filters,
                                     (4, 4),
                                     strides=(2, 2),
                                     padding='same',
                                     use_bias=False,
                                     kernel_initializer=tf.random_normal_initializer(0.0, 0.02)))
Example 2
def evaluate_model(model: tf.keras.Sequential):
    """
    Evaluate the model on the held-out test set.

    Args:
        model: compiled tf.keras.Sequential model

    Returns:
        The same model, unchanged.
    """
    model.evaluate(CMODEL.x_test, CMODEL.y_test, verbose=2)
    return model
Example 3
def _predict_chars(
    model: tf.keras.Sequential,
    sp: spm.SentencePieceProcessor,
    start_string: str,
    store: _BaseConfig,
) -> str:
    """
    Evaluation step (generating text using the learned model).

    Args:
        model: tf.keras.Sequential model
        sp: SentencePiece tokenizer
        start_string: string to bootstrap model
        store: our config object
    Returns:
        The generated line of text, returned once a newline token appears
        or the gen_chars limit is reached
    """

    # Converting our start string to numbers (vectorizing)
    input_eval = sp.EncodeAsIds(start_string)
    input_eval = tf.expand_dims(input_eval, 0)

    # Empty list to accumulate the predicted token ids
    sentence_ids = []

    # Here batch size == 1
    model.reset_states()

    while True:
        predictions = model(input_eval)
        # remove the batch dimension
        predictions = tf.squeeze(predictions, 0)

        # using a categorical distribution to
        # predict the word returned by the model
        predictions = predictions / store.gen_temp
        predicted_id = tf.random.categorical(predictions,
                                             num_samples=1)[-1, 0].numpy()

        # We pass the predicted word as the next input to the model
        # along with the previous hidden state
        input_eval = tf.expand_dims([predicted_id], 0)
        sentence_ids.append(int(predicted_id))

        decoded = sp.DecodeIds(sentence_ids)
        if store.field_delimiter is not None:
            decoded = decoded.replace(store.field_delimiter_token,
                                      store.field_delimiter)

        if "<n>" in decoded:
            return _pred_string(decoded.replace("<n>", ""))
        elif 0 < store.gen_chars <= len(decoded):
            return _pred_string(decoded)
Example 4
def _add_conv_2d(self,
                 model: tf.keras.Sequential,
                 filters: int,
                 strides: Tuple[int, int] = DISCRIMINATOR_STRIDE_SIZE) -> None:
    model.add(
        tf.keras.layers.Conv2D(
            filters,
            KERNEL_SIZE,
            strides=strides,
            padding=PADDING,
            use_bias=USE_BIAS,
            kernel_initializer=tf.random_normal_initializer(0.0, 0.02)))
Example 5
def train_model(model: tf.keras.Sequential):
    """
    Args:
        model: current model

    Returns: model trained
    """
    # Training
    model.fit(x=CMODEL.input_batch,
              y=CMODEL.label_batch,
              batch_size=CMODEL.batch_size,
              epochs=2)
    return model
Example 6
def fit_model(
    tf_model: tf.keras.Sequential,
    training_data,
    validation_data,
    callbacks=None,
    settings: Optional[dict] = None,
):
    # Compute balanced class weights from the one-hot training labels.
    y = np.argmax(training_data[1], axis=-1)
    class_weight_val = class_weight.compute_class_weight(
        class_weight="balanced", classes=np.unique(y), y=y)
    class_weights = dict(zip(np.unique(y), class_weight_val))

    print("Class Weights: ", end="")
    print(class_weights)

    rtn_history = tf_model.fit(
        x=training_data[0],
        y=training_data[1],
        validation_data=validation_data,
        callbacks=callbacks,
        class_weight=class_weights,
        **(settings or {}),
    )

    return rtn_history
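
A minimal usage sketch for fit_model, assuming the snippet's module already imports numpy as np and sklearn.utils.class_weight; the toy data and the settings values are illustrative, not from the source:

# Hypothetical usage sketch; data shapes and settings are illustrative.
import numpy as np
import tensorflow as tf

x_train = np.random.rand(100, 8).astype("float32")
y_train = tf.keras.utils.to_categorical(np.random.randint(0, 3, 100), 3)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(3, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy")

history = fit_model(
    model,
    training_data=(x_train, y_train),
    validation_data=(x_train, y_train),
    settings={"batch_size": 16, "epochs": 2},
)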
Example 7
def _extract_weight_vector(model: tf.keras.Sequential) -> np.ndarray:
    # Flatten every weight tensor and concatenate them into one 1-D vector.
    weights = [w.flatten() for w in model.get_weights()]
    return np.concatenate(weights)
Example 8
def select_baseline(df_pos_normalized: pd.DataFrame,
                    model: tf.keras.Sequential,
                    min_p: float = 0.85,
                    max_count: int = 100):
    """Selects the representative subsample that will be used as baselines.

    Based on Proposition 3 (Baseline Set for Anomaly Detection) of
    Interpretable, Multidimensional, Multimodal Anomaly Detection with
    Negative Sampling (Sipple 2020).

    Args:
        df_pos_normalized: data frame of the normalized positive sample.
        model: classifier model from NS-NN.
        min_p: minimum class score to be considered as a baseline normal.
        max_count: maximum number of reference points to be selected.

    Returns:
        data frame of the normalized baseline and the maximum conf score
    """
    # Convert the data frame to a float32 feature matrix
    x = np.asarray(df_pos_normalized, dtype=np.float32)
    y_hat = model.predict(x, verbose=1, steps=1)
    df_pos_normalized[_CLASS_PROB_LABEL] = y_hat
    high_scoring_predictions = df_pos_normalized[
        df_pos_normalized[_CLASS_PROB_LABEL] >= min_p]
    high_scoring_predictions = high_scoring_predictions.sort_values(
        by=_CLASS_PROB_LABEL, ascending=False)
    high_scoring_predictions = high_scoring_predictions.drop(
        columns=[_CLASS_PROB_LABEL])
    return high_scoring_predictions[:max_count], float(max(y_hat))
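
A hedged usage sketch for select_baseline; the toy classifier, the data, and the _CLASS_PROB_LABEL constant are illustrative assumptions:

# Hypothetical usage sketch; model, data, and constant are illustrative.
import numpy as np
import pandas as pd
import tensorflow as tf

_CLASS_PROB_LABEL = "class_prob"

# 32 rows = one default predict batch, matching the steps=1 call above.
df_pos = pd.DataFrame(np.random.rand(32, 4), columns=list("abcd"))
clf = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
clf.compile(optimizer="adam", loss="binary_crossentropy")

baselines, max_score = select_baseline(df_pos, clf, min_p=0.5, max_count=20)
print(baselines.shape, max_score)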
Example 9
def __evaluate_model(self, model: tf.keras.Sequential, x_test: np.ndarray,
                     y_test: np.ndarray) -> None:
    print('[INFO] evaluating network')
    predictions = model.predict(x=x_test, batch_size=1024)
    print(
        classification_report(y_test,
                              predictions.round(),
                              target_names=['Male', 'Female']))
Example 10
def discernHeadline(model: tf.keras.Sequential, tokenizer: Tokenizer):
    # Predict on a couple of sample headlines
    headline = [
        "granny starting to fear spiders in the garden might be real",
        "the weather today is bright and sunny"
    ]

    seq = tokenizer.texts_to_sequences(headline)
    padded = pad_sequences(seq, maxlen=100, padding="post", truncating="post")

    print(model.predict(padded))
Example 11
def add_classifier(feature_extractor: tf.keras.Sequential, n_class: int,
                   dropout: float = 0.4) -> tf.keras.Sequential:
    """
    Add classification layers to a feature extraction model
    :param feature_extractor: model to extract features
    :param n_class: classes to differentiate
    :param dropout: dropout rate applied before the output layer
    :return: classifier model
    """
    feature_extractor.add(Flatten())
    feature_extractor.add(Dense(500, activation='relu'))
    feature_extractor.add(Dropout(dropout))
    feature_extractor.add(Dense(n_class, activation='softmax'))
    return feature_extractor
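
A minimal usage sketch; the convolutional feature extractor below is an illustrative assumption (the snippet itself assumes Flatten, Dense and Dropout are imported from tensorflow.keras.layers):

# Hypothetical usage sketch; the extractor architecture is illustrative.
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

extractor = tf.keras.Sequential([
    Conv2D(16, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D(),
])
classifier = add_classifier(extractor, n_class=10, dropout=0.3)
classifier.summary()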
Example 12
def train_model_2(config: ConfigParser, model: tf.keras.Sequential, data: Data,
                  save_path: Path, checkpoint_path: Path) -> None:
    version = config['Model']['version']

    callbacks = [
        tfa.callbacks.AverageModelCheckpoint(filepath=str(checkpoint_path) +
                                             '/cp-{epoch:04d}.ckpt',
                                             update_weights=True),
        tf.keras.callbacks.TensorBoard(log_dir=f'logs/{version}_model_2',
                                       profile_batch='100, 110',
                                       histogram_freq=1,
                                       update_freq='batch')
    ]
    optimizer = tf.keras.optimizers.SGD(
        learning_rate=float(config['Model']['learning_rate']))
    # 35 below obtained by inspecting the epoch at which convergence occurred on validation set with TensorBoard.
    optimizer = tfa.optimizers.SWA(optimizer,
                                   start_averaging=35,
                                   average_period=int(
                                       config['Model']['n_models']))

    model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'])
    model.fit(data.training_dataset,
              epochs=1000,
              validation_data=data.validation_dataset,
              callbacks=callbacks)

    # Save the model
    model.save(save_path)
    # Remove the model from memory, since OOM might occur.
    del model
Example 13
def display_predictions(chosen_file: str, trained_model: tf.keras.Sequential,
                        original_columns_list):
    """
    Fonction qui affiche les 10 premiers resultats comparant la prediction et la realite
    :param chosen_file: chemin du fichier a comparer
    :param trained_model: modele entraine pour afficher les predictions realisees
    :param original_columns_list: nom des colonnes dans la DF avant nettoyage des donnees
    :return: rien du tout
    """
    df_test = pd.read_pickle(
        chosen_file)  # predictions sur le fichier d'origine pour comparer
    df_test.columns = original_columns_list
    df_test.set_index("MachineIdentifier", inplace=True)
    df_test = cleaning_df(df_test)
    predictions = trained_model.predict(df_test.iloc[:, :-1])
    for i in range(0, 10):
        print("predicted infection: {:.2%} | actual outcome : {}".format(
            predictions[i][0], df_test.iloc[i, -1]))
Example 14
def preprocess_for_lrp(model: tf.keras.Sequential, image: np.ndarray,
                       mask: tf.Tensor) -> Tuple[list, list, tf.Tensor]:
    """
    The image is transformed as during training. A forward pass through the
    network is performed and the output of every layer is stored. The final
    output is multiplied by the mask so that only the output of the current
    classification is kept.
    :param model: the trained neural network
    :param image: the input
    :param mask: an array with 1 at the current class and 0 elsewhere
    :return: list of each layer's outputs, list of all layers, final output
    """
    # The original image is preprocessed in the same way as during training
    image = preprocess_input(image.copy())

    # A copy of the model is made so that forward() does not change the weights
    # for subsequent uses. The last activation (sigmoid) is removed.
    new_model = tf.keras.models.clone_model(model)
    new_model.pop()
    new_model.set_weights(model.get_weights())

    # The model's layers are stored in a list
    layers = new_model.layers

    # The input is fed through the network and each layer's output is stored
    outputs = [image]
    for i, layer in enumerate(layers):
        output = layer(outputs[i])

        # No ReLU should be applied to the last output
        if i < len(layers) - 1 and \
                (isinstance(layer, tf.keras.layers.Conv2D) or isinstance(layer, tf.keras.layers.Dense)):

            output = tf.keras.activations.relu(output)

        outputs.append(output)

    # The network output is multiplied by the mask to keep only the output to be explained
    output_const = tf.constant(outputs[-1])
    output_const = output_const * mask

    return outputs, layers, output_const
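
A hedged usage sketch for preprocess_for_lrp; the tiny model, the identity preprocess_input stand-in, and the shapes below are illustrative assumptions, not from the source:

# Hypothetical usage sketch; model, shapes, and stand-in are illustrative.
import numpy as np
import tensorflow as tf

def preprocess_input(x):  # stand-in for the training-time preprocessing
    return x

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(3),
    tf.keras.layers.Activation('sigmoid'),
])
image = np.random.rand(1, 4).astype('float32')
mask = tf.constant([[0.0, 1.0, 0.0]])  # explain class index 1

outputs, layers, output_const = preprocess_for_lrp(model, image, mask)
print(output_const.numpy())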
Example 15
def compile_model(model: tf.keras.Sequential, lr=0.001, optim='sgd') -> None:
    loss = tf.keras.losses.SparseCategoricalCrossentropy()
    metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
    if optim == 'sgd':
        model.compile(tf.keras.optimizers.SGD(learning_rate=lr),
                      loss=loss,
                      metrics=metrics)
    elif optim == 'adam':
        model.compile(tf.keras.optimizers.Adam(learning_rate=lr),
                      loss=loss,
                      metrics=metrics)
    elif optim == 'rmsprop':
        model.compile(tf.keras.optimizers.RMSprop(learning_rate=lr),
                      loss=loss,
                      metrics=metrics)
    else:
        raise ValueError(
            "Parameter `optim` accepts {'sgd', 'adam', 'rmsprop'}, "
            f"got {optim}")
Example 16
def _add_last_activation(self, model: tf.keras.Sequential) -> None:
    model.add(tf.keras.layers.Activation('tanh'))
Example 17
def _add_upsampling(self, model: tf.keras.Sequential) -> None:
    if UPSAMPLING:
        model.add(tf.keras.layers.UpSampling2D(
            interpolation=INTERPOLATION))
Example 18
def predict_monthly_payment(principal: float, interest_rate: float,
                            number_of_payments: float,
                            model: tf.keras.Sequential) -> float:
    # Single feature row as a batch of one; unwrap the scalar prediction.
    return model.predict(
        [(principal, interest_rate, number_of_payments)])[0][0]
Example 19
def add_symmetric_autoencoder(network: tf.keras.Sequential,
                              layer_dims: List[int],
                              input_shape=None,
                              activation="relu",
                              *args,
                              **kwargs) -> tf.keras.Sequential:
    """
    Build an autoencoder where the hidden state dimensions of the en- and decoder are the same
    :param network: sequential Keras network
    :param layer_dims: list of hidden state dimensions
    :param input_shape: shape of the input fed to the first layer
    :param activation: activation applied after each dense layer
    :param args: passed to Keras dense layer
    :param kwargs: passed to Keras dense layer
    :return: sequential Keras autoencoder model
    """

    # First layer
    network.add(
        tf.keras.layers.Dense(layer_dims[0], input_shape=input_shape))
    network.add(tf.keras.layers.Activation(activation))
    # Encoder
    for cur_dim in layer_dims[1:]:
        network.add(tf.keras.layers.Dense(cur_dim, *args, **kwargs))
        network.add(tf.keras.layers.Activation(activation))
    # Decoder
    for cur_dim in reversed(layer_dims[:-1]):
        network.add(tf.keras.layers.Dense(cur_dim, *args, **kwargs))
        network.add(tf.keras.layers.Activation(activation))
    return network
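
A minimal usage sketch; the layer sizes are illustrative assumptions:

# Hypothetical usage sketch; layer_dims values are illustrative.
import tensorflow as tf

autoencoder = add_symmetric_autoencoder(tf.keras.Sequential(),
                                        layer_dims=[64, 32, 8],
                                        input_shape=(64,))
autoencoder.summary()  # dense stack: 64-32-8-32-64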
Example 20
def add_dense(network: tf.keras.Sequential,
              layer_dims: List[int],
              input_shape=None,
              activation="relu",
              first_l1: float = 0.0,
              first_l2: float = 0.0,
              p_dropout: Optional[float] = None,
              *args,
              **kwargs):
    """
    Build a dense model with the given hidden state dimensions
    :param network: sequential Keras network
    :param layer_dims: list of hidden state dimensions
    :param input_shape: shape of the input fed to the first layer
    :param activation: activation applied after each dense layer
    :param first_l1: L1 kernel regulariser on the first layer
    :param first_l2: L2 kernel regulariser on the first layer
    :param p_dropout: dropout percentage after the first layer
    :param args: passed to Keras dense layer
    :param kwargs: passed to Keras dense layer
    :return: sequential Keras dense model
    """

    # First layer
    if input_shape:
        network.add(
            tf.keras.layers.Dense(
                layer_dims[0],
                input_shape=input_shape,
                kernel_regularizer=tf.keras.regularizers.L1L2(l1=first_l1,
                                                              l2=first_l2),
                bias_regularizer=tf.keras.regularizers.L1L2(l1=first_l1,
                                                            l2=first_l2),
            ))
    else:
        network.add(
            tf.keras.layers.Dense(
                layer_dims[0],
                kernel_regularizer=tf.keras.regularizers.L1L2(l1=first_l1,
                                                              l2=first_l2),
                bias_regularizer=tf.keras.regularizers.L1L2(l1=first_l1,
                                                            l2=first_l2),
            ))
    network.add(tf.keras.layers.Activation(activation))
    if p_dropout:
        network.add(tf.keras.layers.Dropout(p_dropout))
    # All the other feature_layers
    for cur_dim in layer_dims[1:]:
        network.add(tf.keras.layers.Dense(cur_dim, *args, **kwargs))
        network.add(tf.keras.layers.Activation(activation))
    return network
Example 21
def compile_model(tf_model: tf.keras.Sequential, settings: dict,
                  loss_func: tf.keras.losses.Loss) -> tf.keras.Sequential:
    tf_model.compile(loss=loss_func, **settings)
    tf_model.summary()

    return tf_model
Example 22
def _add_dropout(model: tf.keras.Sequential):
    model.add(tf.keras.layers.Dropout(rate=0.5))
Example 23
def _add_discriminator_activation(model: tf.keras.Sequential):
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
Example 24
def _add_zeros_padding(self, model: tf.keras.Sequential) -> None:
    model.add(tf.keras.layers.ZeroPadding2D())
Example 25
def _add_batch_norm(self, model: tf.keras.Sequential) -> None:
    model.add(tf.keras.layers.BatchNormalization(
        momentum=0.8,
        gamma_initializer=tf.random_normal_initializer(1.0, 0.02)))
Example 26
def _predict_chars(
    model: tf.keras.Sequential,
    tokenizer: BaseTokenizer,
    start_string: Union[str, List[str]],
    store: TensorFlowConfig,
    predict_and_sample: Optional[Callable] = None,
) -> Generator[PredString, None, None]:
    """
    Evaluation step (generating text using the learned model).

    Args:
        model: tf.keras.Sequential model
        tokenizer: A subclass of BaseTokenizer
        start_string: string to bootstrap model. NOTE: this string MUST already have had special tokens
            inserted (i.e. <d>)
        store: our config object
    Returns:
        Yields line of text per iteration
    """

    # Converting our start string to numbers (vectorizing)
    if isinstance(start_string, str):
        start_string = [start_string]

    _start_string = start_string[0]

    start_vec = tokenizer.encode_to_ids(_start_string)
    input_eval = tf.constant(
        [start_vec for _ in range(store.predict_batch_size)])

    if predict_and_sample is None:

        def predict_and_sample(this_input):
            return _predict_and_sample(model, this_input, store.gen_temp)

    # Batch prediction
    batch_sentence_ids = [[] for _ in range(store.predict_batch_size)]
    not_done = set(range(store.predict_batch_size))

    if store.reset_states:
        # Reset RNN model states between each record created
        # guarantees more consistent record creation over time, at the
        # expense of model accuracy
        model.reset_states()

    prediction_prefix = None
    if _start_string != tokenizer.newline_str:
        if store.field_delimiter is not None:
            prediction_prefix = tokenizer.detokenize_delimiter(_start_string)
        else:
            prediction_prefix = _start_string

    while not_done:
        input_eval = predict_and_sample(input_eval)
        for i in not_done:
            batch_sentence_ids[i].append(int(input_eval[i, 0].numpy()))

        batch_decoded = [(i, tokenizer.decode_from_ids(batch_sentence_ids[i]))
                         for i in not_done]
        batch_decoded = _replace_prefix(batch_decoded, prediction_prefix)
        for i, decoded in batch_decoded:
            end_idx = decoded.find(tokenizer.newline_str)
            if end_idx >= 0:
                decoded = decoded[:end_idx]
                yield PredString(decoded)
                not_done.remove(i)
            elif 0 < store.gen_chars <= len(decoded):
                yield PredString(decoded)
                not_done.remove(i)
Example 27
def _add_dropout(self, model: tf.keras.Sequential) -> None:
    if ADD_DROPOUT_D:
        model.add(tf.keras.layers.Dropout(rate=DROPOUT_D_RATE))
Example 28
def _add_generator_activation(model: tf.keras.Sequential):
    model.add(tf.keras.layers.ReLU())
Example 29
def _add_activation(self, model: tf.keras.Sequential) -> None:
    model.add(tf.keras.layers.LeakyReLU(alpha=0.2))
Example 30
def _add_upsampling(model: tf.keras.Sequential):
    model.add(tf.keras.layers.UpSampling2D())