Example #1
 def accuracy(y_true, y_pred):
     shape = tf.shape(y_pred)
     # Assume every sequence runs to the full (padded) time dimension
     sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * shape[1]
     viterbi_sequence, _ = tf.contrib.crf.crf_decode(
         y_pred, self.transitions, sequence_lengths)
     output = keras.backend.one_hot(viterbi_sequence, self.output_dim)
     return categorical_accuracy(y_true, output)
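
`tf.contrib` was removed in TensorFlow 2.x; its CRF ops now live in TensorFlow Addons. A minimal sketch of the same metric ported to TF2, assuming `self.transitions` and `self.output_dim` are defined as in the example above:

import tensorflow as tf
import tensorflow_addons as tfa

def accuracy(self, y_true, y_pred):
    shape = tf.shape(y_pred)
    sequence_lengths = tf.ones(shape[0], dtype=tf.int32) * shape[1]
    # tfa.text.crf_decode replaces tf.contrib.crf.crf_decode
    viterbi_sequence, _ = tfa.text.crf_decode(
        y_pred, self.transitions, sequence_lengths)
    output = tf.one_hot(viterbi_sequence, self.output_dim)
    return tf.keras.metrics.categorical_accuracy(y_true, output)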
Example #2
    def _generic_accuracy(y_true, y_pred):
        if K.int_shape(y_pred)[1] == 1:
            return binary_accuracy(y_true, y_pred)
        if K.int_shape(y_true)[-1] == 1:
            return sparse_categorical_accuracy(y_true, y_pred)

        return categorical_accuracy(y_true, y_pred)
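
A dispatching metric like this is passed to `compile` like any other metric function; a minimal usage sketch, assuming `model` is an already-built Keras model (the optimizer and loss here are placeholders):

model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=[_generic_accuracy])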
Example #3
    def flatten_accuracy_fn(y_true, y_pred):
        rebatched_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
        rebatched_true = K.reshape(y_true, (-1, K.int_shape(y_true)[-1]))

        # Remove any predictions from padded values.
        rebatched_pred = rebatched_pred * K.expand_dims(
            K.cast(K.sum(rebatched_true, -1) > 0, K.floatx()))
        return categorical_accuracy(rebatched_true, rebatched_pred)
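
The mask relies on padded positions having all-zero rows in the one-hot targets; a quick illustration with made-up values:

import numpy as np

rebatched_true = np.array([[0., 1.],
                           [0., 0.]])  # second row is padding
mask = (rebatched_true.sum(-1) > 0).astype("float32")  # -> [1., 0.]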
Example #4
def acc_of_all(y_true, y_pred):
    y_true = tf.split(y_true, 64, axis=-1)  # split into a list of 64 vectors
    y_pred = tf.split(y_pred, 64, axis=-1)
    num = 0
    for (true, pred) in zip(y_true, y_pred):
        num += tf.reduce_mean(metrics.categorical_accuracy(true, pred))

    return num / 64
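
`tf.split(t, 64, axis=-1)` requires the last dimension to be exactly divisible by 64; each slice is then scored on its own and the 64 per-slice accuracies are averaged. A quick shape check with made-up numbers:

import tensorflow as tf

y = tf.zeros([8, 64 * 5])         # batch of 8, 64 heads of 5 classes each
parts = tf.split(y, 64, axis=-1)  # 64 tensors, each of shape [8, 5]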
Example #5
def train_step(inputs, target):
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    acc = tf.reduce_mean(categorical_accuracy(target, predictions))
    return loss, acc
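
A step function like this is typically compiled with `tf.function` and driven by a plain Python loop; a minimal sketch, assuming `model`, `loss_fn`, and `optimizer` exist as above and `dataset` is a hypothetical batched `tf.data.Dataset` of `(inputs, target)` pairs:

train_step = tf.function(train_step)  # trace the step into a graph once

for epoch in range(10):
    for inputs, target in dataset:
        loss, acc = train_step(inputs, target)
    print(f"epoch {epoch}: loss={float(loss):.4f} acc={float(acc):.4f}")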
Example #6
def acc_of_valid(y_true, y_pred):
    y_true = tf.split(y_true, 64, axis=-1)  # split into a list of 64 vectors
    y_pred = tf.split(y_pred, 64, axis=-1)
    num = 0
    for idx, (true, pred) in enumerate(zip(y_true, y_pred)):
        if idx not in [0, 1, 2, 3, 4, 5, 11, 25, 32, 39, 53, 59, 60, 61, 62, 63]:
            num += tf.reduce_mean(metrics.categorical_accuracy(true, pred))

    return num / 48  # 48 = 64 positions minus the 16 excluded indices
Example #7
def evaluate():
    predictions = model([x, a], training=False)
    losses = []
    accuracies = []
    for mask in [mask_tr, mask_va, mask_te]:
        loss = loss_fn(y[mask], predictions[mask])
        loss += sum(model.losses)
        losses.append(loss)
        acc = tf.reduce_mean(categorical_accuracy(y[mask], predictions[mask]))
        accuracies.append(acc)
    return losses, accuracies
Example #8
def test():
    predictions = model([X, fltr], training=False)
    losses = []
    accuracies = []
    for mask in [train_mask, val_mask, test_mask]:
        loss = loss_fn(y[mask], predictions[mask])
        loss += sum(model.losses)
        losses.append(loss)
        acc = tf.reduce_mean(categorical_accuracy(y[mask], predictions[mask]))
        accuracies.append(acc)
    return losses, accuracies
Example #9
def evaluate(loader):
    output = []
    step = 0
    while step < loader.steps_per_epoch:
        step += 1
        inputs, target = next(loader)
        pred = model(inputs, training=False)
        outs = (
            loss_fn(target, pred),
            tf.reduce_mean(categorical_accuracy(target, pred)),
        )
        output.append(outs)
    return np.mean(output, 0)
Example #10
def evaluate(loader):
    step = 0
    results = []
    for batch in loader:
        step += 1
        inputs, target = batch
        predictions = model(inputs, training=False)
        loss = loss_fn(target, predictions)
        acc = tf.reduce_mean(categorical_accuracy(target, predictions))
        results.append((loss, acc, len(target)))  # Keep track of batch size
        if step == loader.steps_per_epoch:
            results = np.array(results)
            return np.average(results[:, :-1], 0, weights=results[:, -1])
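
The batch-size weights matter when the final batch is short; a plain mean over batches (as in Example #9) over-weights it. A quick check of the weighted average, with made-up numbers for two batches of 32 and 8 samples:

import numpy as np

results = np.array([(0.5, 0.90, 32),
                    (0.7, 0.80, 8)])  # (loss, acc, batch size)
np.average(results[:, :-1], 0, weights=results[:, -1])
# -> [0.54, 0.88]; the unweighted mean would give [0.6, 0.85]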
Example #11
    def approx_accuracy_round(y_true, y_pred):
        nb_classes = len(Nuscene_dataset.correspondances_classes_index)
        # Extract the relevant slices from the tensors
        y_pred_extracted = tf.slice(y_pred, [0, 0, 0],
                                    size=[dataset.batch_size, 1, nb_classes])
        y_pred_extracted = tf.reshape(y_pred_extracted,
                                      [dataset.batch_size, nb_classes])

        y_true_label = tf.slice(y_true, [0, 0, 0],
                                size=[dataset.batch_size, 1, nb_classes])
        y_true_label = tf.reshape(y_true_label,
                                  [dataset.batch_size, nb_classes])

        return categorical_accuracy(y_true_label, fct_approx(y_pred_extracted))
Example #12
        def score(y_true, y_pred):
            y_t_rank = len(y_true.shape.as_list())
            y_p_rank = len(y_pred.shape.as_list())
            y_t_last_dim = y_true.shape.as_list()[-1]
            y_p_last_dim = y_pred.shape.as_list()[-1]

            is_binary = y_p_last_dim == 1
            is_sparse_categorical = (y_t_rank < y_p_rank
                                     or (y_t_last_dim == 1 and y_p_last_dim > 1))

            if isinstance(metric_function, six.string_types):
                if metric_function in ["accuracy", "acc"]:
                    if is_binary:
                        metric = binary_accuracy(y_true, y_pred)
                    elif is_sparse_categorical:
                        metric = sparse_categorical_accuracy(y_true, y_pred)
                    else:
                        metric = categorical_accuracy(y_true, y_pred)
                else:
                    metric = categorical_accuracy(y_true, y_pred)
            else:
                metric = metric_function(y_true, y_pred)

            return K.cast(metric * (1.0 + delta), K.floatx())
Example #13
 def on_epoch_end(self, epoch, logs=None):
     target = self.validation[1]
     prediction = np.asarray(self.model.predict(self.validation[0]))
     f1score = self.f1_score(target, prediction)
     precision = self.precision(target, prediction)
     recall = self.recall(target, prediction)
     accuracy = np.mean(categorical_accuracy(target, prediction))
     tf.summary.scalar("f1score", f1score, epoch)
     tf.summary.scalar("precision", precision, epoch)
     tf.summary.scalar("recall", recall, epoch)
     tf.summary.scalar("accuracy", accuracy, epoch)
     print(
         f"Metrics for epoch {epoch}:"
         f" -- val_f1score: {f1score} -- val_precision: {precision}"
         f" -- val_recall: {recall} -- val_accuracy: {accuracy}")
Example #14
def evaluate(loader):
    output = []
    step = 0
    while step < loader.steps_per_epoch:
        step += 1
        inputs, target = next(loader)
        pred = model(inputs, training=False)
        outs = (
            loss_fn(target, pred),
            tf.reduce_mean(categorical_accuracy(target, pred)),
            len(target),  # Keep track of batch size
        )
        output.append(outs)
        if step == loader.steps_per_epoch:
            output = np.array(output)
            return np.average(output[:, :-1], 0, weights=output[:, -1])
Example #15
    def _update_metric(self,
                       key: str,
                       y_true: tf.Tensor,
                       y_pred: tf.Tensor,
                       loss: tuple,
                       setup_changes: tf.Tensor,
                       step=0):
        """ Updates the metrics.
        :param key:
        :param y_true:
        :param y_pred:
        :param loss:
        :param grads:
        """
        loss_tt, loss_ms, loss_total = loss

        self._loss_tt.get(key).append(loss_tt)
        self._loss_ms.get(key).append(loss_ms)
        self._loss_total.get(key).append(loss_total)

        self._setup_changes.get(key).append(setup_changes)

        self._tp_obj.update_state(y_true, y_pred)
        self._tp.get(key).append(self._tp_obj.result())

        self._tn_obj.update_state(y_true, y_pred)
        self._tn.get(key).append(self._tn_obj.result())

        self._fp_obj.update_state(y_true, y_pred)
        self._fp.get(key).append(self._fp_obj.result())

        self._fn_obj.update_state(y_true, y_pred)
        self._fn.get(key).append(self._fn_obj.result())

        self._pre_obj.update_state(y_true, y_pred)
        self._pre.get(key).append(self._pre_obj.result())

        self._rec_obj.update_state(y_true, y_pred)
        self._rec.get(key).append(self._rec_obj.result())

        shape = tf.shape(y_true)
        y_true = tf.squeeze(tf.transpose(y_true, [0, 2, 1, 3]))
        y_pred = tf.squeeze(tf.transpose(y_pred, [0, 2, 1, 3]))
        y_pred = tf.reshape(y_pred, [shape[0], shape[2], -1])
        y_true = tf.reshape(y_true, [shape[0], shape[2], -1])

        self._acc.get(key).append(categorical_accuracy(y_true, y_pred))
Example #16
    def mu_accuracy(self, y_true, y_pred, **args):
        """
            metric that outputs the accuracy when only considering the logits_mu.
            This accuracy should be the same that was obtained with the fake classifier in its best epoch.

            Parameters
            ----------
            y_true:  `np.array`
                the labels in one hot encoding format

            y_pred: `np.array`
                output of the model formed by the concatenation of the original prediction
                in the first num_classes positions, and a beta scalar in the last one


            Returns
            -------
            the accuracy of the original predictions

        """
        mu_probs = y_pred[:, :self.num_classes_]
        y_true_probs = y_true[:, :self.num_classes_]
        return categorical_accuracy(y_true_probs, mu_probs)
Example #17
def categorical_error(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
    return 1.0 - categorical_accuracy(y_true, y_pred)
Example #18
def categorical_accuracy(y_true, y_pred):
    return K.mean(metrics.categorical_accuracy(y_true, y_pred))
Example #19
 def accuracy(y_true, y_pred):
     return metrics.categorical_accuracy(y_true,
                                         y_pred) * correct_mean(y_true)
Example #20
def trainRBM(data, learning_rate, k1, k2, epochs, batch_size, dims):
    # import data
    print("importing training data")
    if data == "fashion_mnist":
        fashion_mnist = tf.keras.datasets.fashion_mnist
        (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    elif data == "mnist":
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        y_train = tf.keras.utils.to_categorical(y_train, 10)
        y_test = tf.keras.utils.to_categorical(y_test, 10)
    elif data == "faces":
        x_train = [resize(mpimg.imread(file),(28,28)) for file in glob.glob("data/faces/*")]
        x_train = np.asarray(x_train)
        # make images sparse for easier distinctions
        for img in x_train:
            img[img < np.mean(img)+0.5*np.std(img)] = 0
    else:
        raise NameError("unknown data type: %s" % data)
    if data == "mnist" or data == "fashion_mnist":
        x_train = x_train/255.0
        x_test = x_test/255.0
        x_train = tf.cast(tf.reshape(x_train, shape=(x_train.shape[0], 784)), "float32")
        x_test = tf.cast(tf.reshape(
            x_test, shape=(x_test.shape[0], 784)), "float32")
    elif data == "faces":
        # auto conversion to probabilities in earlier step
        x_train = tf.cast(tf.reshape(
            x_train, shape=(x_train.shape[0], 784)), "float32")

    # train_path = tf.keras.utils.get_file(
    #     "iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv")
    # test_path = tf.keras.utils.get_file(
    #     "iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv")
    # CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth',
    #                     'PetalLength', 'PetalWidth', 'Species']
    # # SPECIES = ['Setosa', 'Versicolor', 'Virginica']
    # train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
    # test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)

    # y_train, y_test = train.pop('Species').values, test.pop('Species').values
    # x_train, x_test = train.values, test.values
    # y_train = tf.keras.utils.to_categorical(y_train, 3)
    # y_test = tf.keras.utils.to_categorical(y_test, 3)

    feature_dim = x_train.shape[-1]
    output_dim = y_train.shape[-1]

    # x_train = x_train.reshape(x_train.shape[0], x_train.shape[-1])
    # create log directory
    # parse string input into integer list
    inputs = tf.keras.Input(feature_dim)
    outputs = OnlineBolzmannCell(50, online=True, return_hidden=True)(inputs)
    outputs = tf.keras.layers.Dropout(0.2)(outputs)
    # outputs = tf.keras.layers.Dense(100)(inputs)
    outputs = tf.keras.layers.Dense(output_dim, activation='sigmoid')(outputs)
    rbm = tf.keras.Model(inputs, outputs)
    rbm.compile(loss='categorical_crossentropy',
                optimizer="adam", metrics=["accuracy"])
    print('Training...')
    for i in range(epochs):
        # Fit one epoch at a time so per-epoch scores can be printed
        rbm.fit(x_train, y_train, batch_size=batch_size,
                epochs=1, validation_split=0.1, verbose=0)
        score = rbm.evaluate(x_train, y_train, verbose=0)
        print(f'Epoch {i+1}, loss={score[0]} Accuracy={score[1]}')
    score = rbm.evaluate(x_test, y_test, verbose=0)
    print("Test loss:", score[0])
    print("Test accuracy:", score[1])
    print(10*'#')
    score_train = classification_report(
        y_train.astype(bool), rbm.predict(x_train).astype(bool))
    score_test = classification_report(
        y_test.astype(bool), rbm.predict(x_test).astype(bool))
    acc_train = categorical_accuracy(
        y_train, rbm.predict(x_train))
    acc_test = categorical_accuracy(
        y_test, rbm.predict(x_test))
    print(f'Train score:\nAccuracy: {acc_train}\n{score_train}')
    print(f'Test score:\nAccuracy: {acc_test}\n{score_test}')
    print(10*'#')
    # plot_confusion_matrix(KerasClassifier(rbm, batch_size=batch_size), x_train, y_train>0.51)
    # plot_confusion_matrix(KerasClassifier(
        # rbm, batch_size=batch_size), x_test, y_test > 0.51)
    plt.show()
Example #21
def acc(y_true, y_pred):
    y_true = y_true[:, :nb_classes]
    y_pred = y_pred[:, :nb_classes]
    return categorical_accuracy(y_true, y_pred)
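
This slicing pattern (also in Examples #26, #27, #29, and #30) appears when the model output concatenates class probabilities with extra values that a custom loss needs but accuracy should ignore; a minimal sketch of such an output head, with hypothetical shapes and a stand-in loss:

from tensorflow import keras

nb_classes = 10                             # hypothetical class count
inputs = keras.Input(shape=(128,))          # hypothetical feature vector
features = keras.layers.Dense(64, activation="relu")(inputs)
probs = keras.layers.Dense(nb_classes, activation="softmax")(features)
extra = keras.layers.Dense(1)(features)     # e.g. a scalar the loss needs
outputs = keras.layers.Concatenate()([probs, extra])
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=[acc])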
Example #22
def categorical_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    return 1.0 - categorical_accuracy(y_true, y_pred)  # type: ignore
Example #23
    with tf.GradientTape() as tape:
        pred = model(X, training=True)
        loss = gcnloss(model, y_train[train_mask, :], pred[train_mask])
    # Gradient
    gradients = tape.gradient(loss, model.trainable_variables)
    # Update weights within our model
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    # Early stopping
    pred = model(X, training=False)
    loss_val = gcnloss(model, y_val[val_mask, :], pred[val_mask])
    if loss_val > old_loss_val:
        count += 1
        print("------------------------------------------------------")
        print("Loss ascending count: %d, epoch: %d, loss: %.4f" % (count, epoch, loss_val))
        if count >= 10:
            break
    else:
        count = 0
        if epoch % 5 == 0:
            print("Epoch: %d, loss: %.4f" % (epoch, loss_val))
    old_loss_val = loss_val

# Estimation on the test dataset
print("------------------------------------------------------")
pred = model(X, training=False)
loss_test = gcnloss(model, y_test[test_mask, :], pred[test_mask])
print("Loss on test dataset: %.4f" % loss_test)
accu = categorical_accuracy(y_test[test_mask, :], pred[test_mask])
print("Accuracy: %.4f" % (np.sum(accu) / len(accu)))
Example #24
    def __init__(self,
                 model,
                 intensity_range,
                 regularization,
                 input_shape,
                 init_cost,
                 steps,
                 mini_batch,
                 lr,
                 num_classes,
                 upsample_size=UPSAMPLE_SIZE,
                 attack_succ_threshold=ATTACK_SUCC_THRESHOLD,
                 patience=PATIENCE,
                 cost_multiplier=COST_MULTIPLIER,
                 reset_cost_to_zero=RESET_COST_TO_ZERO,
                 mask_min=MASK_MIN,
                 mask_max=MASK_MAX,
                 color_min=COLOR_MIN,
                 color_max=COLOR_MAX,
                 img_color=IMG_COLOR,
                 shuffle=SHUFFLE,
                 batch_size=BATCH_SIZE,
                 verbose=VERBOSE,
                 return_logs=RETURN_LOGS,
                 save_last=SAVE_LAST,
                 epsilon=EPSILON,
                 early_stop=EARLY_STOP,
                 early_stop_threshold=EARLY_STOP_THRESHOLD,
                 early_stop_patience=EARLY_STOP_PATIENCE,
                 save_tmp=SAVE_TMP,
                 tmp_dir=TMP_DIR,
                 raw_input_flag=RAW_INPUT_FLAG):

        assert intensity_range in {'imagenet', 'inception', 'mnist', 'raw'}
        assert regularization in {None, 'l1', 'l2'}

        self.model = model
        self.intensity_range = intensity_range
        self.regularization = regularization
        self.input_shape = input_shape
        self.init_cost = init_cost
        self.steps = steps
        self.mini_batch = mini_batch
        self.lr = lr
        self.num_classes = num_classes
        self.upsample_size = upsample_size
        self.attack_succ_threshold = attack_succ_threshold
        self.patience = patience
        self.cost_multiplier_up = cost_multiplier
        self.cost_multiplier_down = cost_multiplier**1.5
        self.reset_cost_to_zero = reset_cost_to_zero
        self.mask_min = mask_min
        self.mask_max = mask_max
        self.color_min = color_min
        self.color_max = color_max
        self.img_color = img_color
        self.shuffle = shuffle
        self.batch_size = batch_size
        self.verbose = verbose
        self.return_logs = return_logs
        self.save_last = save_last
        self.epsilon = epsilon
        self.early_stop = early_stop
        self.early_stop_threshold = early_stop_threshold
        self.early_stop_patience = early_stop_patience
        self.save_tmp = save_tmp
        self.tmp_dir = tmp_dir
        self.raw_input_flag = raw_input_flag

        mask_size = np.ceil(
            np.array(input_shape[0:2], dtype=float) / upsample_size)
        mask_size = mask_size.astype(int)
        self.mask_size = mask_size
        mask = np.zeros(self.mask_size)
        pattern = np.zeros(input_shape)
        mask = np.expand_dims(mask, axis=2)

        mask_tanh = np.zeros_like(mask)
        pattern_tanh = np.zeros_like(pattern)

        # prepare mask related tensors
        self.mask_tanh_tensor = K.variable(mask_tanh)
        mask_tensor_unrepeat = (K.tanh(self.mask_tanh_tensor) /
                                (2 - self.epsilon) + 0.5)
        mask_tensor_unexpand = K.repeat_elements(mask_tensor_unrepeat,
                                                 rep=self.img_color,
                                                 axis=2)
        self.mask_tensor = K.expand_dims(mask_tensor_unexpand, axis=0)
        upsample_layer = UpSampling2D(size=(self.upsample_size,
                                            self.upsample_size))
        mask_upsample_tensor_uncrop = upsample_layer(self.mask_tensor)
        uncrop_shape = K.int_shape(mask_upsample_tensor_uncrop)[1:]
        cropping_layer = Cropping2D(
            cropping=((0, uncrop_shape[0] - self.input_shape[0]),
                      (0, uncrop_shape[1] - self.input_shape[1])))
        self.mask_upsample_tensor = cropping_layer(mask_upsample_tensor_uncrop)
        reverse_mask_tensor = (K.ones_like(self.mask_upsample_tensor) -
                               self.mask_upsample_tensor)

        def keras_preprocess(x_input, intensity_range):

            if intensity_range == 'raw':
                x_preprocess = x_input

            elif intensity_range == 'imagenet':
                # 'RGB'->'BGR'
                x_tmp = x_input[..., ::-1]
                # Zero-center by mean pixel
                mean = K.constant([[[103.939, 116.779, 123.68]]])
                x_preprocess = x_tmp - mean

            elif intensity_range == 'inception':
                x_preprocess = (x_input / 255.0 - 0.5) * 2.0

            elif intensity_range == 'mnist':
                x_preprocess = x_input / 255.0

            else:
                raise Exception('unknown intensity_range %s' % intensity_range)

            return x_preprocess

        def keras_reverse_preprocess(x_input, intensity_range):

            if intensity_range == 'raw':
                x_reverse = x_input

            elif intensity_range == 'imagenet':
                # Zero-center by mean pixel
                mean = K.constant([[[103.939, 116.779, 123.68]]])
                x_reverse = x_input + mean
                # 'BGR'->'RGB'
                x_reverse = x_reverse[..., ::-1]

            elif intensity_range == 'inception':
                x_reverse = (x_input / 2 + 0.5) * 255.0

            elif intensity_range == 'mnist':
                x_reverse = x_input * 255.0

            else:
                raise Exception('unknown intensity_range %s' % intensity_range)

            return x_reverse

        # prepare pattern related tensors
        self.pattern_tanh_tensor = K.variable(pattern_tanh)
        self.pattern_raw_tensor = ((K.tanh(self.pattern_tanh_tensor) /
                                    (2 - self.epsilon) + 0.5) * 255.0)

        # prepare input image related tensors
        # ignore clip operation here
        # assume input image is already clipped into valid color range
        input_tensor = K.placeholder(model.input_shape)
        if self.raw_input_flag:
            input_raw_tensor = input_tensor
        else:
            input_raw_tensor = keras_reverse_preprocess(
                input_tensor, self.intensity_range)

        # IMPORTANT: MASK OPERATION IN RAW DOMAIN
        X_adv_raw_tensor = (
            reverse_mask_tensor * input_raw_tensor +
            self.mask_upsample_tensor * self.pattern_raw_tensor)

        X_adv_tensor = keras_preprocess(X_adv_raw_tensor, self.intensity_range)

        output_tensor = model(X_adv_tensor)
        y_true_tensor = K.placeholder(model.output_shape)

        # Keras metrics/losses expect (y_true, y_pred) in that order
        self.loss_acc = categorical_accuracy(y_true_tensor, output_tensor)

        self.loss_ce = categorical_crossentropy(y_true_tensor, output_tensor)

        if self.regularization is None:
            self.loss_reg = K.constant(0)
        elif self.regularization == 'l1':
            self.loss_reg = (K.sum(K.abs(self.mask_upsample_tensor)) /
                             self.img_color)
        elif self.regularization == 'l2':
            self.loss_reg = K.sqrt(
                K.sum(K.square(self.mask_upsample_tensor)) / self.img_color)

        cost = self.init_cost
        self.cost_tensor = K.variable(cost)
        self.loss = self.loss_ce + self.loss_reg * self.cost_tensor

        self.opt = Adam(lr=self.lr, beta_1=0.5, beta_2=0.9)
        self.updates = self.opt.get_updates(
            params=[self.pattern_tanh_tensor, self.mask_tanh_tensor],
            loss=self.loss)
        self.train = K.function(
            [input_tensor, y_true_tensor],
            [self.loss_ce, self.loss_reg, self.loss, self.loss_acc],
            updates=self.updates)

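`K.placeholder` and `K.function` are TF1-style graph APIs, so under TensorFlow 2.x this constructor only runs with eager execution disabled; a minimal setup sketch, with hypothetical argument values and assuming the surrounding class is named `Visualizer`:

import tensorflow as tf
tf.compat.v1.disable_eager_execution()  # K.placeholder/K.function need graph mode

visualizer = Visualizer(model, intensity_range='raw', regularization='l1',
                        input_shape=(32, 32, 3), init_cost=1e-3, steps=1000,
                        mini_batch=32, lr=0.1, num_classes=10)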
Example #25
 def accuracy(self, y_true, y_pred):
     logits = self.get_logits(y_true, y_pred)
     accuracy = categorical_accuracy(y_true, logits)
     return accuracy
Example #26
 def mu_accuracy(self, y_true, y_pred, **args):
     logits_phi = y_pred[:, :self.num_classes]
     labels_phi = y_true[:, :self.num_classes]
     return categorical_accuracy(labels_phi, logits_phi)
Example #27
 def accuracy(self, y_true, y_pred):
   y_true = y_true[:, :self.num_class]
   y_pred = y_pred[:, :self.num_class]
   return categorical_accuracy(y_true, y_pred)
Example #28
def accuracy(y_true, y_pred):
    return categorical_accuracy(y_true, y_pred)
Example #29
def acc(y_true, y_pred):
    num_classes = util.NUM_CLASS
    y_true = y_true[:, :num_classes]
    y_pred = y_pred[:, :num_classes]
    return categorical_accuracy(y_true, y_pred)
Example #30
def acc(y_true, y_pred):
    y_true = y_true[:, :10]
    y_pred = y_pred[:, :10]
    return categorical_accuracy(y_true, y_pred)