Example #1
    def on_epoch_begin(self, epoch, logs=None):
        # estimate LID on a random subset of the training data at the start of each epoch
        rand_idxes = np.random.choice(self.X_train.shape[0],
                                      self.lid_subset_size,
                                      replace=False)
        lid = np.mean(
            get_lids_random_batch(self.model,
                                  self.X_train[rand_idxes],
                                  k=self.lid_k,
                                  batch_size=128))

        # anneal the pacing weight lambda linearly with training progress
        self.p_lambda = epoch * 1. / self.epochs

        # deal with a possible illegal (non-positive) LID value: carry the last
        # valid estimate forward, or 0. if this is the first epoch
        if lid > 0:
            self.lids.append(lid)
        else:
            self.lids.append(self.lids[-1] if self.lids else 0.)

        # find the turning point where to apply lid-paced learning strategy
        if self.found_turning_point(self.lids):
            self.update_learning_pace()

        if len(self.lids) > 5:
            print('lid = ..., ', self.lids[-5:])
        else:
            print('lid = ..., ', self.lids)

        if self.verbose > 0:
            print(
                '--Epoch: %s, LID: %.2f, min LID: %.2f, lid window: %s, turning epoch: %s, lambda: %.2f'
                % (epoch, lid, np.min(self.lids), self.epoch_win,
                   self.turning_epoch, self.p_lambda))

        return
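For orientation, here is a minimal sketch of the state this hook relies on and how such a callback might be attached to training. The class name LIDPacedCallback, the constructor defaults, and the epoch_win value are assumptions for illustration, not part of the original code:

import numpy as np
from keras.callbacks import Callback

class LIDPacedCallback(Callback):  # hypothetical name for the class this hook lives in
    def __init__(self, X_train, epochs, lid_subset_size=1280, lid_k=20, verbose=1):
        super(LIDPacedCallback, self).__init__()
        self.X_train = X_train
        self.epochs = epochs
        self.lid_subset_size = lid_subset_size
        self.lid_k = lid_k
        self.verbose = verbose
        self.lids = []           # per-epoch LID estimates
        self.p_lambda = 0.       # pacing weight annealed in on_epoch_begin
        self.turning_epoch = -1  # epoch at which found_turning_point fired
        self.epoch_win = 5       # assumed window size used by found_turning_point

    # on_epoch_begin(...) as defined above; found_turning_point and
    # update_learning_pace belong to the original class and are omitted here

# attaching it to training (assumed usage):
# model.fit(X_train, y_train, epochs=epochs,
#           callbacks=[LIDPacedCallback(X_train, epochs)])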
Example #2
def get_lid(model, X_test, X_test_noisy, X_test_adv, k=10, batch_size=100, dataset='mnist'):
    """
    Get local intrinsic dimensionality
    :param model: 
    :param X_train: 
    :param Y_train: 
    :param X_test: 
    :param X_test_noisy: 
    :param X_test_adv: 
    :return: artifacts: positive and negative examples with lid values, 
            labels: adversarial (label: 1) and normal/noisy (label: 0) examples
    """
    print('Extract local intrinsic dimensionality: k = %s' % k)
    lids_normal, lids_noisy, lids_adv = get_lids_random_batch(model, X_test, X_test_noisy,
                                                              X_test_adv, dataset, k, batch_size)
    print("lids_normal:", lids_normal.shape)
    print("lids_noisy:", lids_noisy.shape)
    print("lids_adv:", lids_adv.shape)

    ## skip the normalization, you may want to try different normalizations later
    ## so at this step, just save the raw values
    # lids_normal_z, lids_adv_z, lids_noisy_z = normalize(
    #     lids_normal,
    #     lids_adv,
    #     lids_noisy
    # )

    lids_pos = lids_adv
    lids_neg = np.concatenate((lids_normal, lids_noisy))
    artifacts, labels = merge_and_generate_labels(lids_pos, lids_neg)

    return artifacts, labels
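In the papers this code follows (Ma et al., ICLR 2018), helpers like get_lids_random_batch build on the maximum-likelihood LID estimator of Amsaleg et al.: given the sorted distances d_1 <= ... <= d_k from a point to its k nearest neighbours, LID is estimated as -k / sum_i log(d_i / d_k). A minimal NumPy sketch of that estimator, assuming the query batch is drawn from the reference data (so the zero self-distance is dropped); the name mle_batch_lid is illustrative:

import numpy as np
from scipy.spatial.distance import cdist

def mle_batch_lid(data, batch, k=10):
    # pairwise distances from each query point to the reference data
    dists = cdist(batch, data)
    # sort each row, drop the zero self-distance, keep the k nearest neighbours
    knn = np.apply_along_axis(lambda row: np.sort(row)[1:k + 1], 1, dists)
    knn = np.maximum(knn, 1e-12)  # guard against log(0) from duplicate points
    # MLE estimate: LID = -k / sum_i log(d_i / d_k)
    return -k / np.sum(np.log(knn / knn[:, -1:]), axis=1)

The adversarial-detection variant above applies this estimator per layer activation rather than in input space, which is why get_lids_random_batch returns one LID vector per example group.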
Example #4
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}  # avoid a mutable default argument
        tr_acc = logs.get('acc')
        tr_loss = logs.get('loss')
        val_loss = logs.get('val_loss')
        val_acc = logs.get('val_acc')
        # te_loss, te_acc = self.model.evaluate(self.X_test, self.y_test, batch_size=128, verbose=0)
        self.train_loss.append(tr_loss)
        self.test_loss.append(val_loss)
        self.train_acc.append(tr_acc)
        self.test_acc.append(val_acc)

        file_name = 'log/loss_%s_%s_%s.npy' % \
                    (self.model_name, self.dataset, self.noise_ratio)
        np.save(
            file_name,
            np.stack((np.array(self.train_loss), np.array(self.test_loss))))
        file_name = 'log/acc_%s_%s_%s.npy' % \
                    (self.model_name, self.dataset, self.noise_ratio)
        np.save(file_name,
                np.stack((np.array(self.train_acc), np.array(self.test_acc))))

        # print('\n--Epoch %02d, train_loss: %.2f, train_acc: %.2f, val_loss: %.2f, val_acc: %.2f' %
        #       (epoch, tr_loss, tr_acc, val_loss, val_acc))

        # calculate LID/CSR and save every epoch
        # (epoch % 1 == 0 always holds; raise the modulus to save less often)
        if epoch % 1 == 0:
            # compute lid scores
            rand_idxes = np.random.choice(self.X_train.shape[0],
                                          self.lid_subset * 10,
                                          replace=False)
            lid = np.mean(
                get_lids_random_batch(self.model,
                                      self.X_train[rand_idxes],
                                      k=self.lid_k,
                                      batch_size=self.lid_subset))
            self.lids.append(lid)

            file_name = 'log/lid_%s_%s_%s.npy' % \
                        (self.model_name, self.dataset, self.noise_ratio)
            np.save(file_name, np.array(self.lids))

            if len(self.lids) > 20:
                print('lid = ...', self.lids[-20:])
            else:
                print('lid = ', self.lids)

            # compute csr scores
            # LASS to estimate the critical sample ratio
            scale_factor = 255. / (np.max(self.X_test) - np.min(self.X_test))
            # NOTE: tf.placeholder requires TensorFlow 1.x graph mode
            # (tf.compat.v1.placeholder with eager execution disabled on TF 2.x)
            y = tf.placeholder(tf.float32,
                               shape=(None, ) + self.y_test.shape[1:])
            csr_model = lass(self.model.layers[0].input,
                             self.model.layers[-1].output,
                             y,
                             a=0.25 / scale_factor,
                             b=0.2 / scale_factor,
                             r=0.3 / scale_factor,
                             iter_max=100)
            rand_idxes = np.random.choice(self.X_test.shape[0],
                                          self.csr_subset,
                                          replace=False)
            X_adv, adv_ind = csr_model.find(self.X_test[rand_idxes],
                                            bs=self.csr_batchsize)
            # critical sample ratio: fraction of the subset for which LASS
            # found an adversarial perturbation
            csr = np.sum(adv_ind) * 1. / self.csr_subset
            self.csrs.append(csr)

            file_name = 'log/csr_%s_%s_%s.npy' % \
                        (self.model_name, self.dataset, self.noise_ratio)
            np.save(file_name, np.array(self.csrs))

            if len(self.csrs) > 20:
                print('csr = ...', self.csrs[-20:])
            else:
                print('csr = ', self.csrs)

        return
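Because every metric is persisted under log/ as a .npy file, the curves can be inspected after training without rerunning the model. A minimal loading sketch; the model_name, dataset, and noise_ratio values are illustrative placeholders that must match whatever the callback was run with:

import numpy as np

model_name, dataset, noise_ratio = 'resnet', 'cifar-10', 40  # illustrative values

loss = np.load('log/loss_%s_%s_%s.npy' % (model_name, dataset, noise_ratio))
acc = np.load('log/acc_%s_%s_%s.npy' % (model_name, dataset, noise_ratio))
lids = np.load('log/lid_%s_%s_%s.npy' % (model_name, dataset, noise_ratio))
csrs = np.load('log/csr_%s_%s_%s.npy' % (model_name, dataset, noise_ratio))

train_loss, val_loss = loss  # row order matches np.stack((train, test)) above
train_acc, val_acc = acc
print('final val_acc: %.4f, final LID: %.2f, final CSR: %.3f'
      % (val_acc[-1], lids[-1], csrs[-1]))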