Example #1
    def ADfunc(self):
        num_samples_t = self.samples.shape[0]
        print('sample_shape:', self.samples.shape)
        print('num_samples_t', num_samples_t)

        # -- only discriminate one batch for one time -- #
        D_test = np.empty([num_samples_t, self.settings['seq_length'], 1])
        DL_test = np.empty([num_samples_t, self.settings['seq_length'], 1])
        L_mb = np.empty([num_samples_t, self.settings['seq_length'], 1])
        for batch_idx in range(0, num_samples_t // self.settings['batch_size']):
            start_pos = batch_idx * self.settings['batch_size']
            end_pos = start_pos + self.settings['batch_size']
            T_mb = self.samples[start_pos:end_pos, :, :]
            L_mmb = self.labels[start_pos:end_pos, :, :]
            para_path = './experiments/parameters/' + self.settings['identifier'] + '_' + str(
                self.settings['seq_length']) + '_' + str(self.epoch) + '.npy'
            D_t, L_t = DR_discriminator.dis_trained_model(self.settings, T_mb, para_path)
            D_test[start_pos:end_pos, :, :] = D_t
            DL_test[start_pos:end_pos, :, :] = L_t
            L_mb[start_pos:end_pos, :, :] = L_mmb

        start_pos = (num_samples_t // self.settings['batch_size']) * self.settings['batch_size']
        end_pos = start_pos + self.settings['batch_size']
        size = self.samples[start_pos:end_pos, :, :].shape[0]
        fill = np.ones([self.settings['batch_size'] - size, self.samples.shape[1], self.samples.shape[2]])
        batch = np.concatenate([self.samples[start_pos:end_pos, :, :], fill], axis=0)
        para_path = './experiments/parameters/' + self.settings['identifier'] + '_' + str(
            self.settings['seq_length']) + '_' + str(self.epoch) + '.npy'
        D_t, L_t = DR_discriminator.dis_trained_model(self.settings, batch, para_path)
        L_mmb = self.labels[start_pos:end_pos, :, :]
        D_test[start_pos:end_pos, :, :] = D_t[:size, :, :]
        DL_test[start_pos:end_pos, :, :] = L_t[:size, :, :]
        L_mb[start_pos:end_pos, :, :] = L_mmb

        # -- use self-defined evaluation functions -- #
        # -- test different tao values for the detection function -- #
        results = np.zeros([12, 5])
        for i in range(2, 8):
            tao = 0.1 * i

            Accu4, Pre4, Rec4, F14, FPR4, D_L4 = DR_discriminator.detection_statistic(D_test, L_mb, tao)
            print('seq_length:', self.settings['seq_length'])
            print('point-wise-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
                  .format(self.epoch, tao, Accu4, Pre4, Rec4, F14, FPR4))
            results[i - 2, :] = [Accu4, Pre4, Rec4, F14, FPR4]

            Accu5, Pre5, Rec5, F15, FPR5 = DR_discriminator.sample_detection(D_test, L_mb, tao)
            print('seq_length:', self.settings['seq_length'])
            print('sample-wise-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
                  .format(self.epoch, tao, Accu5, Pre5, Rec5, F15, FPR5))
            results[i - 2+6, :] = [Accu5, Pre5, Rec5, F15, FPR5]

        return results
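The tail of ADfunc above pads the final partial batch with ones so the trained discriminator always receives a full batch_size, then keeps only the genuine rows. Below is a minimal, self-contained sketch of that pad-and-trim idiom; run_in_batches and score_fn are illustrative names, not part of the repository.

import numpy as np

def run_in_batches(samples, batch_size, score_fn):
    # score_fn maps a [batch_size, seq_length, num_signals] batch to [batch_size, seq_length, 1]
    n = samples.shape[0]
    scores = np.empty([n, samples.shape[1], 1])
    for b in range(n // batch_size):
        lo, hi = b * batch_size, (b + 1) * batch_size
        scores[lo:hi] = score_fn(samples[lo:hi])
    lo = (n // batch_size) * batch_size
    size = n - lo
    if size > 0:
        # pad to a full batch, score it, then keep only the real rows
        fill = np.ones([batch_size - size, samples.shape[1], samples.shape[2]])
        scores[lo:] = score_fn(np.concatenate([samples[lo:], fill], axis=0))[:size]
    return scores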
Example #2
    def ADfunc(self):
        num_samples_t = self.samples.shape[0]
        print('sample_shape:', self.samples.shape)  #49399,30,6
        print('num_samples_t', num_samples_t)  #49399

        # -- only discriminate one batch for one time -- #
        D_test = np.empty([num_samples_t, self.settings['seq_length'], 1])
        DL_test = np.empty([num_samples_t, self.settings['seq_length'], 1])
        L_mb = np.empty([num_samples_t, self.settings['seq_length'], 1])
        I_mb = np.empty([num_samples_t, self.settings['seq_length'], 1])
        batch_times = num_samples_t // self.settings['batch_size']
        for batch_idx in range(batch_times):
            # print('batch_idx: {}'.format(batch_idx))
            # display batch progress
            model.display_batch_progression(batch_idx, batch_times)
            start_pos = batch_idx * self.settings['batch_size']
            end_pos = start_pos + self.settings['batch_size']
            T_mb = self.samples[start_pos:end_pos, :, :]
            L_mmb = self.labels[start_pos:end_pos, :, :]
            I_mmb = self.index[start_pos:end_pos, :, :]
            para_path = './experiments/parameters/' + self.settings[
                'sub_id'] + '_' + str(self.settings['seq_length']) + '_' + str(
                    self.epoch) + '.npy'
            D_t, L_t = DR_discriminator.dis_trained_model(
                self.settings, T_mb, para_path)
            D_test[start_pos:end_pos, :, :] = D_t
            DL_test[start_pos:end_pos, :, :] = L_t
            L_mb[start_pos:end_pos, :, :] = L_mmb
            I_mb[start_pos:end_pos, :, :] = I_mmb

        start_pos = (num_samples_t //
                     self.settings['batch_size']) * self.settings['batch_size']
        end_pos = start_pos + self.settings['batch_size']
        size = self.samples[start_pos:end_pos, :, :].shape[0]
        fill = np.ones([
            self.settings['batch_size'] - size, self.samples.shape[1],
            self.samples.shape[2]
        ])
        batch = np.concatenate([self.samples[start_pos:end_pos, :, :], fill],
                               axis=0)
        para_path = './experiments/parameters/' + self.settings[
            'sub_id'] + '_' + str(self.settings['seq_length']) + '_' + str(
                self.epoch) + '.npy'
        D_t, L_t = DR_discriminator.dis_trained_model(self.settings, batch,
                                                      para_path)
        L_mmb = self.labels[start_pos:end_pos, :, :]
        I_mmb = self.index[start_pos:end_pos, :, :]
        D_test[start_pos:end_pos, :, :] = D_t[:size, :, :]
        DL_test[start_pos:end_pos, :, :] = L_t[:size, :, :]
        L_mb[start_pos:end_pos, :, :] = L_mmb
        I_mb[start_pos:end_pos, :, :] = I_mmb

        results = np.zeros([18, 4])
        for i in range(2, 8):
            tao = 0.1 * i
            Accu2, Pre2, Rec2, F12 = DR_discriminator.detection_Comb(
                DL_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print(
                'Comb-logits-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                .format(self.epoch, tao, Accu2, Pre2, Rec2, F12))
            results[i - 2, :] = [Accu2, Pre2, Rec2, F12]

            Accu3, Pre3, Rec3, F13 = DR_discriminator.detection_Comb(
                D_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print(
                'Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                .format(self.epoch, tao, Accu3, Pre3, Rec3, F13))
            results[i - 2 + 6, :] = [Accu3, Pre3, Rec3, F13]

            Accu5, Pre5, Rec5, F15 = DR_discriminator.sample_detection(
                D_test, L_mb, tao)
            print('seq_length:', self.settings['seq_length'])
            print(
                'sample-wise-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                .format(self.epoch, tao, Accu5, Pre5, Rec5, F15))
            results[i - 2 + 12, :] = [Accu5, Pre5, Rec5, F15]

        return results
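Both variants sweep the detection threshold tao from 0.2 to 0.7 and tabulate accuracy, precision, recall and F1 (plus FPR in Example #1). The sketch below shows one plausible way such point-wise statistics can be computed from discriminator outputs, assuming a time step is flagged anomalous when 1 - D exceeds tao; the actual DR_discriminator.detection_statistic may differ in detail.

import numpy as np

def pointwise_detection_metrics(D, L, tao):
    # D, L: [num_samples, seq_length, 1]; L holds binary anomaly labels
    pred = (1.0 - D.flatten()) > tao     # predicted anomaly mask
    true = L.flatten() > 0.5             # ground-truth anomaly mask
    tp = np.sum(pred & true)
    fp = np.sum(pred & ~true)
    fn = np.sum(~pred & true)
    tn = np.sum(~pred & ~true)
    accu = (tp + tn) / pred.size
    pre = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    rec = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    f1 = 2 * pre * rec / (pre + rec) if (pre + rec) > 0 else 0.0
    fpr = fp / (fp + tn) if (fp + tn) > 0 else 0.0
    return accu, pre, rec, f1, fpr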
Example #3
def SPE(X, pc):
    #  spe = X'(I-PP')X
    a, b = X.shape
    spe = np.zeros([a])
    I = np.identity(b, float) - np.matmul(pc.transpose(1, 0), pc)
    # I = np.matmul(I, I)
    for i in range(a):
        x = X[i, :].reshape([b, 1])
        y = np.matmul(x.transpose(1, 0), I)
        spe[i] = np.matmul(y, x)

    return spe

spe_n = SPE(X_n, pc)
spe_a = SPE(X_a, pc)

# spe_x = SPE(X, pc_a)

Accu, Pre, Rec, F1, FPR = dr.CUSUM_det(spe_n, spe_a, L_a)
print('SPE_I:Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'.format(Accu, Pre, Rec, F1, FPR))
# f = open("./experiments/plots/Measures_baseline.txt", "a")
# f.write('PCA-SPE:Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}\n'.format(Accu, Pre, Rec, F1, FPR))
# f.close()

#
# # projected values on the principal component
# # T = XP
# T_n = np.matmul(X_n, pc.transpose(1, 0))
# T_a = np.matmul(X_a, pc.transpose(1, 0))
# T_x = np.matmul(X, pc_x.transpose(1, 0))
#
# # projected values
# plt.plot(T_n[0:14400:100], 'b')
# plt.plot(T_a[0:14400:100], 'r')
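SPE here is the squared prediction error spe(x) = x' (I - P'P) x, i.e. the energy of x left outside the principal subspace whose components form the rows of pc. The sketch below only illustrates how such a pc could be obtained from normal training data with a plain SVD; the original baseline script may compute it differently.

import numpy as np

def principal_components(X_train, k):
    # top-k principal directions (rows) of mean-centred data X_train of shape [n, d]
    Xc = X_train - X_train.mean(axis=0)
    _, _, Vt = np.linalg.svd(Xc, full_matrices=False)
    return Vt[:k, :]   # shape [k, d]; plays the role of pc above

# illustrative usage: fit on normal data X_n, score test data X_a
# pc = principal_components(X_n, k=3)
# spe_a = SPE(X_a, pc)   # large SPE = far from the normal subspace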
Example #4
    TT_labels = T_labels[T_index, :, :]

    aa = TT_samples.shape[0]
    bb = TT_samples.shape[1]
    cc = TT_samples.shape[2]

    GG = np.empty([aa, bb, cc])
    DD = np.empty([aa, bb, cc])
    for i in range(500):
        # T_mb = T_samples[i*test_size:(i+1)*test_size, :, :]
        # L_mb = T_labels[i*test_size:(i+1)*test_size, :, :]
        T_mb = TT_samples[i, :, :]
        L_mb = TT_labels[i, :, :]

        Gs, Zs, error_per_sample, heuristic_sigma = DR_discriminator.invert(settings, epoch, T_mb, g_tolerance=None,
                                                                        e_tolerance=0.1, n_iter=None, max_iter=10000,
                                                                        heuristic_sigma=None)

        GG[i, :, :] = Gs
        print('sample{}; Gs_shape:{}'.format(i, Gs.shape))

        D_T, L_T = DR_discriminator.dis_trained_model(settings, epoch, T_mb)
        DD[i, :, :] = D_T
        print('sample{}; DT_shape:{}'.format(i, D_T.shape))

    Accu1, Pre1, Rec1, F11, FPR1, D_L = DR_discriminator.detection_statistic_R_D(DD, GG, T_samples, T_labels, 0.5, 0.8)
    print('point-wise-Epoch: {}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
          .format(epoch, Accu1, Pre1, Rec1, F11, FPR1))
Example #5
    def ADfunc(self):
        num_samples_t = self.samples.shape[0]
        t_size = 1156
        print('sample_shape:', self.samples.shape)
        print('num_samples_t', num_samples_t)

        T_index1 = np.asarray(list(range(2562, 3140)))  # test_normal
        T_index2 = np.asarray(list(range(6872, 7450)))  # test_anomaly
        T_index = np.concatenate((T_index1, T_index2))

        # -- only discriminate one batch for one time -- #
        D_test = np.empty([t_size, self.settings['seq_length'], 1])
        DL_test = np.empty([t_size, self.settings['seq_length'], 1])
        T_samples = np.empty([
            t_size, self.settings['seq_length'], self.settings['num_signals']
        ])
        L_mb = np.empty([t_size, self.settings['seq_length'], 1])
        I_mb = np.empty([t_size, self.settings['seq_length'], 1])
        # batch_times = t_size // self.settings['batch_size']
        # for batch_idx in range(0, t_size // self.settings['batch_size']):
        for batch_idx in range(0, t_size):
            # print('batch_idx: {}'.format(batch_idx))
            # display batch progress
            model.display_batch_progression(batch_idx, t_size)
            # start_pos = batch_idx * self.settings['batch_size']
            # end_pos = start_pos + self.settings['batch_size']
            T_mb = self.samples[T_index[batch_idx], :, :]
            L_mmb = self.labels[T_index[batch_idx], :, :]
            I_mmb = self.index[T_index[batch_idx], :, :]
            para_path = './experiments/parameters/' + self.settings[
                'sub_id'] + '_' + str(self.settings['seq_length']) + '_' + str(
                    self.epoch) + '.npy'
            D_t, L_t = DR_discriminator.dis_D_model(self.settings, T_mb,
                                                    para_path)
            T_samples[batch_idx, :, :] = T_mb
            D_test[batch_idx, :, :] = D_t
            DL_test[batch_idx, :, :] = L_t
            L_mb[batch_idx, :, :] = L_mmb
            I_mb[batch_idx, :, :] = I_mmb
        '''
        # start_pos = (num_samples_t // self.settings['batch_size']) * self.settings['batch_size']
        # end_pos = start_pos + self.settings['batch_size']
        # size = samples[start_pos:end_pos, :, :].shape[0]
        # fill = np.ones([self.settings['batch_size'] - size, samples.shape[1], samples.shape[2]])
        # batch = np.concatenate([samples[start_pos:end_pos, :, :], fill], axis=0)
        para_path = './experiments/parameters/' + self.settings['sub_id'] + '_' + str(
            self.settings['seq_length']) + '_' + str(self.epoch) + '.npy'
        D_t, L_t = DR_discriminator.dis_trained_model(self.settings, batch, para_path)
        L_mmb = self.labels[start_pos:end_pos, :, :]
        I_mmb = self.index[start_pos:end_pos, :, :]
        D_test[start_pos:end_pos, :, :] = D_t[:size, :, :]
        DL_test[start_pos:end_pos, :, :] = L_t[:size, :, :]
        L_mb[start_pos:end_pos, :, :] = L_mmb
        I_mb[start_pos:end_pos, :, :] = I_mmb
        '''
        results = np.zeros(5)
        tao = 0.7
        Accu2, Pre2, Rec2, F12, FPR2, D_L2 = DR_discriminator.detection_D_I(
            D_test, L_mb, I_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        print(
            'Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu2, Pre2, Rec2, F12, FPR2))
        results = [Accu2, Pre2, Rec2, F12, FPR2]
        '''
        for i in range(2, 8):
            tao = 0.1 * i
            Accu2, Pre2, Rec2, F12 = DR_discriminator.detection_Comb(
                DL_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print('Comb-logits-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                  .format(self.epoch, tao, Accu2, Pre2, Rec2, F12))
            results[i - 2, :] = [Accu2, Pre2, Rec2, F12]

            Accu3, Pre3, Rec3, F13 = DR_discriminator.detection_Comb(
                D_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print('Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                  .format(self.epoch, tao, Accu3, Pre3, Rec3, F13))
            results[i - 2+6, :] = [Accu3, Pre3, Rec3, F13]

            Accu5, Pre5, Rec5, F15 = DR_discriminator.sample_detection(D_test, L_mb, tao)
            print('seq_length:', self.settings['seq_length'])
            print('sample-wise-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                  .format(self.epoch, tao, Accu5, Pre5, Rec5, F15))
            results[i - 2+12, :] = [Accu5, Pre5, Rec5, F15]
            '''

        return results
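detection_D_I receives the per-window scores together with I_mb (the original time indices of every window position) and seq_step, which indicates that scores from overlapping windows are mapped back onto the original series before thresholding. The following is a minimal sketch of such an aggregation, simply averaging all window scores that cover each time point; aggregate_window_scores is an illustrative helper and the library's exact rule is not shown in this snippet.

import numpy as np

def aggregate_window_scores(D, I, series_length):
    # D: [num_windows, seq_length, 1] scores, I: [num_windows, seq_length, 1] original time indices
    score_sum = np.zeros(series_length)
    counts = np.zeros(series_length)
    idx = I.astype(int).reshape(I.shape[0], I.shape[1])
    vals = D.reshape(D.shape[0], D.shape[1])
    for w in range(idx.shape[0]):
        np.add.at(score_sum, idx[w], vals[w])
        np.add.at(counts, idx[w], 1)
    counts[counts == 0] = 1          # avoid division by zero for uncovered points
    return score_sum / counts        # point-wise score on the original series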
Example #6
    def ADfunc(self):
        num_samples_t = self.samples.shape[0]
        t_size = 500
        T_index = np.random.choice(num_samples_t, size=t_size, replace=False)
        print('sample_shape:', self.samples.shape)
        print('num_samples_t', num_samples_t)

        # -- only discriminate one batch for one time -- #
        D_test = np.empty([t_size, self.settings['seq_length'], 1])
        DL_test = np.empty([t_size, self.settings['seq_length'], 1])
        GG = np.empty([
            t_size, self.settings['seq_length'], self.settings['num_signals']
        ])
        T_samples = np.empty([
            t_size, self.settings['seq_length'], self.settings['num_signals']
        ])
        L_mb = np.empty([t_size, self.settings['seq_length'], 1])
        I_mb = np.empty([t_size, self.settings['seq_length'], 1])
        for batch_idx in range(0, t_size):
            # print('epoch:{}'.format(self.epoch))
            # print('batch_idx:{}'.format(batch_idx))
            # display batch progress
            model.display_batch_progression(batch_idx, t_size)
            T_mb = self.samples[T_index[batch_idx], :, :]
            L_mmb = self.labels[T_index[batch_idx], :, :]
            I_mmb = self.index[T_index[batch_idx], :, :]
            para_path = './experiments/parameters/' + self.settings[
                'sub_id'] + '_' + str(self.settings['seq_length']) + '_' + str(
                    self.epoch) + '.npy'
            D_t, L_t = DR_discriminator.dis_D_model(self.settings, T_mb,
                                                    para_path)
            Gs, Zs, error_per_sample, heuristic_sigma = DR_discriminator.invert(
                self.settings,
                T_mb,
                para_path,
                g_tolerance=None,
                e_tolerance=0.1,
                n_iter=None,
                max_iter=1000,
                heuristic_sigma=None)
            GG[batch_idx, :, :] = Gs
            T_samples[batch_idx, :, :] = T_mb
            D_test[batch_idx, :, :] = D_t
            DL_test[batch_idx, :, :] = L_t
            L_mb[batch_idx, :, :] = L_mmb
            I_mb[batch_idx, :, :] = I_mmb

        # -- use self-defined evaluation functions -- #
        # -- test different tao values for the detection function -- #
        results = np.empty([5, 5])
        # for i in range(2, 8):
        #     tao = 0.1 * i
        tao = 0.5
        lam = 0.8
        Accu1, Pre1, Rec1, F11, FPR1, D_L1 = DR_discriminator.detection_D_I(
            DL_test, L_mb, I_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        print(
            'D:Comb-logits-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu1, Pre1, Rec1, F11, FPR1))
        results[0, :] = [Accu1, Pre1, Rec1, F11, FPR1]

        Accu2, Pre2, Rec2, F12, FPR2, D_L2 = DR_discriminator.detection_D_I(
            D_test, L_mb, I_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        print(
            'D:Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu2, Pre2, Rec2, F12, FPR2))
        results[1, :] = [Accu2, Pre2, Rec2, F12, FPR2]

        Accu3, Pre3, Rec3, F13, FPR3, D_L3 = DR_discriminator.detection_R_D_I(
            DL_test, GG, T_samples, L_mb, self.settings['seq_step'], tao, lam)
        print('seq_length:', self.settings['seq_length'])
        print(
            'RD:Comb-logits_based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu3, Pre3, Rec3, F13, FPR3))
        results[2, :] = [Accu3, Pre3, Rec3, F13, FPR3]

        Accu4, Pre4, Rec4, F14, FPR4, D_L4 = DR_discriminator.detection_R_D_I(
            D_test, GG, T_samples, L_mb, self.settings['seq_step'], tao, lam)
        print('seq_length:', self.settings['seq_length'])
        print(
            'RD:Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu4, Pre4, Rec4, F14, FPR4))
        results[3, :] = [Accu4, Pre4, Rec4, F14, FPR4]

        Accu5, Pre5, Rec5, F15, FPR5, D_L5 = DR_discriminator.detection_R_I(
            GG, T_samples, L_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        print(
            'G:Comb-sample-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu5, Pre5, Rec5, F15, FPR5))
        results[4, :] = [Accu5, Pre5, Rec5, F15, FPR5]

        return results, GG, D_test, DL_test
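Example #6 evaluates every window twice: with the discriminator (D_test / DL_test) and with the reconstruction obtained by inverting the generator (GG against T_samples); detection_R_D_I then fuses the two streams using tao and lam. The sketch below shows one common way such a blended anomaly score is formed, assuming lam weights the reconstruction error against the discrimination score; the exact weighting inside DR_discriminator may differ.

import numpy as np

def combined_anomaly_score(D, G, X, lam=0.8):
    # D: [n, seq_length, 1] discriminator outputs (high = looks real)
    # G: [n, seq_length, num_signals] generator reconstructions, X: matching real windows
    residual = np.abs(G - X).mean(axis=2, keepdims=True)    # reconstruction error per step
    return lam * residual + (1.0 - lam) * (1.0 - D)          # high score = more anomalous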
Example #7
            end_pos = start_pos + settings['batch_size']
            D_test[start_pos:end_pos, :, :] = D_t
            DL_test[start_pos:end_pos, :, :] = L_t
            L_mb[start_pos:end_pos, :, :] = L_mmb
            I_mb[start_pos:end_pos, :, :] = I_mmb

        # T_mb, L_mb, I_mb = model.sample_TT(batch_size)
        # D_test, L_test = sess.run([D_pro, L_pro], feed_dict={T: T_mb})

        sss = D_test.shape
        ssss = L_mb.shape

        print('D_test shape:{}'.format(sss))
        print('L_mb shape:{}'.format(ssss))

        Accu1, Pre1, Rec1, F11, FPR1, D_L = DR_discriminator.detection_logits(
            DL_test, L_mb)
        print(
            'logits-based-Epoch: {}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(epoch, Accu1, Pre1, Rec1, F11, FPR1))

        for i in range(3, 8):
            tao = 0.1 * i
            Accu1, Pre1, Rec1, F11, FPR1, D_L = DR_discriminator.detection_logits_I(
                DL_test, L_mb, I_mb, tao)
            print(
                'Comb-logits-based-Epoch: {}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
                .format(epoch, Accu1, Pre1, Rec1, F11, FPR1))

            Accu1, Pre1, Rec1, F11, FPR1, D_L = DR_discriminator.detection_statistic_I(
                D_test, L_mb, I_mb, tao)
            print(
                'Comb-statistic-based-Epoch: {}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
                .format(epoch, Accu1, Pre1, Rec1, F11, FPR1))
Example #8
                                    epoch,
                                    identifier,
                                    num_epochs,
                                    resample_rate_in_min,
                                    multivariate_mnist,
                                    seq_length,
                                    labels=vis_sample)

        # DR_discriminator.save_samples(vis_sample, epoch)

        D_test, L_test = sess.run([D_pro, L_pro], feed_dict={T: T_mb})
        sss = D_test.shape
        print('D_test shape:{}'.format(sss))

        # DR_Pro = 1-tf.reduce_mean(D_test)
        Accu1, Pre1, Rec1, F11, FPR1, D_L = DR_discriminator.detection_statistic(
            D_test, L_mb, 0.5)
        print(
            'point-wise-Epoch: {}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(epoch, Accu1, Pre1, Rec1, F11, FPR1))
        DR_discriminator.anomaly_detection_plot(D_test, T_mb, L_mb, D_L, epoch,
                                                identifier)

        Accu, Pre, Rec, F1, FPR = DR_discriminator.sample_detection(
            D_test, L_mb, 0.5)
        print(
            'sample-wise-Epoch: {}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(epoch, Accu, Pre, Rec, F1, FPR))

        f = open("./experiments/plots/Measures.txt", "a")
        f.write('--------------------------------------------\n')
        f.write(
Example #9
    def ADfunc(self):
        timeG = []

        num_samples_t = self.samples.shape[0]
        # num_samples_t = 100

        batch_size = self.settings['batch_size']
        # batch_size = 1
        num_batches = num_samples_t // batch_size

        print('samples shape:', self.samples.shape)
        print('num_samples_t', num_samples_t)
        print('batch_size', batch_size)
        print('num_batches', num_batches)

        # -- only discriminate one batch for one time -- #
        D_test = np.empty([num_samples_t, self.settings['seq_length'], 1])
        DL_test = np.empty([num_samples_t, self.settings['seq_length'], 1])
        GG = np.empty([
            num_samples_t, self.settings['seq_length'],
            self.settings['num_signals']
        ])
        T_samples = np.empty([
            num_samples_t, self.settings['seq_length'],
            self.settings['num_signals']
        ])
        L_mb = np.empty([num_samples_t, self.settings['seq_length'], 1])
        I_mb = np.empty([num_samples_t, self.settings['seq_length'], 1])

        # for batch_idx in range(num_batches):
        for batch_idx in range(0, num_batches):
            # for batch_idx in range(0, 5):
            model.display_batch_progression(batch_idx, num_batches)
            start_pos = batch_idx * batch_size
            end_pos = start_pos + batch_size
            T_mb = self.samples[start_pos:end_pos, :, :]
            L_mmb = self.labels[start_pos:end_pos, :, :]
            I_mmb = self.index[start_pos:end_pos, :, :]

            para_path = './experiments/parameters/' + self.settings[
                'sub_id'] + '_' + str(self.settings['seq_length']) + '_' + str(
                    self.epoch) + '.npy'
            D_t, L_t = DR_discriminator.dis_D_model(self.settings, T_mb,
                                                    para_path)
            # D_t, L_t = autoencoderFunctions.discriminatorTrainedModel(self.settings, T_mb, para_path)

            time1 = time()
            Gs, Zs, error_per_sample, heuristic_sigma = DR_discriminator.invert(
                self.settings,
                T_mb,
                para_path,
                g_tolerance=None,
                e_tolerance=0.1,
                n_iter=None,
                max_iter=10,
                heuristic_sigma=None)
            timeG = np.append(timeG, time() - time1)

            D_test[start_pos:end_pos, :, :] = D_t
            DL_test[start_pos:end_pos, :, :] = L_t
            GG[start_pos:end_pos, :, :] = Gs
            T_samples[start_pos:end_pos, :, :] = T_mb
            L_mb[start_pos:end_pos, :, :] = L_mmb
            I_mb[start_pos:end_pos, :, :] = I_mmb

        # Completes the sample data that wasn't in the last batch because the batch wasn't complete
        start_pos = num_batches * batch_size
        end_pos = start_pos + batch_size
        size = self.samples[start_pos:end_pos, :, :].shape[0]
        fill = np.ones([batch_size - size, self.samples.shape[1], self.samples.shape[2]])
        batch = np.concatenate([self.samples[start_pos:end_pos, :, :], fill],
                               axis=0)

        para_path = './experiments/parameters/' + self.settings[
            'sub_id'] + '_' + str(self.settings['seq_length']) + '_' + str(
                self.epoch) + '.npy'
        D_t, L_t = DR_discriminator.dis_D_model(self.settings, batch, para_path)
        # D_t, L_t = autoencoderFunctions.discriminatorTrainedModel(self.settings, batch, para_path)
        # time1 = time()
        Gs, Zs, error_per_sample, heuristic_sigma = DR_discriminator.invert(
            self.settings,
            batch,
            para_path,
            g_tolerance=None,
            e_tolerance=0.1,
            n_iter=None,
            max_iter=10,
            heuristic_sigma=None)
        # timeG = np.append(time() - time1)

        np.save(path_AD_autoencoder_results + "/timeG.npy", timeG)

        D_test[start_pos:end_pos, :, :] = D_t[:size, :, :]
        DL_test[start_pos:end_pos, :, :] = L_t[:size, :, :]
        GG[start_pos:end_pos, :, :] = Gs[:size, :, :]
        T_samples[start_pos:end_pos, :, :] = batch[:size, :, :]
        L_mmb = self.labels[start_pos:end_pos, :, :]
        I_mmb = self.index[start_pos:end_pos, :, :]
        L_mb[start_pos:end_pos, :, :] = L_mmb
        I_mb[start_pos:end_pos, :, :] = I_mmb

        #------------------------------------------------------------
        savePath_DL1 = path_AD_autoencoder_results + "/DL1" + ".npy"
        savePath_DL2 = path_AD_autoencoder_results + "/DL2" + ".npy"
        savePath_LL = path_AD_autoencoder_results + "/LL" + ".npy"
        savePath_RL = path_AD_autoencoder_results + "/RL" + ".npy"
        D_L_1, D_L_2, L_L, R_L = autoencoderFunctions.computeAndSaveDandRLossesSingleG(
            D_test, DL_test, GG, T_samples, L_mb, self.settings['seq_step'],
            savePath_DL1, savePath_DL2, savePath_LL, savePath_RL)
        #------------------------------------------------------------
        return