Beispiel #1
0
    def ADfunc(self):
        """Score every sample with the pre-trained discriminator and sweep
        detection thresholds.

        Samples are discriminated one batch at a time.  The trailing
        partial batch is padded with ones so the model always sees a full
        batch, and only the real rows of its output are kept.  Three
        detection strategies are then evaluated for tao in 0.2..0.7.

        Returns:
            np.ndarray of shape (18, 4): rows 0-5 logits-based, rows 6-11
            statistic-based, rows 12-17 sample-wise; columns are
            [Accuracy, Precision, Recall, F1].
        """
        num_samples_t = self.samples.shape[0]
        print('sample_shape:', self.samples.shape)  # e.g. (49399, 30, 6)
        print('num_samples_t', num_samples_t)

        seq_length = self.settings['seq_length']
        batch_size = self.settings['batch_size']

        # -- only discriminate one batch for one time -- #
        # Per-timestep discriminator outputs/logits plus matching labels
        # and indices, assembled over the whole sample set.
        D_test = np.empty([num_samples_t, seq_length, 1])
        DL_test = np.empty([num_samples_t, seq_length, 1])
        L_mb = np.empty([num_samples_t, seq_length, 1])
        I_mb = np.empty([num_samples_t, seq_length, 1])

        # The parameter file is identical for every batch; build the path
        # once instead of rebuilding it inside the loop.
        para_path = './experiments/parameters/' + self.settings[
            'sub_id'] + '_' + str(seq_length) + '_' + str(self.epoch) + '.npy'

        batch_times = num_samples_t // batch_size
        for batch_idx in range(batch_times):
            # display batch progress
            model.display_batch_progression(batch_idx, batch_times)
            start_pos = batch_idx * batch_size
            end_pos = start_pos + batch_size
            T_mb = self.samples[start_pos:end_pos, :, :]
            D_t, L_t = DR_discriminator.dis_trained_model(
                self.settings, T_mb, para_path)
            D_test[start_pos:end_pos, :, :] = D_t
            DL_test[start_pos:end_pos, :, :] = L_t
            L_mb[start_pos:end_pos, :, :] = self.labels[start_pos:end_pos, :, :]
            I_mb[start_pos:end_pos, :, :] = self.index[start_pos:end_pos, :, :]

        # Trailing partial batch: pad to a full batch with ones, run the
        # model, then keep only the `size` real rows of its output.
        start_pos = batch_times * batch_size
        end_pos = start_pos + batch_size
        # BUG FIX: the original referenced the undefined name `samples`
        # here (NameError); the data lives on `self.samples`.
        tail = self.samples[start_pos:end_pos, :, :]
        size = tail.shape[0]
        fill = np.ones(
            [batch_size - size, self.samples.shape[1], self.samples.shape[2]])
        batch = np.concatenate([tail, fill], axis=0)
        D_t, L_t = DR_discriminator.dis_trained_model(self.settings, batch,
                                                      para_path)
        D_test[start_pos:end_pos, :, :] = D_t[:size, :, :]
        DL_test[start_pos:end_pos, :, :] = L_t[:size, :, :]
        L_mb[start_pos:end_pos, :, :] = self.labels[start_pos:end_pos, :, :]
        I_mb[start_pos:end_pos, :, :] = self.index[start_pos:end_pos, :, :]

        results = np.zeros([18, 4])
        for i in range(2, 8):
            tao = 0.1 * i  # detection threshold under test
            Accu2, Pre2, Rec2, F12 = DR_discriminator.detection_Comb(
                DL_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print(
                'Comb-logits-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                .format(self.epoch, tao, Accu2, Pre2, Rec2, F12))
            results[i - 2, :] = [Accu2, Pre2, Rec2, F12]

            Accu3, Pre3, Rec3, F13 = DR_discriminator.detection_Comb(
                D_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print(
                'Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                .format(self.epoch, tao, Accu3, Pre3, Rec3, F13))
            results[i - 2 + 6, :] = [Accu3, Pre3, Rec3, F13]

            Accu5, Pre5, Rec5, F15 = DR_discriminator.sample_detection(
                D_test, L_mb, tao)
            print('seq_length:', self.settings['seq_length'])
            print(
                'sample-wise-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                .format(self.epoch, tao, Accu5, Pre5, Rec5, F15))
            results[i - 2 + 12, :] = [Accu5, Pre5, Rec5, F15]

        return results
Beispiel #2
0
    def ADfunc(self):
        """Run the discriminator and/or encoder+generator over all samples
        and compute/save the configured loss curves.

        The module-level ``dgbConfig`` selects which models run:
        1 -> discriminator only, 2 -> encoder/generator only, anything
        else -> both.  Samples are processed batch-wise; the trailing
        partial batch is padded with ones and only its real rows are kept.
        Loss arrays are written below ``path_AD_autoencoder_results``.
        """
        num_samples_t = self.samples.shape[0]
        batch_size = self.settings['batch_size']
        num_batches = num_samples_t // batch_size
        seq_length = self.settings['seq_length']
        num_signals = self.settings['num_signals']

        print('samples shape:', self.samples.shape)
        print('num_samples_t', num_samples_t)
        print('batch_size', batch_size)
        print('num_batches', num_batches)

        # -- only discriminate one batch for one time -- #
        # Generator reconstructions/logits, raw samples, discriminator
        # outputs/logits, labels and indices for the whole sample set.
        GG = np.empty([num_samples_t, seq_length, num_signals])
        GG_l = np.empty([num_samples_t, seq_length, num_signals])
        T_samples = np.empty([num_samples_t, seq_length, num_signals])
        D_test = np.empty([num_samples_t, seq_length, 1])
        DL_test = np.empty([num_samples_t, seq_length, 1])
        L_mb = np.empty([num_samples_t, seq_length, 1])
        I_mb = np.empty([num_samples_t, seq_length, 1])

        # GAN parameters path to load the pre-trained discriminator and
        # generator; identical for every batch, so build it once.
        para_path = './experiments/parameters/' + self.settings[
            'sub_id'] + '_' + str(seq_length) + '_' + str(
                self.epoch_GAN) + '.npy'

        for batch_idx in range(num_batches):
            model.display_batch_progression(batch_idx, num_batches)
            start_pos = batch_idx * batch_size
            end_pos = start_pos + batch_size
            T_mb = self.samples[start_pos:end_pos, :, :]

            # Discriminator output values using the pre-trained GAN
            # discriminator model (skipped when dgbConfig == 2).
            if dgbConfig != 2:
                D_output, D_logits = autoencoderFunctions.discriminatorTrainedModel(
                    self.settings, T_mb, para_path)
                D_test[start_pos:end_pos, :, :] = D_output
                DL_test[start_pos:end_pos, :, :] = D_logits

            # Generator output values using the pre-trained encoder and
            # (GAN) generator model (skipped when dgbConfig == 1).
            if dgbConfig != 1:
                G_output, G_logits = autoencoderFunctions.encoderGeneratorTrainedModels(
                    self.settings, T_mb, para_path,
                    path_autoencoder_training_parameters)
                GG[start_pos:end_pos, :, :] = G_output
                GG_l[start_pos:end_pos, :, :] = G_logits

            T_samples[start_pos:end_pos, :, :] = T_mb
            L_mb[start_pos:end_pos, :, :] = self.labels[start_pos:end_pos, :, :]
            I_mb[start_pos:end_pos, :, :] = self.index[start_pos:end_pos, :, :]

        # Completes the sample data that wasn't in the last batch because
        # the batch wasn't complete: pad with ones to a full batch, run
        # the models, keep only the `size` real rows.
        start_pos = num_batches * batch_size
        end_pos = start_pos + batch_size
        # BUG FIX: the original referenced the undefined name `samples`
        # (NameError); the data lives on `self.samples`.
        tail = self.samples[start_pos:end_pos, :, :]
        size = tail.shape[0]
        fill = np.ones(
            [batch_size - size, self.samples.shape[1], self.samples.shape[2]])
        batch = np.concatenate([tail, fill], axis=0)

        if dgbConfig != 2:
            D_output, D_logits = autoencoderFunctions.discriminatorTrainedModel(
                self.settings, batch, para_path)
            D_test[start_pos:end_pos, :, :] = D_output[:size, :, :]
            DL_test[start_pos:end_pos, :, :] = D_logits[:size, :, :]

        if dgbConfig != 1:
            G_output, G_logits = autoencoderFunctions.encoderGeneratorTrainedModels(
                self.settings, batch, para_path,
                path_autoencoder_training_parameters)
            GG[start_pos:end_pos, :, :] = G_output[:size, :, :]
            GG_l[start_pos:end_pos, :, :] = G_logits[:size, :, :]

        # BUG FIX: the original stored T_mb (the last *full* batch, stale
        # data) into the tail positions; they must hold the tail samples.
        T_samples[start_pos:end_pos, :, :] = batch[:size, :, :]
        L_mb[start_pos:end_pos, :, :] = self.labels[start_pos:end_pos, :, :]
        I_mb[start_pos:end_pos, :, :] = self.index[start_pos:end_pos, :, :]

        #------------------------------------------------------------
        savePath_DL1 = path_AD_autoencoder_results + "/DL1" + ".npy"
        savePath_DL2 = path_AD_autoencoder_results + "/DL2" + ".npy"
        savePath_LL = path_AD_autoencoder_results + "/LL" + ".npy"
        savePath_RL = path_AD_autoencoder_results + "/RL" + ".npy"
        savePath_RL_log = path_AD_autoencoder_results + "/RL_log" + ".npy"

        if dgbConfig == 1:
            D_L_1, D_L_2, L_L = autoencoderFunctions.computeAndSaveDLoss(
                D_test, DL_test, T_samples, L_mb, self.settings['seq_step'],
                savePath_DL1, savePath_DL2, savePath_LL)
        elif dgbConfig == 2:
            L_L, R_L = autoencoderFunctions.computeAndSaveRLoss(
                GG, T_samples, L_mb, self.settings['seq_step'], savePath_LL,
                savePath_RL)
        else:
            D_L_1, D_L_2, L_L, R_L, R_log_L = autoencoderFunctions.computeAndSaveDandRLosses(
                D_test, DL_test, GG, GG_l, T_samples, L_mb,
                self.settings['seq_step'], savePath_DL1, savePath_DL2,
                savePath_LL, savePath_RL, savePath_RL_log)
        #------------------------------------------------------------

        return
Beispiel #3
0
    def ADfunc(self):
        """Evaluate anomaly detection on a random subset of 500 samples.

        Each selected sample is scored by the pre-trained discriminator
        and inverted through the generator; five detection variants are
        then evaluated at fixed tao=0.5, lam=0.8.

        Returns:
            (results, GG, D_test, DL_test) where results is a (5, 5)
            array of [Accuracy, Precision, Recall, F1, FPR] rows.
        """
        num_samples_t = self.samples.shape[0]
        t_size = 500
        T_index = np.random.choice(num_samples_t, size=t_size, replace=False)
        print('sample_shape:', self.samples.shape[0])
        print('num_samples_t', num_samples_t)

        seq_len = self.settings['seq_length']
        n_sig = self.settings['num_signals']

        # -- only discriminate one batch for one time -- #
        D_test = np.empty([t_size, seq_len, 1])
        DL_test = np.empty([t_size, seq_len, 1])
        GG = np.empty([t_size, seq_len, n_sig])
        T_samples = np.empty([t_size, seq_len, n_sig])
        L_mb = np.empty([t_size, seq_len, 1])
        I_mb = np.empty([t_size, seq_len, 1])

        # The saved parameter file is the same for every sample.
        para_path = ('./experiments/parameters/' + self.settings['sub_id'] +
                     '_' + str(seq_len) + '_' + str(self.epoch) + '.npy')

        for idx in range(t_size):
            # display batch progress
            model.display_batch_progression(idx, t_size)
            sample = self.samples[T_index[idx], :, :]
            D_t, L_t = DR_discriminator.dis_D_model(self.settings, sample,
                                                    para_path)
            Gs, Zs, error_per_sample, heuristic_sigma = DR_discriminator.invert(
                self.settings,
                sample,
                para_path,
                g_tolerance=None,
                e_tolerance=0.1,
                n_iter=None,
                max_iter=1000,
                heuristic_sigma=None)
            GG[idx, :, :] = Gs
            T_samples[idx, :, :] = sample
            D_test[idx, :, :] = D_t
            DL_test[idx, :, :] = L_t
            L_mb[idx, :, :] = self.labels[T_index[idx], :, :]
            I_mb[idx, :, :] = self.index[T_index[idx], :, :]

        # -- use self-defined evaluation functions -- #
        # -- test different tao values for the detection function -- #
        results = np.empty([5, 5])
        tao = 0.5
        lam = 0.8

        Accu1, Pre1, Rec1, F11, FPR1, D_L1 = DR_discriminator.detection_D_I(
            DL_test, L_mb, I_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        print(
            'D:Comb-logits-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu1, Pre1, Rec1, F11, FPR1))
        results[0, :] = [Accu1, Pre1, Rec1, F11, FPR1]

        Accu2, Pre2, Rec2, F12, FPR2, D_L2 = DR_discriminator.detection_D_I(
            D_test, L_mb, I_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        print(
            'D:Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu2, Pre2, Rec2, F12, FPR2))
        results[1, :] = [Accu2, Pre2, Rec2, F12, FPR2]

        Accu3, Pre3, Rec3, F13, FPR3, D_L3 = DR_discriminator.detection_R_D_I(
            DL_test, GG, T_samples, L_mb, self.settings['seq_step'], tao, lam)
        print('seq_length:', self.settings['seq_length'])
        print(
            'RD:Comb-logits_based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu3, Pre3, Rec3, F13, FPR3))
        results[2, :] = [Accu3, Pre3, Rec3, F13, FPR3]

        Accu4, Pre4, Rec4, F14, FPR4, D_L4 = DR_discriminator.detection_R_D_I(
            D_test, GG, T_samples, L_mb, self.settings['seq_step'], tao, lam)
        print('seq_length:', self.settings['seq_length'])
        print(
            'RD:Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu4, Pre4, Rec4, F14, FPR4))
        results[3, :] = [Accu4, Pre4, Rec4, F14, FPR4]

        Accu5, Pre5, Rec5, F15, FPR5, D_L5 = DR_discriminator.detection_R_I(
            GG, T_samples, L_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        print(
            'G:Comb-sample-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu5, Pre5, Rec5, F15, FPR5))
        results[4, :] = [Accu5, Pre5, Rec5, F15, FPR5]

        return results, GG, D_test, DL_test
    def ADfunc(self):
        """Discriminate a fixed test split (1156 samples) with the
        pre-trained discriminator and evaluate detection at tao = 0.7.

        Returns:
            list of [Accuracy, Precision, Recall, F1, FPR] for the
            statistic-based detector.
        """
        num_samples_t = self.samples.shape[0]
        # Fixed-size evaluation set: 578 "normal" + 578 "anomaly" indices.
        t_size = 1156
        print('sample_shape:', self.samples.shape[0])
        print('num_samples_t', num_samples_t)

        # Hard-coded index ranges into the sample tensor.
        # NOTE(review): these offsets are dataset-specific — confirm they
        # still match the current data split before reuse.
        T_index1 = np.asarray(list(range(2562, 3140)))  # test_normal
        T_index2 = np.asarray(list(range(6872, 7450)))  # test_anomaly
        T_index = np.concatenate((T_index1, T_index2))

        # -- only discriminate one batch for one time -- #
        # Per-timestep discriminator outputs/logits plus matching samples,
        # labels and indices for the selected subset.
        D_test = np.empty([t_size, self.settings['seq_length'], 1])
        DL_test = np.empty([t_size, self.settings['seq_length'], 1])
        T_samples = np.empty([
            t_size, self.settings['seq_length'], self.settings['num_signals']
        ])
        L_mb = np.empty([t_size, self.settings['seq_length'], 1])
        I_mb = np.empty([t_size, self.settings['seq_length'], 1])
        # batch_times = t_size // self.settings['batch_size']
        # for batch_idx in range(0, t_size // self.settings['batch_size']):
        # One sample per iteration (despite the `batch_idx` name).
        for batch_idx in range(0, t_size):
            # print('batch_idx:{}
            # display batch progress
            model.display_batch_progression(batch_idx, t_size)
            # start_pos = batch_idx * self.settings['batch_size']
            # end_pos = start_pos + self.settings['batch_size']
            T_mb = self.samples[T_index[batch_idx], :, :]
            L_mmb = self.labels[T_index[batch_idx], :, :]
            I_mmb = self.index[T_index[batch_idx], :, :]
            # Path to the saved model parameters for this epoch.
            para_path = './experiments/parameters/' + self.settings[
                'sub_id'] + '_' + str(self.settings['seq_length']) + '_' + str(
                    self.epoch) + '.npy'
            D_t, L_t = DR_discriminator.dis_D_model(self.settings, T_mb,
                                                    para_path)
            T_samples[batch_idx, :, :] = T_mb
            D_test[batch_idx, :, :] = D_t
            DL_test[batch_idx, :, :] = L_t
            L_mb[batch_idx, :, :] = L_mmb
            I_mb[batch_idx, :, :] = I_mmb
        '''
        # start_pos = (num_samples_t // self.settings['batch_size']) * self.settings['batch_size']
        # end_pos = start_pos + self.settings['batch_size']
        # size = samples[start_pos:end_pos, :, :].shape[0]
        # fill = np.ones([self.settings['batch_size'] - size, samples.shape[1], samples.shape[2]])
        # batch = np.concatenate([samples[start_pos:end_pos, :, :], fill], axis=0)
        para_path = './experiments/parameters/' + self.settings['sub_id'] + '_' + str(
            self.settings['seq_length']) + '_' + str(self.epoch) + '.npy'
        D_t, L_t = DR_discriminator.dis_trained_model(self.settings, batch, para_path)
        L_mmb = self.labels[start_pos:end_pos, :, :]
        I_mmb = self.index[start_pos:end_pos, :, :]
        D_test[start_pos:end_pos, :, :] = D_t[:size, :, :]
        DL_test[start_pos:end_pos, :, :] = L_t[:size, :, :]
        L_mb[start_pos:end_pos, :, :] = L_mmb
        I_mb[start_pos:end_pos, :, :] = I_mmb
	'''
        # NOTE(review): `results` is initialised as an ndarray but then
        # rebound to a plain list below, so the array is never used.
        results = np.zeros(5)
        tao = 0.7
        Accu2, Pre2, Rec2, F12, FPR2, D_L2 = DR_discriminator.detection_D_I(
            D_test, L_mb, I_mb, self.settings['seq_step'], tao)
        print('seq_length:', self.settings['seq_length'])
        # NOTE(review): F12 is computed but not included in this message.
        print(
            'Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec:{:.4}; FPR: {:.4}'
            .format(self.epoch, tao, Accu2, Pre2, Rec2, FPR2))
        results = [Accu2, Pre2, Rec2, F12, FPR2]
        '''
        for i in range(2, 8):
            tao = 0.1 * i
            Accu2, Pre2, Rec2, F12 = DR_discriminator.detection_Comb(
                DL_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print('Comb-logits-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                  .format(self.epoch, tao, Accu2, Pre2, Rec2, F12))
            results[i - 2, :] = [Accu2, Pre2, Rec2, F12]

            Accu3, Pre3, Rec3, F13 = DR_discriminator.detection_Comb(
                D_test, L_mb, I_mb, self.settings['seq_step'], tao)
            print('seq_length:', self.settings['seq_length'])
            print('Comb-statistic-based-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                  .format(self.epoch, tao, Accu3, Pre3, Rec3, F13))
            results[i - 2+6, :] = [Accu3, Pre3, Rec3, F13]

            Accu5, Pre5, Rec5, F15 = DR_discriminator.sample_detection(D_test, L_mb, tao)
            print('seq_length:', self.settings['seq_length'])
            print('sample-wise-Epoch: {}; tao={:.1}; Accu: {:.4}; Pre: {:.4}; Rec: {:.4}; F1: {:.4}'
                  .format(self.epoch, tao, Accu5, Pre5, Rec5, F15))
            results[i - 2+12, :] = [Accu5, Pre5, Rec5, F15]
            '''

        return results
Beispiel #5
0
    def ADfunc(self):
        """Discriminate and invert every sample batch-by-batch, timing the
        generator inversion, then compute and save D/R loss curves.

        Per-batch inversion times and the DL1/DL2/LL/RL loss arrays are
        written below ``path_AD_autoencoder_results``; nothing is
        returned.
        """
        timeG = []  # wall-clock seconds spent in invert() per batch

        num_samples_t = self.samples.shape[0]
        batch_size = self.settings['batch_size']
        num_batches = num_samples_t // batch_size
        seq_length = self.settings['seq_length']
        num_signals = self.settings['num_signals']

        print('samples shape:', self.samples.shape)
        print('num_samples_t', num_samples_t)
        print('batch_size', batch_size)
        print('num_batches', num_batches)

        # -- only discriminate one batch for one time -- #
        # Discriminator outputs/logits, generator reconstructions, raw
        # samples, labels and indices for the whole sample set.
        D_test = np.empty([num_samples_t, seq_length, 1])
        DL_test = np.empty([num_samples_t, seq_length, 1])
        GG = np.empty([num_samples_t, seq_length, num_signals])
        T_samples = np.empty([num_samples_t, seq_length, num_signals])
        L_mb = np.empty([num_samples_t, seq_length, 1])
        I_mb = np.empty([num_samples_t, seq_length, 1])

        # The parameter file is the same for every batch; build it once.
        para_path = './experiments/parameters/' + self.settings[
            'sub_id'] + '_' + str(seq_length) + '_' + str(self.epoch) + '.npy'

        for batch_idx in range(num_batches):
            model.display_batch_progression(batch_idx, num_batches)
            start_pos = batch_idx * batch_size
            end_pos = start_pos + batch_size
            T_mb = self.samples[start_pos:end_pos, :, :]

            D_t, L_t = DR_discriminator.dis_D_model(self.settings, T_mb,
                                                    para_path)

            # Time only the (expensive) generator inversion.
            time1 = time()
            Gs, Zs, error_per_sample, heuristic_sigma = DR_discriminator.invert(
                self.settings,
                T_mb,
                para_path,
                g_tolerance=None,
                e_tolerance=0.1,
                n_iter=None,
                max_iter=10,
                heuristic_sigma=None)
            timeG = np.append(timeG, time() - time1)

            D_test[start_pos:end_pos, :, :] = D_t
            DL_test[start_pos:end_pos, :, :] = L_t
            GG[start_pos:end_pos, :, :] = Gs
            T_samples[start_pos:end_pos, :, :] = T_mb
            L_mb[start_pos:end_pos, :, :] = self.labels[start_pos:end_pos, :, :]
            I_mb[start_pos:end_pos, :, :] = self.index[start_pos:end_pos, :, :]

        # Completes the sample data that wasn't in the last batch because
        # the batch wasn't complete: pad with ones to a full batch, run
        # the models, keep only the `size` real rows.
        start_pos = num_batches * batch_size
        end_pos = start_pos + batch_size
        # BUG FIX: the original referenced the undefined name `samples`
        # (NameError); the data lives on `self.samples`.
        tail = self.samples[start_pos:end_pos, :, :]
        size = tail.shape[0]
        fill = np.ones(
            [batch_size - size, self.samples.shape[1], self.samples.shape[2]])
        batch = np.concatenate([tail, fill], axis=0)

        # BUG FIX: the original ran both models on T_mb (the last *full*
        # batch) instead of the padded tail batch, so the tail positions
        # received scores for the wrong samples.
        D_t, L_t = DR_discriminator.dis_D_model(self.settings, batch,
                                                para_path)
        Gs, Zs, error_per_sample, heuristic_sigma = DR_discriminator.invert(
            self.settings,
            batch,
            para_path,
            g_tolerance=None,
            e_tolerance=0.1,
            n_iter=None,
            max_iter=10,
            heuristic_sigma=None)

        np.save(path_AD_autoencoder_results + "/timeG.npy", timeG)

        D_test[start_pos:end_pos, :, :] = D_t[:size, :, :]
        DL_test[start_pos:end_pos, :, :] = L_t[:size, :, :]
        GG[start_pos:end_pos, :, :] = Gs[:size, :, :]
        # BUG FIX: was T_mb[:size] (stale full batch); store the tail
        # samples themselves.
        T_samples[start_pos:end_pos, :, :] = batch[:size, :, :]
        L_mb[start_pos:end_pos, :, :] = self.labels[start_pos:end_pos, :, :]
        I_mb[start_pos:end_pos, :, :] = self.index[start_pos:end_pos, :, :]

        #------------------------------------------------------------
        savePath_DL1 = path_AD_autoencoder_results + "/DL1" + ".npy"
        savePath_DL2 = path_AD_autoencoder_results + "/DL2" + ".npy"
        savePath_LL = path_AD_autoencoder_results + "/LL" + ".npy"
        savePath_RL = path_AD_autoencoder_results + "/RL" + ".npy"
        D_L_1, D_L_2, L_L, R_L = autoencoderFunctions.computeAndSaveDandRLossesSingleG(
            D_test, DL_test, GG, T_samples, L_mb, self.settings['seq_step'],
            savePath_DL1, savePath_DL2, savePath_LL, savePath_RL)
        #------------------------------------------------------------
        return