    def keep(self):
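        """Write a copy of each batch file containing only the checked columns.

        Output files land next to the originals, named
        <name>_keep_columns_<timestamp>.<ext>.
        """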
        self.parent.statusbar.showMessage("Keep Columns in process...")
        self.columns_to_keep = []

        # walk the checkbox model and collect every checked column name
        # (a checkState() of 2 means Qt.Checked)
        i = 0
        while self.model.item(i):
            if self.model.item(i).checkState() == 2:
                self.columns_to_keep.append(self.model.item(i).text())
            i += 1
        
        query = "Are you sure you want to proceed?"
        reply = QMessageBox.question(self, 'Message', query,
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.No:
            self.columns_to_keep = []
            return
        
        self.parent.statusbar.showMessage("Processing...")
        
        self.parent.ui.logOutput.append("KEPT:")
        print("KEPT:")
        
        # for each file: read the data, then keep only the selected columns
        for file in self.batch_files:
        
            list_colindx = []

            datablob = helpers.clean_header(file)
            for item in self.columns_to_keep:
                list_colindx.append(datablob[0].index(item))
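            # NOTE: .index() raises ValueError if a checked column is missing
            # from a file's header, so every batch file must contain all of
            # the selected columns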
                        
            # rebuild each row in place, keeping only the selected columns
            for row in datablob:
                row[:] = [x for j, x in enumerate(row) if j in list_colindx]
                        
            # os.path.splitext is safer than str.split('.') for paths that
            # contain dots elsewhere
            base, ext = os.path.splitext(file)
            now = time.strftime('%Y%m%d_%H%M%S')
            output_file = base + "_keep_columns_" + now + ext
            
            helpers.write_out_to_file(output_file, datablob)
            
            self.parent.ui.logOutput.append("      " + str(file.split('/')[-1]))
            print("      " + str(file.split('/')[-1]))
        
        helpers.job_complete_message(self)

        self.parent.statusbar.showMessage("Welcome Back!")
        self.close()

    def stack(self):
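        """Concatenate all batch files into one tab-delimited text file.

        The combined file is written to the working directory as
        STACKED_DATASETS_<timestamp>.txt.
        """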
        self.parent.statusbar.showMessage("Stack Datasets in process...")
        self.full_data_complement = {}
        self.columns = []
        
        self.parent.ui.logOutput.append("STACKED:")
        print("STACKED:")

        
        for idx, file in enumerate(self.batch_files):
            datablob = helpers.clean_header(file)
            
            if idx == 0:
                self.columns = datablob[0]
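                # the first file supplies the header; later files are assumed
                # to share the same column layout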
                
            self.full_data_complement[idx] = datablob
            
            self.parent.ui.logOutput.append("      " + str(file.split('/')[-1]))
            print("      " + str(file.split('/')[-1]))

         
        csv.register_dialect('CSV', delimiter='\t', quoting=csv.QUOTE_NONE)
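        # note: the dialect registered above is tab-delimited despite the
        # 'CSV' name; QUOTE_NONE assumes no field itself contains a tab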
        
        now = time.strftime('%Y%m%d_%H%M%S')
        output_file = os.path.join(os.getcwd(), "STACKED_DATASETS_" + now + ".txt")
        
        with open(output_file, 'w', newline='') as f:
            writer = csv.writer(f, 'CSV')

            # header first, taken from the first input file
            writer.writerow(self.columns)
            
            # then the data rows of every file, skipping each file's header
            for datablob in self.full_data_complement.values():
                for row in datablob[1:]:
                    writer.writerow(row)
        
        # job finished; notify the user
        helpers.job_complete_message(self)

    def AOI_aggregate(self, type):
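        """Average FACET and Shimmer/GSR readings per StimulusName per AOI.

        `type` (shadowing the builtin) names the AOI column to aggregate on,
        e.g. self.AOI_aggregate('AOI Name') for a hypothetical 'AOI Name'
        column; the column must exist in every file's header.
        """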
        # update the status bar
        self.parent.statusbar.showMessage(type + " Aggregation in process...")
        
        self.parent.ui.logOutput.append("   " + str(type))
        print("   " + str(type))
        
        # final dictionary holding, per input file, the aggregated AOI data
        # patterned per StimulusName per AOI
        self.full_AOI_complement = {}
        
        # list of columns to use in the AOI aggregation algorithm (could possibly be shortened for efficiency)
        self.cols_to_keep = ['StudyName',
                             'Name',
                             'Age',
                             'Gender',
                             'StimulusName',
                             type,
                             'Joy Evidence',
                             'Joy Intensity',
                             'Anger Evidence',
                             'Anger Intensity',
                             'Surprise Evidence',
                             'Surprise Intensity',
                             'Fear Evidence',
                             'Fear Intensity',
                             'Contempt Evidence',
                             'Contempt Intensity',
                             'Disgust Evidence',
                             'Disgust Intensity',
                             'Sadness Evidence',
                             'Sadness Intensity',
                             'Neutral Evidence',
                             'Neutral Intensity',
                             'Positive Evidence',
                             'Positive Intensity',
                             'Negative Evidence',
                             'Negative Intensity',
                             'Raw Internal ADC A13 (Shimmer Sensor)',
                             'Raw GSR (Shimmer Sensor)',
                             'Cal Internal ADC A13 (Shimmer Sensor)',
                             'Cal GSR (Shimmer Sensor)']
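        # NOTE: every name above (and the AOI column passed in as `type`)
        # must match the input headers exactly; .index() below raises
        # ValueError for any missing column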
        
        # the output is built line by line, one input file at a time
        for idx, file in enumerate(self.batch_files):

            # pull all the data, cleaning the header in the process
            self.data = helpers.clean_header(file)

            # shorter alias for the same list
            datablob = self.data
            
            # get indexes of all the columns you want to keep (REMOVE UNUSED COLUMNS)
            self.indexed_cols = []
            
            for item in self.cols_to_keep:
                self.indexed_cols.append(datablob[0].index(item))

            self.indexed_cols = sorted(self.indexed_cols)
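            # sorting keeps the retained columns in the file's original order,
            # so the filtered header row and data rows stay aligned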
                
            # rebuild each line with only the kept columns
            for i in range(len(datablob)):
                row = []
                for j in self.indexed_cols:
                    try:
                        row.append(datablob[i][j])
                    except IndexError:
                        print("Error at index: (" + str(i) + "," + str(j) + ")")
                datablob[i] = row
           
            # set empty AOI fields to a 'Z' value (SETTING NON-AOI AREAS)
            self.stim_name_idx = datablob[0].index('StimulusName')
            self.aoi_idx = datablob[0].index(type)
         
            # collect the unique stimulus names in order of first appearance
            self.stim_name_package = []

            for i in range(1, len(datablob)):
                if datablob[i][self.stim_name_idx] not in self.stim_name_package:
                    self.stim_name_package.append(datablob[i][self.stim_name_idx])
            
            # map each stimulus name to every AOI value observed for it
            # (including the empty string)
            self.little_dict = {}
            pattern_dict = self.little_dict

            for item in self.stim_name_package:
                self.AOIs = []
                for i in range(1, len(datablob)):
                    if datablob[i][self.stim_name_idx] == item and datablob[i][self.aoi_idx] not in self.AOIs:
                        self.AOIs.append(datablob[i][self.aoi_idx])
                pattern_dict[item] = self.AOIs

            # mark empty AOI cells as 'Z' (outside every AOI), but only for
            # stimuli that have at least one real AOI value
            for i in range(1, len(datablob)):
                if pattern_dict[datablob[i][self.stim_name_idx]] != [''] and datablob[i][self.aoi_idx] == '':
                    datablob[i][self.aoi_idx] = 'Z'
                
            # remove rows whose AOI field is still empty, i.e. stimuli with no
            # AOI data at all (REMOVE EMPTY AOI LINES); the header row survives
            # because its AOI cell holds the column name
            datablob = [row for row in datablob if row[self.aoi_idx] != '']
             
            # rebuild the per-StimulusName-per-AOI pattern now that rows have
            # been removed (REPATTERN ALGORITHM)
            self.stim_name_idx = datablob[0].index('StimulusName')
            self.aoi_idx = datablob[0].index(type)
         
            self.stim_name_package = []
            
            for i in range(1,len(datablob)):
                if datablob[i][self.stim_name_idx] not in self.stim_name_package:
                    self.stim_name_package.append(datablob[i][self.stim_name_idx])
            
            self.little_dict = {}
            pattern_dict = self.little_dict

            for item in self.stim_name_package:
                self.AOIs = []
                for i in range(1, len(datablob)):
                    if datablob[i][self.stim_name_idx] == item and datablob[i][self.aoi_idx] not in self.AOIs:
                        self.AOIs.append(datablob[i][self.aoi_idx])
                pattern_dict[item] = self.AOIs
            
            # header row: a copy of cols_to_keep, so appending the four weight
            # columns below does not mutate the original list
            self.column_data = list(self.cols_to_keep)
            final_blob = [self.column_data]
            final_blob[0].extend(['FACET Point Weight',
                                  'FACET Point Weight %',
                                  'GSR Point Weight',
                                  'GSR Point Weight %'])


            # aggregate every (StimulusName, AOI) pair into one averaged row
            for stimulus_name in pattern_dict:
            
                # running totals for the whole stimulus, used for the
                # point-weight percentage columns
                weight_counter_facet = 0
                weight_counter_gsr = 0
                
                for aoi_field in pattern_dict[stimulus_name]:
                    
                    # FACET accumulators (Evidence / Intensity sums per emotion)
                    counter_facet = 0
                    joy1 = 0
                    joy2 = 0
                    ang1 = 0
                    ang2 = 0
                    sur1 = 0
                    sur2 = 0
                    fear1 = 0
                    fear2 = 0
                    cont1 = 0
                    cont2 = 0
                    disg1 = 0
                    disg2 = 0
                    sad1 = 0
                    sad2 = 0
                    neu1 = 0
                    neu2 = 0
                    pos1 = 0
                    pos2 = 0
                    neg1 = 0
                    neg2 = 0
                    
                    # Shimmer / GSR accumulators
                    counter_gsr = 0
                    raw_int = 0
                    raw_gsr = 0
                    calc_int = 0
                    calc_gsr = 0
                    
                    # scan every row belonging to this (stimulus, AOI) pair
                    for k in range(len(datablob)):
                    
                        if datablob[k][self.stim_name_idx] == stimulus_name and datablob[k][self.aoi_idx] == aoi_field:
                            
                            # capture the demographic fields from the first
                            # matching row
                            if counter_gsr == 0:
                                study_name = datablob[k][datablob[0].index('StudyName')]
                                name = datablob[k][datablob[0].index('Name')]
                                age = datablob[k][datablob[0].index('Age')]
                                gender = datablob[k][datablob[0].index('Gender')]
                               
                            if datablob[k][datablob[0].index('Joy Evidence')] != '':
                                joy1 += float(datablob[k][datablob[0].index('Joy Evidence')])
                                joy2 += float(datablob[k][datablob[0].index('Joy Intensity')])
                                ang1 += float(datablob[k][datablob[0].index('Anger Evidence')])
                                ang2 += float(datablob[k][datablob[0].index('Anger Intensity')])
                                sur1 += float(datablob[k][datablob[0].index('Surprise Evidence')])
                                sur2 += float(datablob[k][datablob[0].index('Surprise Intensity')])
                                fear1 += float(datablob[k][datablob[0].index('Fear Evidence')])
                                fear2 += float(datablob[k][datablob[0].index('Fear Intensity')])
                                cont1 += float(datablob[k][datablob[0].index('Contempt Evidence')])
                                cont2 += float(datablob[k][datablob[0].index('Contempt Intensity')])
                                disg1 += float(datablob[k][datablob[0].index('Disgust Evidence')])
                                disg2 += float(datablob[k][datablob[0].index('Disgust Intensity')])
                                sad1 += float(datablob[k][datablob[0].index('Sadness Evidence')])
                                sad2 += float(datablob[k][datablob[0].index('Sadness Intensity')])
                                neu1 += float(datablob[k][datablob[0].index('Neutral Evidence')])
                                neu2 += float(datablob[k][datablob[0].index('Neutral Intensity')])
                                pos1 += float(datablob[k][datablob[0].index('Positive Evidence')])
                                pos2 += float(datablob[k][datablob[0].index('Positive Intensity')])
                                neg1 += float(datablob[k][datablob[0].index('Negative Evidence')])
                                neg2 += float(datablob[k][datablob[0].index('Negative Intensity')])
                                counter_facet += 1
                                weight_counter_facet += 1
                            
                            if datablob[k][datablob[0].index('Raw Internal ADC A13 (Shimmer Sensor)')] != '':
                                raw_int += float(datablob[k][datablob[0].index('Raw Internal ADC A13 (Shimmer Sensor)')])
                                raw_gsr += float(datablob[k][datablob[0].index('Raw GSR (Shimmer Sensor)')])
                                calc_int += float(datablob[k][datablob[0].index('Cal Internal ADC A13 (Shimmer Sensor)')])
                                calc_gsr += float(datablob[k][datablob[0].index('Cal GSR (Shimmer Sensor)')])
                                counter_gsr += 1
                                weight_counter_gsr += 1
                
                    # turn the FACET sums into means (skipped when the pair
                    # had no FACET samples)
                    if counter_facet != 0:
                        joy1 /= float(counter_facet)
                        joy2 /= float(counter_facet)
                        ang1 /= float(counter_facet)
                        ang2 /= float(counter_facet)
                        sur1 /= float(counter_facet)
                        sur2 /= float(counter_facet)
                        fear1 /= float(counter_facet)
                        fear2 /= float(counter_facet)
                        cont1 /= float(counter_facet)
                        cont2 /= float(counter_facet)
                        disg1 /= float(counter_facet)
                        disg2 /= float(counter_facet)
                        sad1 /= float(counter_facet)
                        sad2 /= float(counter_facet)
                        neu1 /= float(counter_facet)
                        neu2 /= float(counter_facet)
                        pos1 /= float(counter_facet)
                        pos2 /= float(counter_facet)
                        neg1 /= float(counter_facet)
                        neg2 /= float(counter_facet)
                    
                    # likewise for the Shimmer/GSR sums
                    if counter_gsr != 0:
                        raw_int /= float(counter_gsr)
                        raw_gsr /= float(counter_gsr)
                        calc_int /= float(counter_gsr)
                        calc_gsr /= float(counter_gsr)
                
                    # assemble the output row; the two 0 placeholders are the
                    # weight-% columns filled in below
                    line = [study_name, name, age, gender, stimulus_name, aoi_field,
                            joy1, joy2, ang1, ang2, sur1, sur2, fear1, fear2,
                            cont1, cont2, disg1, disg2, sad1, sad2, neu1, neu2,
                            pos1, pos2, neg1, neg2, raw_int, raw_gsr, calc_int, calc_gsr,
                            counter_facet, 0, counter_gsr, 0]
                
                    final_blob.append(line)
                
                # point-weight percentage: each row's share of the stimulus
                # totals (guard against stimuli with no FACET or GSR samples)
                for i in range(len(final_blob)):
                    if final_blob[i][4] == stimulus_name:
                        if weight_counter_facet:
                            final_blob[i][31] = (final_blob[i][30] / weight_counter_facet) * 100
                        if weight_counter_gsr:
                            final_blob[i][33] = (final_blob[i][32] / weight_counter_gsr) * 100
                
            self.full_AOI_complement[idx] = final_blob
            
            self.parent.ui.logOutput.append("      " + str(file.split('/')[-1]))
            print("      " + str(file.split('/')[-1]))
                    
        #data ready for output
        csv.register_dialect('CSV', delimiter='\t', quoting=csv.QUOTE_NONE)

        now = time.strftime('%Y%m%d_%H%M%S')
        output_file = os.path.join(os.getcwd(), type.upper() + "_aggregation_" + now + ".txt")
        
        #the write-out
        with open(output_file, 'w', newline='') as f:
            writer = csv.writer(f, 'CSV')
            
            writer.writerow(self.full_AOI_complement[0][0])
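            # every blob shares the same header, written once from the first file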
            
            # data rows from every file's blob, skipping each blob's header row
            for final_blob in self.full_AOI_complement.values():
                for row in final_blob[1:]:
                    writer.writerow(row)