Esempio n. 1
0
    def addSubregions(self):
        '''Open the subregion selection window, handing it a snapshot of the
        current world borders and the path of the subregion info file.  Warns
        and does nothing when no data file has been loaded yet.'''
        if not self.DataLoaded:
            messages.send_warning('No File loaded')
            return

        # snapshot of the current world borders for the subregion window
        world_borders = {wb: self.INFO[wb] for wb in self.WORLD_BORDERS}

        self.srW = subregionWindow(self, world_borders,
                                   self.SUBREGIONS_INFO_FILE)
        self.srW.show()
 def TE_ok_pressed(self):
     '''Save the user-specified TE parameter values to the TE params file.

     Every line edit must contain a value parseable as float; on the first
     invalid/empty field a warning is sent and nothing is saved.'''
     with open(self.TE_PARAMS_FILE) as tf:
         te_params = json.load(tf)

     for le in self.TE_lineEdits:
         try:
             float(le.text())
         except ValueError:
             messages.send_warning('Parameter fields can not be empty!')
             # BUGFIX: abort instead of falling through — the original code
             # warned but still stored the invalid/empty value below.
             return
         te_params[le.objectName()] = le.text()

     with open(self.TE_PARAMS_FILE, 'w') as tf:
         json.dump(te_params, tf)
Esempio n. 3
0
 def checkSubregions(self):
     '''Check every stored subregion against the current world borders.

     Returns False (after sending a warning) as soon as any subregion border
     lies outside the world borders, True otherwise.

     BUGFIX: the original had an `else: return True` inside the inner loop,
     so only the first border of the first subregion was ever checked.'''
     with open(self.SUBREGIONS_INFO_FILE) as sf:
         subregions = json.load(sf)

     for sub in subregions:
         for wb in self.WORLD_BORDERS:
             # a 'min' border must not lie below, a 'max' border must not
             # lie above the corresponding world border
             exceeds_min = ('min' in wb) and (subregions[sub][wb] <
                                              self.INFO[wb])
             exceeds_max = ('max' in wb) and (subregions[sub][wb] >
                                              self.INFO[wb])
             if exceeds_min or exceeds_max:
                 messages.send_warning(
                     'Currently selcted Subregions exceed world borders')
                 return False
     return True
Esempio n. 4
0
    def changeTime(self):
        '''Triggered by the 'Change' button on the time layout: opens a
        window with sliders to set start and stop time.  The selected values
        are passed back to mainWindow's INFO dictionary.'''
        if not self.DataLoaded:
            messages.send_warning('No File loaded')
            return

        write_delim = json.load(open(self.CSV_INFO_FILE))['write']['delim']
        data = pd.read_csv(self.TMP_FILE, header=0, sep=write_delim)

        self.tW = timeWindow(self, data['seconds'].values,
                             data['frames'].values, self.INFO)
        self.tW.show()
Esempio n. 5
0
    def pushed_ok(self):
        '''Collect agent names from the line-edit elements, validate them
        (non-empty, not starting with a digit, no duplicates) and hand the
        new names to the parent window.'''
        self.AGENT_NAMES = []

        for line_edit in self.custom_names:
            name = line_edit.text()
            if not name or name[0].isdigit():
                # reject empty names and names starting with a digit
                messages.send_warning("Invalid or empty agent names")
                continue
            self.AGENT_NAMES.append(name)

        if len(set(self.AGENT_NAMES)) == self.nAgents:  # no duplicates
            print(self.AGENT_NAMES)
            self.parentWindow.AGENT_NAMES = self.AGENT_NAMES
            self.parentWindow.update_checklabels('AGENTS')
            self.home.close()
        else:
            messages.send_warning("Invalid or empty agent names")
Esempio n. 6
0
    def init_Info(self, tmp_file):
        '''Called by tableWindow when 'Ok' is pressed.  Initializes
        mainWindow's INFO dictionary from the tmp file created by tableWindow.

        Returns False when the time format is invalid or the durations cannot
        be computed, True on success (callers test `== False`).'''
        csv_dict = json.load(open(self.CSV_INFO_FILE))
        delim = csv_dict['write']['delim']

        self.TMP_FILE = tmp_file
        df = pd.read_csv(tmp_file, header=0, sep=delim)

        # validate the timestamp format of the first entry
        time_format = self.INFO['info']['time']
        if tp.handle_timestamp(df['time'].values[0], time_format,
                               self.DATE_FORMATS_FILE) is None:
            messages.send_warning('Time format invalid!')
            return False

        self.INFO['start_time'] = df['seconds'].values[0]
        self.INFO['stop_time'] = df['seconds'].values[-1]

        self.INFO['start_frame'] = df['frames'].values[0]
        self.INFO['stop_frame'] = df['frames'].values[-1]

        try:
            # BUGFIX: duration is stop - start (the original computed
            # start - stop, yielding a negative duration and contradicting
            # duration_frame below).
            self.INFO['duration_time'] = self.INFO['stop_time'] - self.INFO[
                'start_time']
            self.INFO['duration_frame'] = int(df['frames'].values[-1]) - int(
                df['frames'].values[0])
        except (TypeError, ValueError):
            return False

        # world borders: extrema over all agents' trajectories
        self.INFO['x_min'] = min(
            min(df[an + '_x'].values) for an in self.INFO['agent_names'])
        self.INFO['y_min'] = min(
            min(df[an + '_y'].values) for an in self.INFO['agent_names'])
        self.INFO['x_max'] = max(
            max(df[an + '_x'].values) for an in self.INFO['agent_names'])
        self.INFO['y_max'] = max(
            max(df[an + '_y'].values) for an in self.INFO['agent_names'])

        self.update_labels()
        self.DataLoaded = True
        return True
Esempio n. 7
0
    def on_pushed_inspect(self):
        '''Triggered by the 'Inspect' button: cuts the data according to the
        current temporal and spatial borders, prepares it for plotting in the
        chosen format and displays it via plotWindow.'''
        if not self.DataLoaded:
            messages.send_warning('No File loaded')
            return

        df = basic_stats.cut_timelines(self.TMP_FILE, self.INFO,
                                       self.CSV_INFO_FILE)
        representation = str(self.selectType.currentText())
        datatype = str(self.selectSpec.currentText())
        data2plot = genPlotData.makeData2Plot(df, representation, datatype,
                                              self.INFO, self.PARAM_INFO_FILE)

        # BUGFIX: 'is None' instead of '== None' — an array-like return value
        # would make '== None' evaluate element-wise (or raise) instead of
        # testing for the sentinel.
        if data2plot is None:
            messages.send_warning('selection invalid')
            return

        self.pW = plotWindow(data2plot, representation, datatype)
        self.pW.exec_()
Esempio n. 8
0
 def save(self):
     '''Triggered by the 'Save' button.  Validates the selected columns; if
     valid, the data is saved to a temporary csv file and the parent window
     is initialized from it, otherwise a warning is sent.'''
     print(self.checkLabels)

     if not self.check_entries():
         messages.send_warning(
             'Column selection invalid: Check for empty fields, double indices and indices exceeding size of original file'
         )
         return

     if self.build_csv(self.fileName) == False:
         return

     self.parentWindow.INFO['agent_names'] = self.AGENT_NAMES
     basic_stats.speed_and_dist(self.TMP_FILE_TITLE,
                                self.parentWindow.INFO,
                                self.CSV_INFO_FILE,
                                self.PARAM_INFO_FILE)
     self.parentWindow.INFO['info'] = self.PARAM_INFO

     if self.parentWindow.init_Info(self.TMP_FILE_TITLE) == False:
         return

     self.home.close()
Esempio n. 9
0
    def apply_smoothing(self):
        '''Triggered by the 'Apply Smoothing' button: applies the filter
        selected in the dropdown menu to the whole trajectory (not only the
        selected parts).'''
        if not self.DataLoaded:
            messages.send_warning('No File loaded')
            return

        smooth = str(self.selectSmoothing.currentText())
        # BUGFIX: currentText() returns a string, so the old '== None' check
        # could never trigger; an empty selection shows up as ''.
        if not smooth:
            return

        with open(self.CSV_INFO_FILE) as cf:
            csv_dict = json.load(cf)
        delim = csv_dict['write']['delim']

        df = pd.read_csv(self.TMP_FILE, header=0, sep=delim)
        if smooth == 'MedFilter, k=5':
            for an in self.INFO['agent_names']:
                df[an + '_x'] = smoothing.medfilt(df[an + '_x'].values)
                df[an + '_y'] = smoothing.medfilt(df[an + '_y'].values)

            # NOTE(review): the smoothed dataframe is not written back to
            # TMP_FILE — only the 'filtered' flag is set.  Confirm this is
            # intentional.
            messages.send_info('Trajectory is now smooth !')
            self.INFO['filtered'] = True
Esempio n. 10
0
    def changeCoords(self):
        '''Triggered by the 'Change'/'Ok' button on the space layout.

        First click ('Change'): the border value labels are replaced by line
        edits so the user can type new x/y border values.  Second click
        ('Ok'): the line edits are replaced by labels again and the entered
        coordinate values are saved to the INFO dictionary.'''

        if self.DataLoaded == False:
            messages.send_warning('No File loaded')
            return

        # Read the current text of every border widget back into INFO and
        # tear the widgets down.  This works in both states because QLabel
        # and QLineEdit both provide text().
        for key in self.Border_sizes:
            self.INFO[key] = np.round(float(self.Border_sizes[key].text()), 2)
            self.spaceLayout.removeWidget(self.Border_sizes[key])
            sip.delete(self.Border_sizes[key])

        self.Border_sizes = {}

        if self.changeCoordsButton.text() == 'Change':

            # switch to edit mode: one float-validated line edit per world
            # border, laid out two per grid row
            for i, b in enumerate(self.WORLD_BORDERS):
                le = QtWidgets.QLineEdit(str(self.INFO[b]))
                le.setValidator(QtGui.QDoubleValidator())
                self.Border_sizes[b] = le
                self.spaceLayout.addWidget(le,
                                           np.floor(i / 2.) + 1,
                                           (i % 2) * 2 + 1)
            self.changeCoordsButton.setText('Ok')

        elif self.changeCoordsButton.text() == 'Ok':
            # NOTE(review): the return value of checkSubregions() is ignored
            # here — out-of-range subregions only produce a warning, the new
            # borders are kept anyway.  Confirm this is intended.
            self.checkSubregions()
            # switch back to display mode: plain labels showing the values
            for i, b in enumerate(self.WORLD_BORDERS):
                l = QtWidgets.QLabel(str(self.INFO[b]))
                self.Border_sizes[b] = l
                self.spaceLayout.addWidget(l,
                                           np.floor(i / 2.) + 1,
                                           (i % 2) * 2 + 1)
            self.changeCoordsButton.setText('Change')
Esempio n. 11
0
    def on_browse_clicked(self):
        '''Triggered by the 'Browse' button: opens a file-selection dialog,
        checks that the chosen file is a csv file and writes the file path to
        the upper line edit.  The containing folder is stored in the options
        dictionary as the new default data folder.'''
        options = json.load(open(self.OPTIONS_INFO_FILE))
        data_folder = options['data_folder']

        if len(data_folder) == 0:
            data_folder = os.getenv("HOME")

        data_name = QtWidgets.QFileDialog.getOpenFileName(
            self, 'Open File', data_folder)[0]

        # BUGFIX: a cancelled dialog returns '' — the original clobbered
        # options['data_folder'] with '' and then raised a spurious
        # file-type warning.
        if not data_name:
            return

        options['data_folder'] = data_name[:data_name.rfind('/')]

        with open(self.OPTIONS_INFO_FILE, 'w') as op:
            json.dump(options, op)

        # BUGFIX: endswith('.csv') instead of find('csv') — the old check
        # accepted any path merely containing 'csv' (e.g. 'mycsv.txt').
        if not data_name.lower().endswith('.csv'):
            messages.send_warning(
                'Not a valid file type. Please use a file of type .csv .')
        else:
            self.selectedFile.setText(data_name)
            self.INFO['data_file'] = data_name
            print(self.INFO['data_file'])
Esempio n. 12
0
    def pushed_ok(self):
        '''Read the subregion borders from the line-edit elements, check them
        for validity (no empty fields, inside world borders, min < max) and
        dump the result to the subregion file.  Overlapping subregions are
        permitted.'''

        self.subRegions = {}

        # collect the raw text of every border line edit, grouped by subregion
        for label in self.labels:
            subreg = label.objectName()
            self.subRegions[subreg] = {
                x: self.home.findChild(QtWidgets.QLineEdit, subreg + x).text()
                for x in self.Xs
            }

        print(self.subRegions)

        for key in self.subRegions:
            for x in self.Xs:
                # every field must contain a number
                try:
                    self.subRegions[key][x] = float(self.subRegions[key][x])
                except ValueError:
                    messages.send_warning(
                        "{} has missing or invalid values".format(key))
                    return

                # selected borders must lie inside the world boundaries
                below = ('min' in x) and (self.subRegions[key][x] <
                                          self.superRegion[x])
                above = ('max' in x) and (self.subRegions[key][x] >
                                          self.superRegion[x])
                if below or above:
                    messages.send_warning(
                        "Borders of {}  exceed world boundaries".format(key))
                    return

            # self-consistency: min must not exceed max in either dimension
            if (self.subRegions[key]['x_min'] > self.subRegions[key]['x_max']
                    or self.subRegions[key]['y_min'] >
                    self.subRegions[key]['y_max']):
                messages.send_warning(
                    "Borders of {} are inconsistent".format(key))
                return

        # save results to json and close the window
        with open(self.subregions_file, 'w') as sf:
            json.dump(self.subRegions, sf)

        self.home.close()
Esempio n. 13
0
    def open_data(self):
        '''Open the data file with pandas using the parameters from
        self.CSV_INFO_FILE.  If NaN values are detected, the user is informed
        and the corresponding rows are dropped; only the first 100 rows are
        kept for display.  Returns the dataframe, or None when pandas fails
        to parse the file (a warning is sent in that case).'''

        with open(self.CSV_INFO_FILE) as cf:
            csv_dict = json.load(cf)

        try:
            df = pd.read_csv(self.fileName,
                             sep=csv_dict['read']['delim'],
                             comment=csv_dict['read']['comment'],
                             skiprows=csv_dict['read']['skip_rows'])
        except pd.errors.ParserError:
            messages.send_warning(
                "There seems to be a problem with the file you're trying to open.\n\n \
            This is usually due to missing values. Please delete incomplete rows and try again."
            )
            # BUGFIX: the original had an unreachable self.close() after this
            # return; the dead statement was removed.
            return

        counts = df.count(axis=1)  # number of non-NaN values per row
        vals = counts.values
        maxx = counts.max()  # maximum number of non-NaN values per row
        incomplete = np.where(vals < maxx)[0]  # rows with missing values
        messages.send_info_detail(
            '(1)To speed up the display process only the first 100 rows of data will be shown. \n(2)NaN values have been detected in {} rows. These rows will be ignored in the following process.'
            .format(str(len(incomplete))),
            detail='Indices of ignored rows: {}'.format(incomplete))
        df = df.dropna(how='any')
        df = df.head(100)

        return df
Esempio n. 14
0
    def stats_and_save(self):
        '''Triggered by the 'Stats and Save' button.  Calls the stats-and-save
        machinery from basic_stats / genStats, which creates two csv files:
        one with timelines and another with single values (like mean, var,
        borders, etc.).  A directory-selection dialog is opened and a results
        folder is created at the chosen location; plots, optional transfer
        entropy results and per-subregion statistics are written there.
        Finally a 'goodbye' dialog informs the user where the results were
        saved and asks whether to continue or close the application.'''

        if self.DataLoaded == False:
            messages.send_warning('No File loaded')
            return

        # This is simply to show the bar
#        self.progressBar.setGeometry(30, 40, 200, 25)
#        self.progressBar.setValue(0)
#        self.statusBar().addPermanentWidget(self.progressBar)
#-------------------------------------------------------------------------------------------
# saving preparation section
#-------------------------------------------------------------------------------------------

#check if subregions are in agreement with current world_borders
        if self.checkSubregions() == False:
            return

        options = json.load(open(self.OPTIONS_INFO_FILE))

        # read prefered save folder from options file, default is HOME
        save_folder = options['save_folder']
        if len(save_folder) == 0:
            save_folder = os.getenv("HOME")

        # open prefered save folder (or HOME) and let user confirm
        results_folder_super = \
             str(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Directory", save_folder, QtWidgets.QFileDialog.ShowDirsOnly))

        # NOTE(review): getExistingDirectory returns a str, so the '== None'
        # arm can never be true — the len() check is what catches a
        # cancelled dialog.
        if results_folder_super == None or len(results_folder_super) == 0:
            print('saving procedure interrupted')
            return

        options['save_folder'] = results_folder_super

        # save new prefered save folder
        with open(self.OPTIONS_INFO_FILE, 'w') as of:
            json.dump(options, of)

        # create results folder within save folder
        results_folder = self.makeResultsDir(results_folder_super + '/')

        # read parameters for saving from options file
        csv_dict = json.load(open(self.CSV_INFO_FILE))
        delim = csv_dict['write']['delim']
        time_file = options['timeline_file']

        #-------------------------------------------------------------------------------------------
        # create timelines.csv and info.csv
        #-------------------------------------------------------------------------------------------

        # cut data dataframe according to current time and space settings
        df = basic_stats.cut_timelines(self.TMP_FILE, self.INFO,
                                       self.CSV_INFO_FILE)

        # sort the columns of dataframe: time columns first, then per-agent
        # position/statistics columns, then collective statistics columns
        indiv_stats = ['_vx', '_vy', '_speed']
        coll_stats = ['_dist']
        time = ['frames', 'time', 'seconds']
        agents = self.INFO['agent_names']
        specs = ['_x', '_y', '_angle']

        cols = df.columns
        new_order = []
        for t in time:
            new_order.append(t)

        for a in agents:
            for sp in specs:
                new_order.append(a + sp)
            for in_st in indiv_stats:
                new_order.append(a + in_st)
        # pairwise distance columns contain the agent name before '_dist',
        # so they are matched by substring rather than built by name
        for c in coll_stats:
            for col in cols:
                if col.find(c) > 0:
                    new_order.append(col)

        df = df.reindex(new_order, axis=1)

        # save ordered dataframe and create info.csv
        df.to_csv(results_folder + '/' + time_file, sep=delim)

        #genStats.makeFile(results_folder, results_folder + '/' + time_file, self.INFO, self.CSV_INFO_FILE, self.OPTIONS_INFO_FILE)
        genStats.makeFile(results_folder, df, self.INFO, self.CSV_INFO_FILE,
                          self.OPTIONS_INFO_FILE)

        #self.progressBar.setValue(20)
        #-------------------------------------------------------------------------------------------
        # plotting section
        #-------------------------------------------------------------------------------------------
        plot_instructions = options['plot_selection']
        pf.plot_things(df, results_folder, self.INFO['agent_names'],
                       plot_instructions)

        #self.progressBar.setValue(30)
        #-------------------------------------------------------------------------------------------
        # TE section
        #-------------------------------------------------------------------------------------------
        # transfer entropy is only defined here for exactly two agents
        if options['TE']:
            if len(self.INFO['agent_names']) == 2:
                te_params = json.load(open(self.TE_PARAMS_FILE))

                TE_done = TE.TE(results_folder + '/' + time_file,
                                results_folder,
                                '/TE.csv',
                                '/TE.jpg',
                                start_frame=te_params['start_frame'],
                                maxtime=te_params['max_time'],
                                k_te=te_params['k_te'],
                                frame_step=te_params['frame_step'])
            else:
                messages.send_warning(
                    'Cannot calculate transfer entropy for agent number other than 2.'
                )

        #self.progressBar.setValue(70)
        #-------------------------------------------------------------------------------------------
        # Subregions section
        #-------------------------------------------------------------------------------------------

        # sub_info mirrors INFO but will get each subregion's borders swapped
        # in below, so cut_timelines restricts to that subregion
        sub_info = {'start_frame':self.INFO['start_frame'], 'stop_frame':self.INFO['stop_frame'],\
                    'start_time':self.INFO['start_time'], 'stop_time':self.INFO['stop_time'],\
                    'agent_names':self.INFO['agent_names'], 'data_file':self.INFO['data_file'], 'filtered':self.INFO['filtered']}
        subregions = json.load(open(self.SUBREGIONS_INFO_FILE))
        for sub in subregions.keys():
            for wb in self.WORLD_BORDERS:
                sub_info[wb] = subregions[sub][wb]

            sub_df = basic_stats.cut_timelines(self.TMP_FILE, sub_info,
                                               self.CSV_INFO_FILE)

            # NOTE(review): this writes info.csv into the current working
            # directory and reads it back via the relative path 'info.csv'
            # below — confirm the cwd is stable at this point.
            genStats.makeFile(os.getcwd(),
                              sub_df,
                              sub_info,
                              self.CSV_INFO_FILE,
                              self.OPTIONS_INFO_FILE,
                              region=sub)

            # append the subregion's stats to the main info.csv, keeping only
            # columns present (non-NaN) in both
            info1 = pd.read_csv(results_folder + '/info.csv')
            info2 = pd.read_csv('info.csv')
            infofiles = [info1, info2]
            combined = pd.concat(infofiles)
            combined = combined[info1.columns]
            combined = combined.dropna(axis=1)
            combined.to_csv(results_folder + '/info.csv')

        #self.progressBar.setValue(100)


#            from scipy.stats import spearmanr
#            from scipy.stats import pearsonr
#            for i in range(len(self.INFO['agent_names'])):
#                for j in range(i+1, len(self.INFO['agent_names'])):
#                    a0 = self.INFO['agent_names'][i]
#                    a1 = self.INFO['agent_names'][j]
#
#
#                    print(a0 + ''+ a1 )
#                    print('Pearson Corr Speed:  ', pearsonr(df[a0 + '_speed'].values, df[a1 + '_speed'].values))
#                    print('Pearson Corr vx:  ', pearsonr(df[a0 + '_vx'].values, df[a1 + '_vx'].values))
#                    print('Pearson Corr vy:  ', pearsonr(df[a0 + '_vy'].values, df[a1 + '_vy'].values))
#                    print('Np.Corr Speed: ', np.correlate(df[a0 + '_speed'].values, df[a1 + '_speed'].values))
#
#                    print('Pearson x: ', pearsonr(df[a0 + '_x'].values, df[a1 + '_x'].values))
#                    print('Np.Corr x: ', np.correlate(df[a0 + '_x'].values, df[a1 + '_x'].values))
        messages.send_goodbye(self, results_folder)
Esempio n. 15
0
    def on_load_clicked(self):
        '''Triggered by the 'Load' button.  Loads the data file specified in
        the line edit next to the 'Browse' button for further processing.
        If 'view' is checked, this is done via the display in tableWindow;
        otherwise the loading is done internally using the column settings
        saved from a previous session.  In either case the csv parameters are
        read from the respective line edits in mainWindow and saved to json
        for further use.'''

        # check if file was selected
        if len(self.selectedFile.text()) == 0:
            messages.send_warning('No File loaded')
            return

        # read csv parameters and save to json
        csv_dict = json.load(open(self.CSV_INFO_FILE))
        csv_dict['read']['delim'] = self.setDelim.text()
        csv_dict['read']['skip_rows'] = int(self.setSkipRows.text())

        with open(self.CSV_INFO_FILE, 'w') as fp:
            json.dump(csv_dict, fp)

        # save the state of the view button
        # NOTE(review): checkState() returns a Qt CheckState value; the
        # comparison '== 2' below corresponds to 'checked' — confirm the
        # checkbox is not tri-state.
        options = json.load(open(self.OPTIONS_INFO_FILE))
        options['view'] = self.openInView.checkState()
        print(options)
        with open(self.OPTIONS_INFO_FILE, 'w') as fp:
            json.dump(options, fp)

        # if view is checked open tableWindow
        if self.openInView.checkState() == 2:
            self.table = tableWindow(self, self.INFO['data_file'])
            self.table.show()

        else:
            #-------------------------------------------------------------------------------------------
            # in this section the file is loaded using settings from previous uses. The procedure is
            # similar to tableWindow's build_csv()-function.
            #------------------------------------------------------------------------------------- -----
            # use column labels from previos session
            columns = json.load(open(self.PARAM_INFO_FILE))
            time_labels = columns['time_labels']
            agent_names = columns['agent_names']
            agent_specs = columns['agent_specifications']
            other = columns['other']

            # use csv params from mainWindow to load selected csv into pandas
            try:
                df = pd.read_csv(self.selectedFile.text(),
                                 header=0,
                                 sep=csv_dict['read']['delim'],
                                 skiprows=csv_dict['read']['skip_rows'],
                                 comment=csv_dict['read']['comment'])
            except ValueError:
                messages.send_warning(
                    "Something went wrong ! \nPlease enable 'view' and try again."
                )
                return

            # define columns: map each column title of interest to its
            # (1-based) column index saved in PARAM_INFO_FILE
            df_columns = {
                'TIME': {
                    time_labels[i]: columns[time_labels[i]]
                    for i in range(len(time_labels))
                },
                'AGENTS': {},
                'OTHER':
                {other[i]: columns[other[i]]
                 for i in range(len(other))}
            }

            # agent columns are keyed by '<agent name><spec>' (e.g. 'a1_x')
            for k in range(len(agent_names)):
                for j in range(len(agent_specs)):
                    key = agent_names[k] + agent_specs[j]
                    df_columns['AGENTS'][key] = columns[key]

            # read the indices and names of columns of interest into a list
            # (user indices are 1-based, pandas positions are 0-based)
            real_indices = []
            titles = []
            for key1 in df_columns.keys():
                for key2 in df_columns[key1].keys():
                    titles.append(key2)
                    real_indices.append(int(df_columns[key1][key2]) - 1)

            # try to select the columns of interest in the loaded pandas
            try:
                df_new = df.iloc[:, real_indices]
            except IndexError:
                messages.send_warning(
                    "Something went wrong ! \nPlease enable 'view' and try again."
                )
                return

            # check for NaN values and delete the respective rows
            df_new.columns = titles
            df_new = df_new.dropna(how='any')

            # save pandas to temporary csv file
            df_new.to_csv(self.TMP_FILE, sep=csv_dict['write']['delim'])
            print('temporary file saved to', self.TMP_FILE)

            # perform basic_stats i.e convert time to seconds and calc speed and dist
            self.INFO['agent_names'] = agent_names
            try:
                basic_stats.speed_and_dist(self.TMP_FILE, self.INFO,
                                           self.CSV_INFO_FILE,
                                           self.PARAM_INFO_FILE)
            except TypeError:
                messages.send_warning(
                    "Something went wrong ! \nPlease enable 'view' and try again."
                )
                return

            # init the display of mainWindow and self.INFO with values read from datafile
            self.INFO['info'] = columns['info']
            if self.init_Info(self.TMP_FILE) == False:
                messages.send_warning(
                    "Something went wrong ! \nPlease enable 'view' and try again."
                )
                return
Esempio n. 16
0
    def build_csv(self, fileName):
        '''Build a temporary pandas frame from the user-selected columns and
        save it to csv under a default name (self.TMP_FILE_TITLE).

        Returns False (after sending a warning) when a selected column
        contains values that cannot be parsed as float; otherwise the tmp
        file is written and the column/parameter settings are dumped to
        PARAM_INFO_FILE.'''
        header_dict = {}
        csv_dict = json.load(open(self.CSV_INFO_FILE))
        delim = csv_dict['read']['delim']
        skip_rows = csv_dict['read']['skip_rows']
        comment = csv_dict['read']['comment']

        # flatten the nested {category: {column title: 1-based index}}
        # mapping into a single title -> index dict
        for key in self.checkLabels.keys():
            for k in self.checkLabels[key].keys():
                header_dict[k] = self.checkLabels[key][k]

        # read with header=None and string column names '0'..'nColumns-1' so
        # the user's 1-based indices can be mapped onto them below
        df = pd.read_csv(fileName,
                         header=None,
                         sep=delim,
                         skiprows=skip_rows,
                         comment=comment,
                         names=[str(i) for i in range(self.nColumns)])

        # convert the user's 1-based indices to the 0-based string names
        real_indices = [str(int(cl) - 1) for cl in header_dict.values()]
        df_new = df.loc[:, real_indices]
        df_new.columns = list(header_dict.keys())
        df_new = df_new.dropna(how='any')

        #print(header_dict)

        # the original header row may have been read as data; drop it
        # (df_new shares df's index, so df.index[0] addresses the same row)
        if type(df_new['frames'].values[0]) == str:  #drop original header
            df_new = df_new.drop(df.index[0])

        # check for invalid values in selcted columns
        # NOTE(review): samples the value in the 4th row — assumes the file
        # has at least 4 data rows; confirm for short files.
        should_be_floats = ['_x', '_y', '_angle']
        for an in self.AGENT_NAMES:
            for sbf in should_be_floats:
                if not self.is_float(df_new[an + sbf].values[3]):
                    messages.send_warning(
                        'Data type of column {} not understood'.format(an +
                                                                       sbf))
                    return False

        if not self.is_float(df_new['frames'].values[3]):
            messages.send_warning('Data type of column frames not understood')
            return False

        # save to tmp
        delim = csv_dict['write']['delim']
        df_new.to_csv(self.TMP_FILE_TITLE, sep=delim)
        print('temporary file saved to', self.TMP_FILE_TITLE)

        #save paramter settings to json
        param_dict = {}
        param_dict['agent_names'] = self.AGENT_NAMES
        param_dict['time_labels'] = self.TIME_LABELS
        param_dict['agent_specifications'] = self.AGENT_DATA
        param_dict['other'] = self.OTHER
        param_dict['info'] = self.PARAM_INFO

        # also store each column title -> index mapping at the top level
        for key1 in self.checkLabels:
            for key2 in self.checkLabels[key1]:
                param_dict[key2] = self.checkLabels[key1][key2]

        with open(self.PARAM_INFO_FILE, 'w') as fp:
            json.dump(param_dict, fp)