Example #1
def make_toml(folder):
    files = []
    extlist = ["mpt", "csv", "txt", "xlsx", "ecdh"]


    #Loop over all elements in current directory
    for entry in os.listdir(folder):
        # If it is a file and has the correct extension
        if os.path.isfile(os.path.join(folder, entry)) and entry.split(".")[-1] in extlist:
            # Add it to filelist
            files.append(entry)

    toml_str = """# "Data path", "active mass" and "nickname" (set active mass to 1 if data is normalized with regards to active mass)
files = [\n"""
    if len(files)>0:
        for file in files:
            toml_str += '\t["' + file + '","1.0"],\n'
    else:
        LOG.warning("Could not find any files in the current folder!")
        toml_str += '\t[" ","1.0"],\n'

    toml_str += "]\n\n"
    toml_str += gen_string(cfg_dict)

    with open("ecdh.toml", "w") as f:
        f.write(toml_str)
        LOG.info("Wrote example configuration to 'ecdh.toml' with %.0f files found"%len(files))
Example #2
File: cell.py Project: amundmr/ecdh
    def reduce_data(self, datatreatment):
        """
        Takes the data and deletes points not matching the requirement.
        Useful if you measured with too high sampling rate and have gigantic datafiles
        Example: you sampled for every 1mV change, and every second, then with dV = 0.010 and dt = 10, all points that has less than a difference of 10mV AND less than 10s will be deleted.
        """
        LOG.debug("cell.reduce_data() is running, reducing file: {}".format(
            self.name))
        LOG.info(
            "Reducing file: {}. Follow loading bar below, this might take some time."
            .format(self.name))

        # Fall back to defaults if dt/dV are missing from the config or not numeric
        try:
            dt = int(datatreatment["dt"])
        except (KeyError, TypeError, ValueError):
            dt = 10  # 10 s
        try:
            dV = float(datatreatment["dV"])
        except (KeyError, TypeError, ValueError):
            dV = 0.01  # 10 mV

        dI = 0.01  # 0.01 mA, 10 uA
        LOG.debug(f"Reduction parameters: dt: {dt}, dV: {dV}, dI: {dI}.")
        #'time/s','Ewe/V', '<I>/mA'
        last_t = self.df['time/s'].iloc[0]
        last_V = self.df['Ewe/V'].iloc[0]
        kill_index = []

        max_i = len(self.df)
        # Simple 10-segment loading bar
        print("|----------|")
        print("|", end="", flush=True)
        for i, row in self.df.iloc[1:].iterrows():

            curr_t = row.iloc[0]
            curr_V = row.iloc[1]

            if abs(last_t - curr_t) < dt and abs(last_V - curr_V) < dV:
                kill_index.append(i)
            else:
                last_t = curr_t
                last_V = curr_V

            # Print one segment of the loading bar after roughly every 10% of the rows
            # (assumes the default integer index, as the original comparison did)
            if i % max(1, max_i // 10) == 0:
                print("-", end="", flush=True)
        print("|")

        filename, ext = os.path.splitext(self.fn)
        self.df.drop(kill_index, inplace=True)
        self.df.to_csv(path_or_buf=filename + "_reduced.ecdh")
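A minimal standalone sketch of the same keep/drop rule on a tiny synthetic frame, using the column names from the code above ('time/s', 'Ewe/V'); all numbers are made up:

import pandas as pd

# One point per second with tiny voltage steps (made-up data)
df = pd.DataFrame({"time/s": range(30), "Ewe/V": [3.70 + 0.0004 * i for i in range(30)]})

dt, dV = 10, 0.01
last_t, last_V = df["time/s"].iloc[0], df["Ewe/V"].iloc[0]
kill_index = []
for i, row in df.iloc[1:].iterrows():
    # Drop a point only if BOTH the time and the voltage change stay below the thresholds
    if abs(row["time/s"] - last_t) < dt and abs(row["Ewe/V"] - last_V) < dV:
        kill_index.append(i)
    else:
        last_t, last_V = row["time/s"], row["Ewe/V"]

print(len(df), "->", len(df.drop(kill_index)))  # 30 -> 3: roughly one point kept per 10 s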
Example #3
File: cell.py Project: amundmr/ecdh
    def smooth_data(self, datatreatment):
        """
        Takes the data and removes outliers
        Useful if you have a couple of short circuits now and then
        
        """
        LOG.info(f"Removing outliers on {self.name}.")
        import pandas as pd
        # Rolling window spanning roughly 1/10000 of the dataset (assumes a large dataframe)
        window = int(len(self.df) / 10000)
        smoothing_df = pd.DataFrame()
        smoothing_df['median'] = self.df['Ewe/V'].rolling(window).median()
        smoothing_df['std'] = self.df['Ewe/V'].rolling(window).std()

        # The custom experiment_mode attribute is lost when the dataframe is rebuilt below, so keep a copy
        expmode = self.df.experiment_mode
        # Keep only points within +/- 3 rolling standard deviations of the rolling median,
        # then smooth what remains with an exponentially weighted mean
        self.df = self.df[(self.df['Ewe/V'] <= smoothing_df['median'] +
                           3 * smoothing_df['std'])
                          & (self.df['Ewe/V'] >= smoothing_df['median'] -
                             3 * smoothing_df['std'])].ewm(alpha=0.9).mean()
        self.df.experiment_mode = expmode
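A minimal standalone sketch of the same rolling-median outlier filter on a synthetic series with two injected spikes; the window size and all numbers are made up:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
v = pd.Series(3.7 + 0.001 * rng.standard_normal(1000))  # synthetic voltage trace
v.iloc[[200, 600]] = 0.0                                 # two short-circuit-like spikes

window = 50
median = v.rolling(window).median()
std = v.rolling(window).std()

# Keep only points within +/- 3 rolling standard deviations of the rolling median
mask = (v <= median + 3 * std) & (v >= median - 3 * std)
print(len(v), "->", int(mask.sum()))  # the spikes (and the NaN warm-up of the first window) are dropped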
Example #4
def run():
    import sys
    if len(sys.argv) < 3: #Then no folder is specified, look for toml in local folder.
        if os.path.isfile("./ecdh.toml"):
            path = "./ecdh.toml"
        else:
            LOG.error("Could not find an ecdh.toml file in the current directory! \nOptions: \n1. Run ecdh with the argument 'run' followed by the path of your .toml file. \n2. Initiate toml file in this directory with the init argument.")
            sys.exit()
    elif os.path.isfile(sys.argv[2]): # A .toml config file path was given as argument; use it
        path = sys.argv[2]
    else:
        LOG.error("Cannot find the .toml configuration file!")
        sys.exit()
    LOG.debug("Reading config file: '{}'".format(path))

    # Read in configuration file
    config = read_config(path)
    settings = config["settings"]
    # Merge cycle range into specific cycles
    if settings['cycle_range']:
        try:
            cyclerange = np.linspace(settings['cycle_range'][0], settings['cycle_range'][1], settings['cycle_range'][1]-settings['cycle_range'][0] + 1).astype(int)
            if type(settings['specific_cycles']) is bool:
                settings['specific_cycles'] = cyclerange.tolist()
            else:
                settings['specific_cycles'] += cyclerange.tolist()
            LOG.info(f"Specific cycles: {settings['specific_cycles']}")
        except Exception as e:
            LOG.warning(f"Could not use the cycle range, Error: {e}")


    datatreatment = config["datatreatment"]

    # Check that files are found
    files = check_files(config["files"])
    if len(files) == 0:
        LOG.error("Could not load any datafiles. Exiting. Check that the filepaths are typed correctly in the configuration file.")
        sys.exit()
    LOG.success("Running ECDH: Found {} datafiles!".format(len(files)))

    # Define plot specifications
    plot = Plot(numfiles=len(files), **settings)


    # Run the data reading + plot generation
    cells = []
    for f in files:
        # Active mass and nickname are optional entries for each file in the config
        try:
            am_mass = f[1]
        except IndexError:
            am_mass = None
        try:
            nickname = f[2]
        except IndexError:
            nickname = None

        cell = Cell(f[0], am_mass, nickname,  plot=plot, specific_cycles = settings['specific_cycles'])
        cell.get_data()
        #cell.edit_GC()
        #cell.treat_data(settings)
        cell.plot()
        #cells.append(cell)
        
        if datatreatment['reduce_data']:
            cell.reduce_data(datatreatment)

        if datatreatment['smooth_data']:
            cell.smooth_data(datatreatment)
        
        if datatreatment['print_capacities']:

            if not os.path.isfile("capacity_intervals.json"):
                # Create an empty file (without reusing the loop variable f as a file handle)
                open("capacity_intervals.json", "w").close()

            import json
 
            new_json = cell.get_capacities(datatreatment)

            with open("capacity_intervals.json",'r+') as file:
                # First we load existing data into a dict.
                try:
                    file_data = json.load(file)
                except:
                    file_data = []
                # Join new_data with file_data inside emp_details
                file_data.append(new_json)
                # Sets file's current position at offset.
                file.seek(0)
                # convert back to json.
                json.dump(file_data, file, indent=4)

    if 'savefig' in settings:
        plot.draw(save = settings['savefig'])
    else:
        plot.draw()
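As a side note on the cycle_range handling in run() above: np.linspace(a, b, b - a + 1).astype(int) yields the same integers as np.arange(a, b + 1). A small sketch with hypothetical settings values:

import numpy as np

settings = {"cycle_range": [3, 7], "specific_cycles": [1]}  # hypothetical values

a, b = settings["cycle_range"]
cyclerange = np.linspace(a, b, b - a + 1).astype(int)  # array([3, 4, 5, 6, 7])
if isinstance(settings["specific_cycles"], bool):
    settings["specific_cycles"] = cyclerange.tolist()
else:
    settings["specific_cycles"] += cyclerange.tolist()

print(settings["specific_cycles"])  # [1, 3, 4, 5, 6, 7]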
Example #5
def check_df(df):
    """Check if the dataframe has:
    - cycle number (sometimes the Z1 counter f***s up and just says 0)
    - If it is GC data: then calculate capacity
    - removing data where the cell is resting
    
    If anything is wrong, it tries to fix it"""


    if df.experiment_mode == 1: #Then it's GC
        if df['cycle number'].eq(0).all() or df['cycle number'].max() < 100: #If all cycle numbers are 0, then maybe Z1 counter was not iterated properly.
            LOG.info("We only found one cycle in '{}', and suspect this to be false. Checking now if there should be more cycles.".format(df.name))

            #We fix this by counting our own cycles.
            #Keeping track of signs of current (positive or negative) and cycle number
            prev_sign = True
            sign = True 
            cycle_number = 1
            new_cycle_indexes = []

            for i,current in df['<I>/mA'].items():

                if current > 0:
                    sign = True
                    df['charge'].at[i] = True
                elif current < 0:
                    sign = False
                    df['charge'].at[i] = False
                

                if prev_sign is False and sign is True:
                    #Changing from a discharge to a charge means new cycle
                    prev_sign = True
                    cycle_number += 1
                    #df['cycle number'].at[i-1] = cycle_number
                    new_cycle_indexes.append(i-1)
                    
                elif prev_sign is True and sign is False:
                    #Changing from a charge to a discharge
                    prev_sign = False
                    new_cycle_indexes.append(i-1)

                # In place editing of cycle number
                df['cycle number'].at[i] = cycle_number

            #Remove rows where a new experiment start (BatSmall has f****d datalogging here, where the current and voltage is the same as the prev step, but the capacity restarts)
            #df.drop(new_cycle_indexes, axis = 0, inplace = True)

            if cycle_number > 1:
                LOG.info("Found {} cycles in {}".format(cycle_number, df.name))
            else:
                LOG.info("There seems to only be one cycle. Sorry for wasting your time.")

        else: # Cycle numbers look fine, but the 'charge' bool still needs to be set
            for i,current in df['<I>/mA'].items():

                if current > 0:
                    sign = True
                    df['charge'].at[i] = True
                elif current < 0:
                    sign = False
                    df['charge'].at[i] = False
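A minimal standalone sketch of the same cycle-counting idea (a new cycle starts on every discharge-to-charge transition), written as a vectorized alternative to the loop above; the '<I>/mA' values are made up:

import pandas as pd

# Synthetic current: two charge/discharge cycles (made-up values, in mA)
df = pd.DataFrame({"<I>/mA": [1, 1, -1, -1, 1, 1, -1, -1]})

charge = df["<I>/mA"] > 0
# A new cycle starts wherever the sign flips from discharge (False) to charge (True)
new_cycle = charge & ~charge.shift(fill_value=True)
df["charge"] = charge
df["cycle number"] = 1 + new_cycle.cumsum()

print(df["cycle number"].tolist())  # [1, 1, 1, 1, 2, 2, 2, 2]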
Example #6
File: cell.py Project: amundmr/ecdh
    def get_capacities(self, datatreatment):
        """
        Returns capacity in a potential interval for each cycle.
        Appends to filename "./capacities.txt"
        """
        if not len(datatreatment['print_capacities']) % 2 == 0:
            LOG.critical(
                "You wanted to get the potential-interval capacities, but you inserted an odd number of boundaries. You need two boundaries for each range, eg: [3.5, 4.4, 4.4, 5.0] will be two ranges from 3.5->4.4 and 4.4->5.0. The capacities will not be printed."
            )
        else:
            intervals = []
            for i, pot in enumerate(datatreatment['print_capacities']):
                if i % 2 != 0:  #if the index number is odd, we are making a range of it
                    intervals.append(
                        (datatreatment['print_capacities'][i - 1], pot))
            LOG.info(
                "Found intervals for capacity print: {}".format(intervals))

            if self.GCdata:
                data = self.GCdata
            elif self.CVdata:
                data = self.CVdata
            else:
                self.edit_GC()
                data = self.GCdata  # assumes edit_GC() populates self.GCdata; otherwise data would be undefined below

            import numpy as np

            def _find_charge(chg, interval):
                start, stop = interval
                cap, pot = chg
                pot = np.asarray(pot)
                idxstart = (np.abs(pot - start)).argmin()
                idxstop = (np.abs(pot - stop)).argmin()
                capdiff = cap[idxstop] - cap[idxstart]

                return abs(capdiff)

            # Format list like: [chg, dchg], chg = [cap in interval 1, cap in interval 2, cap in interval n]
            caps = {self.name: {}}
            for interval in intervals:
                caps[self.name][str(interval)] = {
                    "charge": [],
                    "discharge": []
                }

            if self.specific_cycles:
                for i, cycle in enumerate(data):
                    if i in self.specific_cycles:
                        chg, dchg = cycle
                        for interval in intervals:
                            caps[self.name][str(interval)]["charge"].append(
                                _find_charge(chg, interval))
                            caps[self.name][str(interval)]["discharge"].append(
                                _find_charge(dchg, interval))

            else:
                for i, cycle in enumerate(data):
                    chg, dchg = cycle
                    for interval in intervals:
                        caps[self.name][str(interval)]["charge"].append(
                            _find_charge(chg, interval))
                        caps[self.name][str(interval)]["discharge"].append(
                            _find_charge(dchg, interval))

            return caps
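A small sketch of the interval lookup inside _find_charge(): the capacity at each boundary is read from the point whose potential is closest to that boundary, and the interval capacity is their difference; the toy numbers below are made up:

import numpy as np

cap = np.array([0.0, 20.0, 45.0, 80.0, 120.0])  # toy capacities for one half-cycle
pot = np.array([3.0, 3.4, 3.8, 4.2, 4.6])       # corresponding potentials in V

start, stop = 3.5, 4.3                     # one potential interval
idxstart = np.abs(pot - start).argmin()    # index 1 (3.4 V is closest to 3.5 V)
idxstop = np.abs(pot - stop).argmin()      # index 3 (4.2 V is closest to 4.3 V)
print(abs(cap[idxstop] - cap[idxstart]))   # 60.0 : capacity between ~3.5 V and ~4.3 V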