Example 1
def damage_soderberg(m, R):
    d = 0.
    I = 0.25 * np.pi * R**4
    y = R
    sigma = m * y / I

    print(max(sigma))
    c = rainflow.extract_cycles(sigma)

    mean = np.array([])
    alternate = np.array([])
    m = np.array([])  # reuses the name of the moment argument, which is no longer needed below
    for low, high, mult in c:
        mean = np.append(mean, 0.5 * (high + low))
        alternate = np.append(alternate, (high - low) / 2.)
        m = np.append(m, mult)

    Smax = 80.0 * 1.E6
    ex = 4.
    sig_eff = alternate * (Smax / (Smax - mean))
    # sig_eff = (1.-mean*Smax)/alternate
    Nf = (Smax / sig_eff)**ex

    for i in range(len(mean)):
        d += m[i] / Nf[i]

    print(d * 52560.0 * 20.)
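
Note that the examples collected here target two different releases of the rainflow package: the older API, used above, yields (low, high, mult) triples from extract_cycles, while newer (3.x) releases yield (range, mean, count, i_start, i_end) tuples, as in Examples 2 and 9. A minimal sketch of the newer form, assuming rainflow >= 3.0 is installed:

import rainflow

# short synthetic load history; the values are illustrative only
series = [0, -2, 1, -3, 5, -1, 3, -4, 4, -2, 0]

for rng, mean, count, i_start, i_end in rainflow.extract_cycles(series):
    # count is 0.5 for residual half cycles and 1.0 for full cycles
    print(rng, mean, count, i_start, i_end)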
Example 2
def test_extract_cycles(series, cycles, counts, approx):
    result = list(rainflow.extract_cycles(series))
    if approx:
        expected = [(pytest.approx(rng), pytest.approx(mean), count, i, j)
                    for (rng, mean, count, i, j) in cycles]
    else:
        expected = cycles
    assert result == expected
Example 3
def count(self):
    for spot in self.spotNames:
        self.rainflowData[spot] = {'Cycle': [], 'Range': [], 'Mean': []}
        for valley, peak, cycle in rainflow.extract_cycles(
                self.dataInput[spot]):
            rangeValue = peak - valley
            meanValue = (peak + valley) / 2
            self.rainflowData[spot]['Cycle'].append(cycle)
            self.rainflowData[spot]['Range'].append(rangeValue)
            self.rainflowData[spot]['Mean'].append(meanValue)
Example 4
def _calc_damage(data):
    stress_ranges = []
    cycles = []
    for low, high, mult in rainflow.extract_cycles(data, True, True):
        amplitude = high - 0.5 * (high + low)
        # keep cycle counts and stress ranges aligned by appending both only for non-zero amplitudes
        if amplitude > 0:
            cycles.append(mult)
            stress_ranges.append(2 * amplitude)

    N = sn_curve(stress_ranges, logA=math.log10(6e10), m=3, t=0, tref=25, k=0)
    damage = sum(sorted(cycles / N))
    return damage
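
The sn_curve helper used above (and sncurve in the next example) is not part of the snippet. A minimal sketch that is consistent with the call signature, assuming a one-slope S-N curve log10(N) = logA - m*log10(S) with an optional thickness correction driven by t, tref and k; the correction form is an assumption, not taken from the original code:

import numpy as np

def sn_curve(stress_ranges, logA, m, t=0, tref=25, k=0):
    """Cycles to failure for each stress range on a one-slope S-N curve (sketch)."""
    s = np.asarray(stress_ranges, dtype=float)
    # thickness correction only penalises sections thicker than the reference thickness
    thickness_factor = (max(t, tref) / tref) ** k
    return 10.0 ** (logA - m * np.log10(s * thickness_factor))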
Example 5
def _calc_damage(data, sn_curve):
    stress_ranges = []
    cycles = []
    for low, high, mult in rainflow.extract_cycles(data, True, True):
        amplitude = high - 0.5 * (high + low)
        if amplitude > 0:
            cycles.append(mult)
            stress_ranges.append(2 * amplitude)

    N = sncurve(stress_ranges, **sn_curve)
    damage = sum(sorted(cycles / N))
    return damage
Example 6
    def test_order_of_remaining_halves(self):
        cycle_ref = [0.5, 0.5, 0.5, 1.0, 0.5, 0.5, 0.5, 0.5, 0.5]
        mean_ref = [-1.0, -0.5, -1.0, 1.0, 1.0, 0.5, 0.0, 1.0, -1.0]

        cycles = []
        means = []
        for low, high, mult in rainflow.extract_cycles(self.series, True, True):
            cycles.append(mult)
            mean = 0.5 * (high + low)
            means.append(mean)

        self.assertEqual(cycles, cycle_ref)
        self.assertEqual(means, mean_ref)
Example 7
def count(self):
    ''' Rainflow counting spot by spot
    '''
    for spot in self.spots:
        self.rainflowData[spot] = {'Cycle': collections.deque(),
                                   'Range': collections.deque(),
                                   'Mean': collections.deque()}
        for valley, peak, cycle in rainflow.extract_cycles(self.dataInput.get(spot)['Records']):
            rangeValue = peak - valley
            meanValue = (peak + valley) / 2
            self.rainflowData[spot]['Cycle'].append(cycle)
            self.rainflowData[spot]['Range'].append(rangeValue)
            self.rainflowData[spot]['Mean'].append(meanValue)
Example 8
def test_lows_and_highs_sorted(self):
    self.assertTrue(
        all(low <= high
            for low, high, mult in rainflow.extract_cycles(self.series)))
Example 9
    def calc_degradation(self, opt_period, start_dttm, last_dttm):
        """ calculate degradation percent based on yearly degradation and cycle degradation

        Args:
            opt_period: the index of the optimization that occurred before calling this function, None if
                no optimization problem has been solved yet
            start_dttm (DateTime): Start timestamp to calculate degradation. ie. the first datetime in the optimization
                problem
            last_dttm (DateTime): End timestamp to calculate degradation. ie. the last datetime in the optimization
                problem

        A percent that represented the energy capacity degradation
        """

        # time difference between timestamps, converted into years and multiplied by the yearly degradation rate
        if self.incl_cycle_degrade:
            cycle_degrade = 0
            yearly_degradation = 0

            if not isinstance(opt_period, str):
                # calculate degradation due to cycling iff energy values are given
                energy_series = self.variables_df.loc[start_dttm:last_dttm,
                                                      'ene']
                # Find the effective energy capacity
                eff_e_cap = self.degraded_energy_capacity()

                # If using the rainflow count_cycles API instead, uncomment the following lines
                # use rainflow counting algorithm to get cycle counts
                # cycle_counts = rainflow.count_cycles(energy_series, ndigits=4)
                #
                # aux_df = pd.DataFrame(cycle_counts, columns=['DoD', 'N_cycles'])
                # aux_df['Opt window'] = opt_period
                #
                # # sort cycle counts into user-input cycle life bins
                # digitized_cycles = np.searchsorted(self.cycle_life['Cycle Depth Upper Limit'],[min(i[0]/eff_e_cap, 1) for i in cycle_counts], side='left')

                # use rainflow extract function to get information on each cycle
                cycle_extract = list(rainflow.extract_cycles(energy_series))
                aux_df = pd.DataFrame(
                    cycle_extract,
                    columns=['rng', 'mean', 'count', 'i_start', 'i_end'])
                aux_df['Opt window'] = opt_period

                # sort cycle counts into user-input cycle life bins
                digitized_cycles = np.searchsorted(
                    self.cycle_life['Cycle Depth Upper Limit'],
                    [min(i[0] / eff_e_cap, 1) for i in cycle_extract],
                    side='left')
                aux_df['Input_cycle_DoD_mapping'] = np.array(
                    self.cycle_life['Cycle Depth Upper Limit']
                    [digitized_cycles] * eff_e_cap)
                aux_df['Cycle Life Value'] = np.array(
                    self.cycle_life['Cycle Life Value'][digitized_cycles])

                self.counted_cycles.append(aux_df.copy())
                # sum up number of cycles for all cycle counts in each bin
                cycle_sum = self.cycle_life.loc[:, :]
                cycle_sum.loc[:, 'cycles'] = 0
                for i in range(len(cycle_extract)):
                    cycle_sum.loc[digitized_cycles[i],
                                  'cycles'] += cycle_extract[i][2]

                # sum across bins to get total degrade percent
                # 1/cycle life value is degrade percent for each cycle
                cycle_degrade = np.dot(
                    1 / cycle_sum['Cycle Life Value'],
                    cycle_sum.cycles) * (1 - self.eol_condition)

            if start_dttm is not None and last_dttm is not None:
                # add the yearly degradation linearly to the # of years from START_DTTM to (END_DTTM + dt)
                days_in_year = 366 if is_leap_yr(start_dttm.year) else 365
                portion_of_year = (
                    last_dttm + pd.Timedelta(self.dt, unit='h') -
                    start_dttm) / pd.Timedelta(days_in_year, unit='d')
                yearly_degradation = self.yearly_degrade * portion_of_year

            # add the degradation due to time passing and cycling for total degradation
            degrade_percent = cycle_degrade + yearly_degradation

            # record the degradation
            # the total degradation after optimization OPT_PERIOD must also take into account the
            # degradation that occurred before the battery was in operation (which we saved as SELF.DEGRADE_PERC)
            self.degrade_data.loc[
                opt_period,
                'degradation progress %'] = degrade_percent + self.degrade_perc
            self.degrade_perc += degrade_percent

            soh_new = self.soh_initial - self.degrade_perc
            self.soh = self.degrade_data.loc[opt_period,
                                             'state of health %'] = soh_new

            # apply degradation to technology (affects physical_constraints['ene_max_rated'] and control constraints)
            eff_e_cap = self.degraded_energy_capacity()
            TellUser.info(
                f"BATTERY - {self.name}: effective energy capacity is now {truncate_float(eff_e_cap)} kWh "
                +
                f"({truncate_float(100*(1 - (self.ene_max_rated-eff_e_cap)/self.ene_max_rated), 7)}% of original)"
            )
            self.degrade_data.loc[
                opt_period, 'effective energy capacity (kWh)'] = eff_e_cap
            self.effective_soe_max = eff_e_cap * self.ulsoc
            self.effective_soe_min = eff_e_cap * self.llsoc
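
The depth-of-discharge binning above hinges on np.searchsorted against the user-supplied 'Cycle Depth Upper Limit' column. A standalone sketch of just that step, with illustrative bin limits, capacity and cycle ranges:

import numpy as np

dod_upper_limits = np.array([0.1, 0.2, 0.5, 1.0])   # illustrative bin upper limits (fraction of capacity)
eff_e_cap = 100.0                                    # illustrative effective energy capacity in kWh
cycle_ranges = [4.0, 18.0, 60.0, 150.0]              # 'rng' values as yielded by extract_cycles

# normalise each range by capacity, clip at 1, and find the first bin whose upper limit contains it
digitized = np.searchsorted(dod_upper_limits,
                            [min(r / eff_e_cap, 1) for r in cycle_ranges],
                            side='left')
print(digitized)  # [0 1 3 3]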
Example 10
def WeldFat(timestamp, _componentId, json_input, response):
    """Compute weld fatigue results from rainflow-counted stress cycles.

    Parameters
    ----------
    timestamp (datetime.datetime):
    _componentId (string): "e9fafc85-5f4d-422e-8988-6545890f202c"
    json_input (string): JSON document with the fatigue class, stress or series data and options
    response (list): single-element list used to pass back warning/error messages

    Returns
    -------
    res_dict dictionary as json

    res_dict['cumulative_damage']
    res_dict['safety_factor_life_per_bin']
    res_dict['equivalent_stress_range']
    res_dict['safety_factor_stress']
    res_dict['result_per_bin'] : Fatigue result per bin as below
        res_dict['result_per_bin'][bin]['life']
        res_dict['result_per_bin'][bin]['log10_life']
        res_dict['result_per_bin'][bin]['damage_per_bin']
        res_dict['result_per_bin'][bin]['safety_factor_life_per_bin']
    """

    # read json json_objs

    json_obj = {}
    json_obj = json.loads(json_input)

    # Define unit conversion from user units to stress_unit_dict units

    #stress_unit = json_obj['stress_unit']
    stress_unit_dict = {
        'mpa': 1.0,
        'psi': 145.038,
        'ksi': 0.145038
    }  #Mpa to user unit
    #conv_stress = stress_unit_dict[stress_unit.lower()] #convert MPa to user unit

    # Get S-N Curve definition parameters

    if 'class' not in json_obj['fatigue_class']:
        fat_class = 'User defined'
        fat = json_obj['fatigue_class']['fat']
        n_fat = json_obj['fatigue_class']['n_fat']
        n_c = json_obj['fatigue_class']['n_c']
        m_1 = json_obj['fatigue_class']['m_1']
        m_2 = json_obj['fatigue_class']['m_2']
    else:
        fat_class = json_obj['fatigue_class']['class']
        if fat_class in fatClassDict:
            fat = fatClassDict[fat_class]['FAT'][0] * stress_unit_dict[
                fatClassDict[fat_class]['FAT']
                [1].lower()]  #SN curve in MPa change to user unit
            n_fat = fatClassDict[fat_class]['Nfat']
            n_c = fatClassDict[fat_class]['Nc']
            m_1 = fatClassDict[fat_class]['m1']
            m_2 = fatClassDict[fat_class]['m2']
        else:
            response[
                0] = fat_class + ' not found in fatClassDict - not caught by the json schema validator'

    if 'fat_fact' not in json_obj['fatigue_class']:
        fat_fact = 1.
    else:
        fat_fact = json_obj['fatigue_class']['fat_fact']

    if 'N0' in fatClassDict[fat_class]:
        n_0 = fatClassDict[fat_class]['N0']
    else:
        n_0 = 1

    if 'm0' in fatClassDict[fat_class]:
        m_0 = fatClassDict[fat_class]['m0']
    else:
        m_0 = 1

    if 'Ncutoff' in fatClassDict[fat_class]:
        n_cutoff = fatClassDict[fat_class]['Ncutoff']
    else:
        n_cutoff = 1

    res_dict = {}

    res_dict['stress_unit'] = json_obj['stress_unit']
    # Save SN parameters for user check

    res_dict['sn_parameters'] = {}
    res_dict['sn_parameters']['class'] = fat_class
    res_dict['sn_parameters']['fat'] = fat
    res_dict['sn_parameters']['n_fat'] = n_fat
    res_dict['sn_parameters']['n_c'] = n_c
    res_dict['sn_parameters']['m_1'] = m_1
    res_dict['sn_parameters']['m_2'] = m_2
    res_dict['sn_parameters']['fat_fact'] = fat_fact
    res_dict['sn_parameters']['n_0'] = n_0
    res_dict['sn_parameters']['m_0'] = m_0
    res_dict['sn_parameters']['n_cutoff'] = n_cutoff

    # intermediate parameters

    log10_sn_1 = log10(fat * fat_fact) + (log10(n_fat) - log10(n_0)) / m_1
    sn_1 = 10**log10_sn_1
    sn_0 = 10**(log10_sn_1 + log10(n_0) / m_0)
    sn_c = 10**(log10(fat * fat_fact) - (log10(n_c) - log10(n_fat)) / m_1)

    # added extraction of sn_cutoff to plot curve
    sn_cutoff = 10**(log10(sn_c) - (log10(n_cutoff) - log10(n_c)) / m_2)

    #plot sn-curve
    sn_s = [sn_1, sn_0, sn_c, sn_cutoff]
    log_sn_s = [log10(s) for s in sn_s]
    n_s = [n_0, n_0, n_c, n_cutoff]
    log_n_s = [log10(n) for n in n_s]
    plt.plot(log_n_s, log_sn_s)
    plt.savefig("deterministic_SN_curve")
    plt.close()

    # Get mean stress theory parameter
    if 'mean_stress_theory' in json_obj:
        mean_stress_theory = json_obj['mean_stress_theory']['theory']
        if mean_stress_theory in ['Goodman', 'Gerber']:
            r_m = json_obj['mean_stress_theory']['ultimate_limit']
            r_y = 0.9 * r_m
        elif mean_stress_theory == 'Soderberg':
            r_m = 0.
            r_y = json_obj['mean_stress_theory']['yield_limit']
        else:
            response[
                0] = mean_stress_theory + ' not found in mean_stress_theory - not caught by the json schema validator'

    # Calculate result per bin

    #res_dict = {}
    res_dict['cumulative_damage'] = 0.
    res_dict['safety_factor_life_per_bin'] = 0.
    res_dict['equivalent_stress_range'] = 0.
    res_dict['safety_factor_stress'] = 0.
    res_dict['result_per_bin'] = {}

    # rainflow counting in the user unit

    if 'stress_data' not in json_obj and 'serie_data' in json_obj:
        series = json_obj['serie_data']['series']
        if series == []:
            response[0] = 'serie ' + str(series) + ' not valid'
            return
        if 'nbins' in json_obj['serie_data'].keys():

            max_range = max(series) - min(series)
            if 'maxrange' in json_obj['serie_data']:
                my_max_range = json_obj['serie_data']['maxrange']
                if max_range > my_max_range:
                    response[
                        0] = 'Warning: serie max range larger than given max range'
                max_range = my_max_range

            nbins = json_obj['serie_data']['nbins']
            binsize = max_range / nbins

            counts_ix = defaultdict(int)
            for i in range(nbins):
                counts_ix[i] = 0

            # Apply mean stress theory before assigning to bin

            for (rng, mean, count, i_start,
                 i_end) in rainflow.extract_cycles(series):

                if 'mean_stress_theory' in json_obj:
                    sn_0_user_unit = sn_0
                    rng = apply_mean_stress_theory(mean_stress_theory, mean,
                                                   rng, sn_0_user_unit, r_m,
                                                   r_y)
                bin_index = int(abs(rng) / binsize)

                # handle possibility of the range equaling the max range

                if bin_index == nbins:
                    bin_index = nbins - 1
                counts_ix[bin_index] += count

            # save count data to dictionary where key is the range

            counts = dict(
                ((k + 1) * binsize, v) for (k, v) in counts_ix.items())
            cycles_list = sorted(counts.items())

            json_obj['stress_data'] = {}

            for i in range(len(cycles_list)):
                json_obj['stress_data'][i] = {
                    'rng': cycles_list[i][0],
                    'cycles': cycles_list[i][1],
                    'sm': 0.
                }
            if 'mean_stress_theory' in json_obj:
                del json_obj['mean_stress_theory']
        elif 'maxrange' in json_obj['serie_data'] and 'nbins' not in json_obj['serie_data']:
            response[
                0] = '"maxrange" without "nbins" is not currently handled - not caught by the json schema validator'
        else:
            json_obj['stress_data'] = {}
            for (i, (rng, mean, count, i_start,
                     i_end)) in enumerate(rainflow.extract_cycles(series)):
                json_obj['stress_data'][i] = {
                    'rng': rng,
                    'cycles': count,
                    'sm': mean
                }

        #res_dict['stress_data'] = json_obj['stress_data']

    # Fatigue calculation in MPa unit
    # note: `it` is never incremented in this loop, so every bin is processed
    it = 0
    for bin in json_obj['stress_data'].keys():
        if it == 0:
            cycles = json_obj['stress_data'][bin]['cycles']
            rng = json_obj['stress_data'][bin]['rng']
            if 'sm' in json_obj['stress_data'][bin]:
                sm = json_obj['stress_data'][bin]['sm']
            else:
                sm = 0.

            # init result dictionary

            _rst_dic_per_bin = {}
            _rst_dic_per_bin['rng'] = rng
            _rst_dic_per_bin['mean'] = sm
            _rst_dic_per_bin['corrected_rng'] = rng
            _rst_dic_per_bin['cycles'] = cycles
            _rst_dic_per_bin['life'] = 0.
            _rst_dic_per_bin['log10_life'] = 0.
            _rst_dic_per_bin['damage_per_bin'] = 0.
            _rst_dic_per_bin['safety_factor_life_per_bin'] = 0.

            if cycles != 0.:

                # calculate s_nb

                if cycles <= n_0:
                    s_nb = 10**(log10_sn_1 +
                                (log10(n_0) - log10(cycles)) / m_0)
                elif cycles <= n_c:
                    s_nb = 10**(log10(sn_c) +
                                (log10(n_c) - log10(cycles)) / m_1)
                else:
                    s_nb = 10**(log10(sn_c) -
                                (log10(cycles) - log10(n_c)) / m_2)

                # mean stress theory

                if 'mean_stress_theory' in json_obj and 'sm' in json_obj[
                        'stress_data'][bin]:
                    if rng > 1.5 * r_y:
                        response[
                            0] = 'The calculated Nominal stress range exceeds the 1.5*Yield Stress for the material.  The fatigue calculations may not be valid.  Please check the results carefully.'
                    if json_obj['stress_data'][bin]['sm'] != 0.:
                        mean_stress_theory = json_obj['mean_stress_theory'][
                            'theory']
                        rng = apply_mean_stress_theory(mean_stress_theory, sm,
                                                       rng, sn_0, r_m, r_y)
                        _rst_dic_per_bin['corrected_rng'] = rng
                    else:
                        _rst_dic_per_bin['corrected_rng'] = rng
                else:
                    _rst_dic_per_bin['corrected_rng'] = rng

            # Calculate life and store result

            if rng > sn_0:
                _rst_dic_per_bin['log10_life'] = -1.
                _rst_dic_per_bin['life'] = 0.
                _rst_dic_per_bin['damage_per_bin'] = 100.0
                _rst_dic_per_bin['safety_factor_life_per_bin'] = 0.
            elif rng > sn_1:
                _rst_dic_per_bin['log10_life'] = log10(
                    n_0) - m_0 * (log10(rng) - log10_sn_1)
                _rst_dic_per_bin['life'] = 10**_rst_dic_per_bin['log10_life']
            elif rng > sn_c:
                _rst_dic_per_bin['log10_life'] = log10(
                    n_c) - m_1 * (log10(rng) - log10(sn_c))
                _rst_dic_per_bin['life'] = 10**_rst_dic_per_bin['log10_life']
            elif rng > 0.:
                _rst_dic_per_bin['log10_life'] = min(
                    log10(n_cutoff),
                    log10(n_c) + m_2 * (log10(sn_c) - log10(rng)))
                _rst_dic_per_bin['life'] = 10**_rst_dic_per_bin['log10_life']
            else:
                _rst_dic_per_bin['log10_life'] = log10(n_cutoff)
                _rst_dic_per_bin['life'] = n_cutoff
            if _rst_dic_per_bin['life'] > 0:
                _rst_dic_per_bin[
                    'damage_per_bin'] = cycles / _rst_dic_per_bin['life']
                if _rst_dic_per_bin['damage_per_bin'] != 0.:
                    _rst_dic_per_bin[
                        'safety_factor_life_per_bin'] = 1 / _rst_dic_per_bin[
                            'damage_per_bin']
                else:
                    _rst_dic_per_bin['safety_factor_life_per_bin'] = 1.
            _rst_dic_per_bin['safety_factor_stress'] = min(
                100.0, s_nb / max(1, rng))

            # store results

            res_dict['result_per_bin'][bin] = _rst_dic_per_bin
        #it+=1
    # Cumulated Damage

    cum_damage = 0.
    for bin in json_obj['stress_data'].keys():
        cum_damage += res_dict['result_per_bin'][bin]['damage_per_bin']
    res_dict['cumulative_damage'] = cum_damage
    if cum_damage > 1e-5:
        res_dict['safety_factor_life_per_bin'] = 1. / cum_damage
        n_seqv = cycles / cum_damage
    else:
        res_dict['safety_factor_life_per_bin'] = 1e5
        n_seqv = n_cutoff

    if n_seqv <= n_0:
        s_eqv = 10**(log10_sn_1 + (log10(n_0) - log10(n_seqv)) / m_0)
    elif n_seqv <= n_c:
        s_eqv = 10**(log10(sn_c) + (log10(n_c) - log10(n_seqv)) / m_1)
    elif n_seqv < n_cutoff:
        s_eqv = 10**(log10(sn_c) - (log10(n_seqv) - log10(n_c)) / m_2)
    else:
        s_eqv = s_nb / 100.0
    res_dict['equivalent_stress_range'] = s_eqv
    res_dict['safety_factor_stress'] = min(100.0, s_nb / s_eqv)
    #print(json.dumps(res_dict, indent=4, sort_keys=True))
    dmg_list = []
    for key, value in res_dict["result_per_bin"].items():
        dmg_list.append(value["damage_per_bin"])
    print("dmg per bin: ", dmg_list)
    print("..........")
    print("total damage: ", sum(dmg_list))
Example 11
def test_extract_cycles(series, cycles, counts):
    result = list(rainflow.extract_cycles(series))
    assert result == cycles
Example 12
def WeldFat(timestamp, _componentId, json_input):

    """Compute weld fatigue results from rainflow-counted stress cycles.

    Parameters
    ----------
    timestamp (datetime.datetime):
    _componentId (string): "e9fafc85-5f4d-422e-8988-6545890f202c"
    json_input (string):

    Returns
    -------
    res_dict dictionary as json

    res_dict['cumulative_damage']
    res_dict['safety_factor_life_per_bin']
    res_dict['equivalent_stress_range']
    res_dict['safety_factor_stress']
    res_dict['rst'] : Fatigue result per bin as below
        res_dict['rst'][bin]['life']
        res_dict['rst'][bin]['log10_life']
        res_dict['rst'][bin]['damage_per_bin']
        res_dict['rst'][bin]['safety_factor_life_per_bin']
    """
    # read json json_objs
    json_obj        = {} 
    json_obj        = json.loads(json_input)
    # Define unit conversion from user units to stress_unit_dict units
    stress_unit   = json_obj['stress_unit']
    stress_unit_dict           = {'mpa':1.0e6,'psi':6894.76,'ksi':6894757.29}
    conv_stress = stress_unit_dict[stress_unit.lower()]
    # Get S-N Curve definition parameters
    if "class" not in json_obj["fatigue_class"]:
        fat_class = "User defined"
        fat     = json_obj["fatigue_class"]["fat"]*conv_stress
        n_fat    = json_obj["fatigue_class"]["n_fat"]
        n_c      = json_obj["fatigue_class"]["n_c"]
        m_1      = json_obj["fatigue_class"]["m_1"]
        m_2      = json_obj["fatigue_class"]["m_2"]
    else:
        fat_class = json_obj["fatigue_class"]["class"]
        fat     = fatClassDict[fat_class]['FAT'][0]*stress_unit_dict[fatClassDict[fat_class]['FAT'][1].lower()]
        n_fat    = fatClassDict[fat_class]['Nfat']
        n_c      = fatClassDict[fat_class]['Nc']
        m_1      = fatClassDict[fat_class]['m1']
        m_2      = fatClassDict[fat_class]['m2']
        
    if "fat_fact" not in json_obj["fatigue_class"]:
        fat_fact = 1.
    else:
        fat_fact = json_obj["fatigue_class"]["fat_fact"]
        
    if 'N0' in fatClassDict[fat_class]:
        n_0      = fatClassDict[fat_class]['N0']
    else:
        n_0 = 1
        
    if 'm0' in fatClassDict[fat_class]:
        m_0      = fatClassDict[fat_class]['m0']
    else:
        m_0 = 1
        
    if 'Ncutoff' in fatClassDict[fat_class]:
        n_cutoff = fatClassDict[fat_class]['Ncutoff']
    else:
        n_cutoff = 1
    # intermediate parameters
    log10_sn_1 = log10(fat*fat_fact)+(log10(n_fat)-log10(n_0))/m_1
    sn_1      = 10**(log10_sn_1)
    sn_0      = 10**(log10_sn_1+log10(n_0)/m_0)
    sn_c      = 10**(log10(fat*fat_fact)-(log10(n_c)-log10(n_fat))/m_1)
    # Get mean stress theory parameter
    if "mean_stress_theory" in json_obj:
        mean_stress_theory=json_obj['mean_stress_theory']['theory']  
        if mean_stress_theory in ['Goodman','Gerber']:
            UTS = json_obj['mean_stress_theory']['ultimate_limit']*conv_stress
            r_m = UTS
            r_y = 0.9*r_m
        elif mean_stress_theory == 'Soderberg':
            SY  = json_obj['mean_stress_theory']['yield_limit']*conv_stress
            r_m = 0.
            r_y = SY
        #elif json_obj['mean_stress_theory'] == 'None': 
        #    if json_obj['Method'] in ['nominalFatigue', 'hotSpotFatigue']: 
        #        r_y = conv_stress*SY
        #    else: 
        #        r_y = 0.0

    # Calculate result per bin
    res_dict = {}
    res_dict['cumulative_damage']        = 0.
    res_dict['safety_factor_life_per_bin'] = 0.
    res_dict['equivalent_stress_range']      = 0.
    res_dict['safety_factor_stress']     = 0.
    res_dict['rst']                          = {}
    
    if 'stress_data' not in json_obj.keys() and 'serie_data' in json_obj.keys():
        series=json_obj['serie_data']["series"]
        if 'nbins' in json_obj['serie_data'].keys(): 
            nbins = json_obj['serie_data']['nbins']
            max_range = max(series) - min(series)
            if 'maxrange' in json_obj['serie_data']:
                my_max_range = json_obj['serie_data']['maxrange']
                if max_range > my_max_range:
                    print("serie max range larger than given max range")
                max_range = my_max_range
            binsize = max_range / nbins
            counts_ix = defaultdict(int)
            for i in range(nbins):
                counts_ix[i] = 0
            # Apply mean stress theory before assigning to bin
            for low, high, mult in rainflow.extract_cycles(series):
                sm = 0.5 * (high + low)
                sa = high - low
                if "mean_stress_theory" in json_obj:  # does not handle sn_0
                    sa = apply_mean_stress_theory(mean_stress_theory, sm, sa, sn_0, r_m, r_y)
                bin_index = int(abs(sa) / binsize)
                # handle possibility of the range equaling the max range
                if bin_index == nbins:
                    bin_index = nbins - 1
                counts_ix[bin_index] += mult
            # save count data to dictionary where key is the range
            counts = dict(((k+1)*binsize,v) for k,v in counts_ix.items())
            cycles_list=sorted(counts.items())
            #print (cycles_list)
            json_obj['stress_data']={}
            for i in range(len(cycles_list)):
                json_obj['stress_data'][i]={'sa':cycles_list[i][0],'cycles':cycles_list[i][1],'sm':0.}
            if "mean_stress_theory" in json_obj:
                del json_obj["mean_stress_theory"]
        else:
            json_obj['stress_data']={}
            for i,(low, high, mult) in enumerate(rainflow.extract_cycles(series)):
                mean=0.5 * (high + low)
                rng=high - low
                json_obj['stress_data'][i]={'sa':rng,'cycles':mult,'sm':mean}
                
    #print(json_obj['stress_data'])

    for bin in json_obj['stress_data'].keys():
        cycles = json_obj['stress_data'][bin]["cycles"]
        sa = json_obj['stress_data'][bin]["sa"] * conv_stress
        # init result dictionary (defaults also cover bins with zero cycles)
        _rst_dic_per_bin = {}
        _rst_dic_per_bin['life'] = 0.0
        _rst_dic_per_bin['log10_life'] = 0.0
        _rst_dic_per_bin['damage_per_bin'] = 0.0
        _rst_dic_per_bin['safety_factor_life_per_bin'] = 0.0
        if cycles != 0.:
            # calculate s_nb
            if cycles <= n_0:
                s_nb = 10**(log10_sn_1 + (log10(n_0) - log10(cycles))/m_0)
            elif cycles <= n_c:
                s_nb = 10**(log10(sn_c) + (log10(n_c) - log10(cycles))/m_1)
            else:
                s_nb = 10**(log10(sn_c) - (log10(cycles) - log10(n_c))/m_2)

            # mean stress theory
            if "mean_stress_theory" in json_obj and "sm" in json_obj['stress_data'][bin]:
                mean_stress_theory = json_obj['mean_stress_theory']['theory']
                sm = json_obj['stress_data'][bin]["sm"] * conv_stress
                sa = apply_mean_stress_theory(mean_stress_theory, sm, sa, sn_0, r_m, r_y)

        # Calculate life and store result
        if sa > sn_0:
            _rst_dic_per_bin['log10_life']              = -1.0
            _rst_dic_per_bin['life']                     = 0.0
            _rst_dic_per_bin['damage_per_bin']         = 100.0
            _rst_dic_per_bin['safety_factor_life_per_bin'] = 0.0
        elif sa> sn_1:
            _rst_dic_per_bin['log10_life']              = log10(n_0) - m_0*(log10(sa)-log10_sn_1)
            _rst_dic_per_bin['life']                     = 10**_rst_dic_per_bin['log10_life']
        elif sa > sn_c:
            _rst_dic_per_bin['log10_life']              = log10(n_c) - m_1*(log10(sa)-log10(sn_c))
            _rst_dic_per_bin['life']                     = 10**_rst_dic_per_bin['log10_life']
        elif sa > 0.0:
            _rst_dic_per_bin['log10_life']              = min(log10(n_cutoff),log10(n_c) + m_2*(log10(sn_c) - log10(sa)))
            _rst_dic_per_bin['life']                     = 10**_rst_dic_per_bin['log10_life']
        else:
            _rst_dic_per_bin['log10_life']              = log10(n_cutoff)
            _rst_dic_per_bin['life']                     = n_cutoff
        if _rst_dic_per_bin['life']> 0: 
            _rst_dic_per_bin['damage_per_bin']         = cycles/_rst_dic_per_bin['life']
            if _rst_dic_per_bin['damage_per_bin']!=0.:
                _rst_dic_per_bin['safety_factor_life_per_bin'] = 1/_rst_dic_per_bin['damage_per_bin']
            else:
                _rst_dic_per_bin['safety_factor_life_per_bin'] = 1.
        _rst_dic_per_bin['safety_factor_stress']         = min(100.0,s_nb/max(1,sa))
        # store results
        res_dict['rst'][bin]=_rst_dic_per_bin

    #Cumulated Damage  
    cum_damage = 0.
    for bin in json_obj['stress_data'].keys():
        cum_damage += res_dict['rst'][bin]['damage_per_bin']
    res_dict['cumulative_damage'] = cum_damage
    if cum_damage > 1e-5:
        res_dict['safety_factor_life_per_bin'] = 1.0/cum_damage
        n_seqv                                   = cycles/cum_damage
    else:
        res_dict['safety_factor_life_per_bin'] = 1e5
        n_seqv = n_cutoff

    if n_seqv <= n_0: 
        s_eqv                                    = 10**(log10_sn_1+(log10(n_0)-log10(n_seqv))/m_0)
    elif n_seqv <= n_c: 
        s_eqv                                    = 10**(log10(sn_c)+(log10(n_c)-log10(n_seqv))/m_1)
    elif n_seqv < n_cutoff: 
        s_eqv                                    = 10**(log10(sn_c)-(log10(n_seqv)-log10(n_c))/m_2)
    else: 
        s_eqv                                    = s_nb/100.0
    res_dict['equivalent_stress_range']  = s_eqv
    res_dict['safety_factor_stress'] = min(100.0,s_nb/s_eqv)
    #insert the result to the 'Result' database
    data = formatTheResultForDB(res_dict)
    #_resultTimeStamp = datetime.datetime.utcnow()
    Result.add_data(timestamp, datetime.datetime.utcnow() , _componentId, data)
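
All of the WeldFat-style functions on this page look weld classes up in a module-level fatClassDict that is not included here. An illustrative entry showing the shape the code expects; the numbers are placeholders, not real IIW/DNV class data:

# Illustrative shape of the module-level fatClassDict the functions above rely on.
fatClassDict = {
    'FAT90': {
        'FAT': (90.0, 'MPa'),   # reference stress range and its unit
        'Nfat': 2.0e6,          # cycles at the reference range
        'Nc': 1.0e7,            # knee point of the bilinear curve
        'm1': 3.0,              # slope above the knee
        'm2': 5.0,              # slope below the knee
        'Ncutoff': 1.0e9,       # cut-off life (optional, defaults to 1 in the code)
    },
}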
Example 13
def test_extract_cycles_small_series(series, cycles):
    assert list(rainflow.extract_cycles(series)) == cycles
Example 14
def unpack_weldfat(json_input):
    """
        Unpacks input json
        to theano readable parameters
    """
    #unpacking function:
    stress_unit_dict = {
        'mpa': 1.0,
        'psi': 145.038,
        'ksi': 0.145038
    }  #Mpa to user unit

    json_obj = {}
    json_obj = json.loads(json_input)
    kwargs = {}

    if 'class' not in json_obj['fatigue_class']:
        fat_class = 'User defined'
        #kwargs["fat_class"] = 'User defined'
        kwargs["fat"] = json_obj['fatigue_class']['fat']
        kwargs["n_fat"] = json_obj['fatigue_class']['n_fat']
        kwargs["n_c"] = json_obj['fatigue_class']['n_c']
        kwargs["m_1"] = json_obj['fatigue_class']['m_1']
        kwargs["m_2"] = json_obj['fatigue_class']['m_2']
    else:
        fat_class = json_obj['fatigue_class']['class']
        if fat_class in fatClassDict:
            kwargs["fat"] = fatClassDict[fat_class]['FAT'][
                0] * stress_unit_dict[fatClassDict[fat_class]['FAT'][1].lower(
                )]  #SN curve in MPa change to user unit
            kwargs["n_fat"] = fatClassDict[fat_class]['Nfat']
            kwargs["n_c"] = fatClassDict[fat_class]['Nc']
            kwargs["m_1"] = fatClassDict[fat_class]['m1']
            kwargs["m_2"] = fatClassDict[fat_class]['m2']
        else:
            response[
                0] = fat_class + ' not found in fatClassDict - not caught by the json schema validator'

    if 'fat_fact' not in json_obj['fatigue_class']:
        kwargs["fat_fact"] = 1.
    else:
        kwargs["fat_fact"] = json_obj['fatigue_class']['fat_fact']

    if 'N0' in fatClassDict[fat_class]:
        kwargs["n_0"] = fatClassDict[fat_class]['N0']
    else:
        kwargs["n_0"] = 1

    if 'm0' in fatClassDict[fat_class]:
        kwargs["m_0"] = fatClassDict[fat_class]['m0']
    else:
        kwargs["m_0"] = 1

    if 'Ncutoff' in fatClassDict[fat_class]:
        kwargs["n_cutoff"] = fatClassDict[fat_class]['Ncutoff']
    else:
        kwargs["n_cutoff"] = 1

    if 'mean_stress_theory' in json_obj:
        kwargs["mean_stress_theory"] = json_obj['mean_stress_theory']
        mean_stress_theory = kwargs['mean_stress_theory']['theory']
        if mean_stress_theory in ['Goodman', 'Gerber']:
            kwargs["r_m"] = kwargs['mean_stress_theory']['ultimate_limit']
            kwargs["r_y"] = 0.9 * kwargs["r_m"]
            if mean_stress_theory == "Goodman":
                m_s_th = 1
            else:  #Gerber
                m_s_th = 2
        elif mean_stress_theory == 'Soderberg':
            kwargs["r_m"] = 0.
            kwargs["r_y"] = kwargs['mean_stress_theory']['yield_limit']
            m_s_th = 3
    else:
        m_s_th = 0
    kwargs["m_s_th"] = m_s_th
    del kwargs["mean_stress_theory"]

    if 'stress_data' not in json_obj and 'serie_data' in json_obj:
        series = json_obj['serie_data']['series']
        if series == []:
            response[0] = 'serie ' + str(series) + ' not valid'
            return
        if 'nbins' in json_obj['serie_data'].keys():
            kwargs["max_range"] = max(series) - min(series)
            if 'maxrange' in json_obj['serie_data']:
                kwargs["my_max_range"] = json_obj['serie_data']['maxrange']
                if kwargs["max_range"] > kwargs["my_max_range"]:
                    response[
                        0] = 'Warning: serie max range larger than given max range'
                kwargs["max_range"] = kwargs["my_max_range"]

            kwargs["nbins"] = json_obj['serie_data']['nbins']
            kwargs["binsize"] = kwargs["max_range"] / kwargs["nbins"]

            kwargs["counts_ix"] = defaultdict(int)
            for i in range(kwargs["nbins"]):
                kwargs["counts_ix"][i] = 0

            # Apply mean stress theory before assigning to bin

            for (rng, mean, count, i_start,
                 i_end) in rainflow.extract_cycles(series):

                if 'mean_stress_theory' in json_obj:
                    kwargs["sn_0_user_unit"] = kwargs["sn_0"]
                    kwargs["rng"] = apply_mean_stress_theory(
                        kwargs["mean_stress_theory"], mean, rng,
                        kwargs["sn_0_user_unit"], kwargs["r_m"], kwargs["r_y"])
                kwargs["bin_index"] = int(abs(rng) / kwargs["binsize"])

                # handle possibility of the range equaling the max range

                if kwargs["bin_index"] == kwargs["nbins"]:
                    kwargs["bin_index"] = kwargs["nbins"] - 1
                kwargs["counts_ix"][kwargs["bin_index"]] += count

            # save count data to dictionary where key is the range

            kwargs["counts"] = dict(((k + 1) * kwargs["binsize"], v)
                                    for (k, v) in kwargs["counts_ix"].items())
            kwargs["cycles_list"] = sorted(kwargs["counts"].items())

            kwargs["stress_data"] = {}

            for i in range(len(kwargs["cycles_list"])):
                kwargs["stress_data"][i] = {
                    'rng': kwargs["cycles_list"][i][0],
                    'cycles': kwargs["cycles_list"][i][1],
                    'sm': 0.
                }
            if 'mean_stress_theory' in json_obj:
                del json_obj['mean_stress_theory']
        elif 'maxrange' in json_obj['serie_data'] and 'nbins' not in json_obj['serie_data']:
            response[
                0] = '"maxrange" without "nbins" is not currently handled - not caught by the json schema validator'
        else:
            kwargs['cycles'] = []
            kwargs['rng'] = []
            kwargs['sm'] = []

            for (i, (rng, mean, count, i_start,
                     i_end)) in enumerate(rainflow.extract_cycles(series)):
                kwargs["cycles"].append(count)
                kwargs["rng"].append(rng)
                kwargs["sm"].append(mean)
    #print(json.dumps(kwargs, indent=4, sort_keys=True))
    return kwargs