Example #1
    def get_rule(data_item): 
    
        #get degrees (firing strengths) of each rule
        ants = {} #antecedents of each rule in form ['function', 'var'] : ('ling', degree), where degree is the firing strength
        for inKey in inputMFs: #for each input key

            if not fuzzyDataFlag:
                [inXs, inYs] = fuzzyOps.rangeToMF(data_item[q][inKey], inDataMFs) #build input MF
            else: 
                [inXs, inYs] = data_item[q][inKey]
            
            max_fs = ('none', 0.0) #keep track of MF with maximum firing strength
            
            for ling in inputMFs[inKey]: #for each MF in the input
                #with Timer() as t:
                fs_x,fs_y = fuzz.fuzzy_and(inputMFs[inKey][ling][0],inputMFs[inKey][ling][1], inXs, inYs)
                #print '----------> AND time:', t.msecs, 'ms for input:', inKey, 'at length', len(fs_y)
                fs = max(fs_y)  #get firing strength of data point and given antecedent MF
                if fs > max_fs[1]: max_fs = (ling,fs)

            if max_fs[0] != 'none':
                ants[inKey + (max_fs[0],)] = max_fs[1] #append rule with highest firing strength to antecedent
                
        if len(ants) > 0:    
            rule_deg = reduce(operator.mul, [ants[k] for k in ants]) #use to calc "degree" or strength of each rule
        else: 
            rule_deg =  0.0
        
        # repeat for outputs
        conts = {} #consequents of each rule in form output : ('ling', degree)
        for outKey in outputMFs: #for each output key
        
            if not fuzzyDataFlag:
                [outXs, outYs] = fuzzyOps.rangeToMF(data_item[2], outDataMFs) #build outputMF
            else:
                [outXs, outYs] = data_item[2]
                
            max_fs = ('none', 0.0) #keep track of MF with maximum firing strength
            for ling in outputMFs[outKey]: #for each MF in the input
                #with Timer() as t:
                fs_x, fs_y = fuzz.fuzzy_and(outputMFs[outKey][ling][0], outputMFs[outKey][ling][1], outXs, outYs)
                #print '----------> AND output time:', t.msecs, 'ms'
                fs = max(fs_y)  #get firing strength of data point and given antecedent MF
                if fs > max_fs[1]: max_fs = (ling,fs)

            conts[(outKey,) + (max_fs[0],)] = max_fs[1] #append consequent with highest firing strength
        
        if len(conts) > 0:    
            rule_deg = rule_deg * reduce(operator.mul, [conts[k] for k in conts]) #use to calc "degree" or strength of each rule
        else: rule_deg = 0.0
        
        return [ants, conts, rule_deg]
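
The core step in get_rule above is picking, for each input, the linguistic term whose MF overlaps the fuzzified data point the most; the rule degree is then the product of those peak overlaps. A minimal self-contained sketch of that step (the universe, term names, and data MF below are made-up illustrations, not part of the original system):

import numpy as np
import skfuzzy as fuzz

universe = np.linspace(0.0, 10.0, 101)
input_mfs = {'low':  fuzz.trimf(universe, [0, 0, 5]),
             'high': fuzz.trimf(universe, [5, 10, 10])}
data_mf = fuzz.trimf(universe, [3, 4, 5])   # fuzzified data point

best_ling, best_fs = 'none', 0.0
for ling, mf in input_mfs.items():
    _, y_and = fuzz.fuzzy_and(universe, mf, universe, data_mf)  # intersection of term MF and data MF
    fs = max(y_and)                                             # firing strength = height of the intersection
    if fs > best_fs:
        best_ling, best_fs = ling, fs
# best_ling/best_fs play the role of max_fs above; multiplying these over all inputs gives rule_deg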
Example #2
 def firing_strength(self, input_name, input_, input_sys):
     if not isinstance(input_, list):  #if a singleton and not a list
         fs = self.fuzzy_single_AND(
             input_,
             [input_sys.MFs[input_name][0], input_sys.MFs[input_name][1]])
         return fs
     x_min, y_min = fuzz.fuzzy_and(
         input_sys.MFs[input_name][0], input_sys.MFs[input_name][1],
         input_[0],
         input_[1])  #use AND operator to get minimum of two functions
     return max(y_min)
Example #3
def firing_strength(self, input_name, input_, input_sys):
    """
    get firing strength of an input/output
    input_name - linguistic input name
    input_ - list corresponding to input [x,y] or singleton
    input_sys - object corresponding to system input MFs
    """
    if not isinstance(input_, list): #if a singleton and not a list
        fs = self.fuzzy_single_AND(input_, [input_sys.MFs[input_name][0],input_sys.MFs[input_name][1]])        
        return fs        
    x_min,y_min = fuzz.fuzzy_and(input_sys.MFs[input_name][0],input_sys.MFs[input_name][1],input_[0],input_[1]) #use AND operator to get minimum of two functions
    return max(y_min)
Example #4
def firing_strength(self, input_name, input_, input_sys):
    """
    get firing strength of an input/output
    input_name - linguistic input name
    input_ - list corresponding to input [x,y] or singleton
    input_sys - object corresponding to system input MFs
    """
    if not isinstance(input_, list):  #if a singleton and not a list
        fs = self.fuzzy_single_AND(
            input_,
            [input_sys.MFs[input_name][0], input_sys.MFs[input_name][1]])
        return fs
    x_min, y_min = fuzz.fuzzy_and(
        input_sys.MFs[input_name][0], input_sys.MFs[input_name][1], input_[0],
        input_[1])  #use AND operator to get minimum of two functions
    return max(y_min)
Example #5
    def firing_strength(self, input_name, input_, input_sys):
        """
        Get firing strength of an input
        
        ------INPUTS------
        input_name : string
            linguistic input name
        input_ : list or float
            corresponding to input [x,y] or singleton
        input_sys : input object
            object corresponding to system input MFs
        ------OUTPUTS------ 
        firing_strength : float
            firing strength of input and MF

        """
        if not isinstance(input_, list): #if a singleton and not a list
            fs = self.fuzzy_single_AND(input_, [input_sys.MFs[input_name][0],input_sys.MFs[input_name][1]])        
            return fs 
        x_min,y_min = fuzz.fuzzy_and(np.array(input_sys.MFs[input_name][0]),
                                     np.array(input_sys.MFs[input_name][1]),
                                     np.array(input_[0]),np.array(input_[1])) #use AND operator to get minimum of two functions
        return max(y_min)
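
For readers who want to try the same idea outside the class, here is a small hedged sketch; the names are hypothetical, and fuzzy_single_AND (not shown in the excerpt) is stood in for by interp_membership for the crisp case:

import numpy as np
import skfuzzy as fuzz

x = np.linspace(0.0, 1.0, 201)
term_mf = fuzz.gaussmf(x, 0.5, 0.1)                 # one linguistic term of the system input

# fuzzy input [x, y]: firing strength is the height of the AND (min) of the two MFs
fuzzy_input = [x, fuzz.trimf(x, [0.4, 0.45, 0.5])]
_, y_min = fuzz.fuzzy_and(x, term_mf, fuzzy_input[0], fuzzy_input[1])
fs_fuzzy = max(y_min)

# singleton input: the firing strength reduces to the membership value at that point
fs_crisp = fuzz.interp_membership(x, term_mf, 0.45)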
Example #6
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A') = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition
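    # The two compositions are only named above; a hedged sketch of what they might
    # look like with scikit-fuzzy's relation helpers (R1 and R2 are illustrative
    # relations built from the sets already defined, not part of the original snippet):
    R1 = fuzz.relation_min(young, middle_aged)       # fuzzy relation (Mamdani min implication)
    R2 = fuzz.relation_min(middle_aged, young)
    comp_maxmin = fuzz.maxmin_composition(R1, R2)    # max-min composition
    comp_maxprod = fuzz.maxprod_composition(R1, R2)  # max-product composition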
Example #7
stop = 10 + delta
step = 0.5
x = np.arange(start, stop + delta, step)

# Triangular membership function
x1 = np.arange(0, 5 + delta, step)
trimf = fuzz.trimf(x1, [0, 2.5, 5])

# Trapezoidal membership function
x2 = np.arange(4, 10 + delta, step)
trapmf = fuzz.trapmf(x2, [4, 6, 8, 10])

# fuzzy logic
tri_not = fuzz.fuzzy_not(trimf)
trap_not = fuzz.fuzzy_not(trapmf)
x3, tri_trap_and = fuzz.fuzzy_and(x1, trimf, x2, trapmf)
x3, tri_trap_or = fuzz.fuzzy_or(x1, trimf, x2, trapmf)

# Defuzzify
centroid_x = fuzz.defuzz(x3, tri_trap_or, "centroid")
centroid_y = fuzz.interp_membership(x3, tri_trap_or, centroid_x)
bisector_x = fuzz.defuzz(x3, tri_trap_or, "bisector")
bisector_y = fuzz.interp_membership(x3, tri_trap_or, bisector_x)
mom_x = fuzz.defuzz(x3, tri_trap_or, "mom")
mom_y = fuzz.interp_membership(x3, tri_trap_or, mom_x)
som_x = fuzz.defuzz(x3, tri_trap_or, "som")
som_y = fuzz.interp_membership(x3, tri_trap_or, som_x)
lom_x = fuzz.defuzz(x3, tri_trap_or, "lom")
lom_y = fuzz.interp_membership(x3, tri_trap_or, lom_x)

# Whole config
plt.plot(x, medio, 'b', linewidth=1.5, label='Medio')

plt.title('Funcion union Maxima')
plt.ylabel('Membresia')
plt.xlabel('Velocidad km/h')
plt.legend(loc='center right', bbox_to_anchor=(
    1.25, 0.5), ncol=1, fancybox=True, shadow=True)

plt.axvline(x)

i = 0
while i <= 10:
    plt.axvline(i, ymin=0, ymax=1, color='g', linestyle='-.')
    i += 1

plt.plot(0, 1, marker='o', markersize=10, color='g')
plt.plot(1, 0.8, marker='o', markersize=10, color='g')
plt.plot(2, 0.6, marker='o', markersize=10, color='g')
plt.plot(3, 0.6, marker='o', markersize=10, color='g')
plt.plot(4, 0.8, marker='o', markersize=10, color='g')
plt.plot(5, 1, marker='o', markersize=10, color='g')

plt.plot(6, 0.8, marker='o', markersize=10, color='g')
plt.plot(7, 0.6, marker='o', markersize=10, color='g')
plt.plot(8, 0.4, marker='o', markersize=10, color='g')
plt.plot(9, 0.2, marker='o', markersize=10, color='g')
plt.plot(10, 0, marker='o', markersize=10, color='g')

plt.show()

fuzz.fuzzy_and(x, bajo, x, medio)
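
A note on this example: trimf and trapmf are defined on different universes (x1 spans 0 to 5, x2 spans 4 to 10). fuzz.fuzzy_and and fuzz.fuzzy_or handle that by interpolating both MFs onto a combined universe and returning it as the first value, which is why x3, not x1 or x2, feeds the defuzz and interp_membership calls. A self-contained illustration (variable names here are new):

import numpy as np
import skfuzzy as fuzz

xa = np.arange(0.0, 5.5, 0.5)
xb = np.arange(4.0, 10.5, 0.5)
mfa = fuzz.trimf(xa, [0, 2.5, 5])
mfb = fuzz.trapmf(xb, [4, 6, 8, 10])

xc, mfc = fuzz.fuzzy_or(xa, mfa, xb, mfb)   # xc covers the union of xa and xb
crisp = fuzz.defuzz(xc, mfc, 'centroid')    # defuzzify on the combined universe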
Example #9
from skfuzzy import trapmf
from skfuzzy import smf as s_shape_mf
from skfuzzy import zmf as z_shape_mf

font = {'family': 'DejaVu Sans', 'weight': 'normal', 'size': 7}
plt.rc('font', **font)

universe = np.arange(0, 13)
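
# kinked_curve_mf is not defined in this excerpt; it presumably builds a piecewise-linear
# MF from (x, membership) breakpoints. A hypothetical stand-in (an assumption, not the
# original helper) so the snippet can be run:
def kinked_curve_mf(universe, points):
    xs = [p[0] for p in points]   # breakpoint locations
    ys = [p[1] for p in points]   # membership values at the breakpoints
    return np.interp(universe, xs, ys)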

mfA = kinked_curve_mf(universe, [(2, 0), (8, 0.5), (9, 0.25), (14, 0)])
mfB = kinked_curve_mf(universe, [(0, 1), (5, 0)])
mfC = kinked_curve_mf(universe, [(0, 0), (3, 1), (5, 0)])

B_and_C, mf_B_and_C = fuzz.fuzzy_and(universe, mfB, universe, mfC)
A_or_B_and_C, mf_A_or_B_and_C = fuzz.fuzzy_or(universe, mfA, B_and_C,
                                              mf_B_and_C)

fig = plt.figure(figsize=(10, 8))
grid = gridspec.GridSpec(nrows=2, ncols=6)

axA = fig.add_subplot(grid[0, :2],
                      xlim=(0, max(universe)),
                      ylim=(0, 1),
                      title='A')
axB = fig.add_subplot(grid[0, 2:4], sharex=axA, sharey=axA, title='B')
axC = fig.add_subplot(grid[0, 4:], sharex=axA, sharey=axA, title='C')
axBC = fig.add_subplot(grid[1, :3], sharex=axA, sharey=axA, title='B^C')
axD = fig.add_subplot(grid[1, 3:], sharex=axA, sharey=axA, title='AvB^C')

fig.tight_layout()
plt.locator_params(nbins=len(universe))
Example #10
def train_NEFPROX(system,
                  trainData,
                  valData,
                  inMFs,
                  outMFs,
                  iRange,
                  oRange,
                  inDataMFs='sing',
                  outDataMFs='sing',
                  sigma=0.0005,
                  nTrainMax=None,
                  nValMax=None,
                  maxIterations=500,
                  errConvergeCount=5,
                  TESTMODE=False):
    """"
    Trains a NEFPROX system (based on Nernberger, Nauck, Kruse - 
    "Neuro-fuzzy control based on the NEFCON-model: recent developments"
    and on Nauck, Kruse - "Neuro-Fuzzy Systems for Function Approximation")
    
    ------ INPUTS ------
    
    system : instance of nefprox
        NEFPROX system to train
    trainData : list 
        training data set
    valData : list
        separate validation data set... data in form: 
        [quant_inputs, qual_inputs, outputData]
        with each data item ['function', 'var'] : [min,max] 
        (inputs for system are named 'function var')
    inMFs : dict
        dictionary of input MFs: {inputname  : {ling1 : ([x,y], params), ling2 : ([x,y], params), ...},
                                  inputname2 : {ling1: ([x,y], params), ... }, ... }
    outMFs : dict
        dictionary of output MFs: {ling1 : [x,y], ling2 : [xy], ...} 
    iRange : dict
        dictionary of input ranges for MFs: {[input] : [x,y], ... }
    oRange : dict
            dictionary of output ranges for MFs: {[output] : [x,y], ... }
    inDataMFs : string
        type of MF for input data (given [min, max]), supports sing, trap and tri 
    outDataMFs : string
        type of MF for output data (given [min,max]), supports sing, trap and tri 
    sigma : float
        learning rate. modifies adjustments to MFs
    nTrainMax : int
        maximum number of training points to use
    nValMax : int 
        maximum number of validation points to use
    
    Note: As of 30May15 only setup to train fuzzy triangular MFs
    """

    q = 0  #0 for quant data, 1 for qual data

    out_name = outMFs.keys()[0]

    #limit amount of data:
    if nTrainMax is not None: trainData = trainData[:nTrainMax]
    if nValMax is not None: valData = valData[:nValMax]

    del_sysErr = 0.01  #percentage for system error to decrease
    sysErrorLast = 9999
    errDecreaseCount = 0
    errConvergeCount = 0
    errIncreaseCount = 0
    convergeCount = 5  #for this number of turns to converge
    bestSystem = system  #track best system
    bestError = sysErrorLast  #track best error
    iteration = 1  #track iterations through data

    trackOptInfo = optInfo  #track of optimization information
    trackOptInfo.lenTrainData = len(trainData)
    trackOptInfo.lenValidationData = len(valData)

    #print "BUILDING DATA MFS"
    #GET MFs FOR INPUT TRAINING DATA
    trainData_MFs = copy.deepcopy(trainData)

    for dataIt in trainData_MFs:

        #create input MFs for each input
        for inp in dataIt[q]:
            if inDataMFs == 'sing':  #create singleton MF
                dataIt[q][inp] = sum(dataIt[q][inp]) / len(
                    dataIt[q][inp])  #get mean of range (or single value)
                x_range = [
                    dataIt[q][inp] * 0.9, dataIt[q][inp] * 1.1,
                    (dataIt[q][inp] * 1.1 - dataIt[q][inp] * 0.9) / 100.
                ]
                dataIt[q][inp] = list(
                    fuzzOps.singleton_to_fuzzy(
                        dataIt[q][inp], x_range))  #turn singleton value to MF
            elif inDataMFs == 'tri':  #create triangular MF (min, avg, max)
                x_range = np.arange(
                    dataIt[q][inp][0] * 0.9, dataIt[q][inp][1] * 1.1,
                    (dataIt[q][inp][1] * 1.1 - dataIt[q][inp][0] * 0.9) / 150)
                y_vals = fuzz.trimf(x_range, [
                    dataIt[q][inp][0],
                    sum(dataIt[q][inp]) / len(dataIt[q][inp]),
                    dataIt[q][inp][1]
                ])
                dataIt[q][inp] = [x_range, y_vals]
            elif inDataMFs == 'trap':  #create trapezoidal MF (min, min, max, max)
                x_range = np.arange(
                    dataIt[q][inp][0] * 0.9, dataIt[q][inp][1] * 1.1,
                    (dataIt[q][inp][1] * 1.1 - dataIt[q][inp][0] * 0.9) / 150)
                y_vals = fuzz.trapmf(x_range, [
                    dataIt[q][inp][0], dataIt[q][inp][0], dataIt[q][inp][1],
                    dataIt[q][inp][1]
                ])
                dataIt[q][inp] = [x_range, y_vals]

        #create output MFs
        if outDataMFs == 'sing':  #create singleton MF
            dataIt[2] = sum(dataIt[2]) / len(
                dataIt[2])  #get average for singleton value
            x_range = [
                dataIt[2] * 0.9, dataIt[2] * 1.1,
                (dataIt[2] * 1.1 - dataIt[2] * 0.9) / 100.
            ]
            dataIt[2] = list(fuzzOps.singleton_to_fuzzy(
                dataIt[2], x_range))  #turn singleton value to MF
        elif outDataMFs == 'tri':  #create triangular MF (min, avg, max)
            x_range = np.arange(dataIt[2][0] * 0.9, dataIt[2][1] * 1.1,
                                (dataIt[2][1] * 1.1 - dataIt[2][0] * 0.9) /
                                150)
            y_vals = fuzz.trimf(
                x_range,
                [dataIt[2][0],
                 sum(dataIt[2]) / len(dataIt[2]), dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]
        elif outDataMFs == 'trap':  #create trapezoidal MF (min, min, max, max)
            x_range = np.arange(dataIt[2][0] * 0.9, dataIt[2][1] * 1.1,
                                (dataIt[2][1] * 1.1 - dataIt[2][0] * 0.9) /
                                150)
            y_vals = fuzz.trapmf(
                x_range,
                [dataIt[2][0], dataIt[2][0], dataIt[2][1], dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]

    #GET MFs FOR VALIDATION DATA
    valData_MFs = copy.deepcopy(valData)
    for dataIt in valData_MFs:

        #create input MFs for each input
        for inp in dataIt[q]:
            if inDataMFs == 'sing':  #create singleton MF
                dataIt[q][inp] = sum(dataIt[q][inp]) / len(
                    dataIt[q][inp])  #get mean of range (or single value)
                x_range = [
                    dataIt[q][inp] * 0.9, dataIt[q][inp] * 1.1,
                    (dataIt[q][inp] * 1.1 - dataIt[q][inp] * 0.9) / 100.
                ]
                dataIt[q][inp] = list(
                    fuzzOps.singleton_to_fuzzy(
                        dataIt[q][inp], x_range))  #turn singleton value to MF
            elif inDataMFs == 'tri':  #create triangular MF (min, avg, max)
                x_range = np.arange(
                    dataIt[q][inp][0] * 0.9, dataIt[q][inp][1] * 1.1,
                    (dataIt[q][inp][1] * 1.1 - dataIt[q][inp][0] * 0.9) / 150)
                y_vals = fuzz.trimf(x_range, [
                    dataIt[q][inp][0],
                    sum(dataIt[q][inp]) / len(dataIt[q][inp]),
                    dataIt[q][inp][1]
                ])
                dataIt[q][inp] = [x_range, y_vals]
            elif inDataMFs == 'trap':  #create trapezoidal MF (min, min, max, max)
                x_range = np.arange(
                    dataIt[q][inp][0] * 0.9, dataIt[q][inp][1] * 1.1,
                    (dataIt[q][inp][1] * 1.1 - dataIt[q][inp][0] * 0.9) / 150)
                y_vals = fuzz.trapmf(x_range, [
                    dataIt[q][inp][0], dataIt[q][inp][0], dataIt[q][inp][1],
                    dataIt[q][inp][1]
                ])
                dataIt[q][inp] = [x_range, y_vals]

        #create output MFs
        if outDataMFs == 'sing':  #create singleton MF
            dataIt[2] = sum(dataIt[2]) / len(
                dataIt[2])  #get average for singleton value
            x_range = [
                dataIt[2] * 0.9, dataIt[2] * 1.1,
                (dataIt[2] * 1.1 - dataIt[2] * 0.9) / 100.
            ]
            dataIt[2] = list(fuzzOps.singleton_to_fuzzy(
                dataIt[2], x_range))  #turn singleton value to MF
        elif outDataMFs == 'tri':  #create triangular MF (min, avg, max)
            x_range = np.arange(dataIt[2][0] * 0.9, dataIt[2][1] * 1.1,
                                (dataIt[2][1] * 1.1 - dataIt[2][0] * 0.9) /
                                150)
            y_vals = fuzz.trimf(
                x_range,
                [dataIt[2][0],
                 sum(dataIt[2]) / len(dataIt[2]), dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]
        elif outDataMFs == 'trap':  #create trapezoidal MF (min, min, max, max)
            x_range = np.arange(dataIt[2][0] * 0.9, dataIt[2][1] * 1.1,
                                (dataIt[2][1] * 1.1 - dataIt[2][0] * 0.9) /
                                150)
            y_vals = fuzz.trapmf(
                x_range,
                [dataIt[2][0], dataIt[2][0], dataIt[2][1], dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]

    #Add all inputs and input MFs
    for inp in inMFs:
        if not inp in system.layer1:
            system.layer1[inp] = None
        for ling in inMFs[inp]:
            cMF = inMFs[inp][ling]
            system.inputMFs[(inp, ling)] = cMF

    #Add all outputs and output MFs
    for otp in outMFs:
        if not otp in system.layer3:
            system.layer3[otp] = None
        for ling in outMFs[otp]:
            cMF = outMFs[otp][ling]
            system.outputMFs[(otp, ling)] = cMF
    outputname = otp  #should only be one output

    ###### MAIN TRAINING LOOP:
    #iterate over data set until converged or max iterations performed
    while errConvergeCount < convergeCount and iteration <= maxIterations:

        #print "RANDOMIZING DATA"
        trainRef = range(len(trainData))  #get indices for data
        random.shuffle(trainRef)  #shuffle indices
        trainData2 = copy.deepcopy(trainData)  #copy data
        trainData_MFs2 = copy.deepcopy(trainData_MFs)
        trainData = [trainData2[i]
                     for i in trainRef]  #assign new order to data
        trainData_MFs = [trainData_MFs2[i] for i in trainRef]

        #print "LEARNING RULES"
        #STRUCTURE LEARNING (learn rules)
        rules = []
        for dataIt in trainData_MFs:  #for each learning data pair (s_i, t_i):

            rule_ant = []
            #for each input create input MF
            for inp in dataIt[q]:

                #find the MF (j) that returns the maximum degree of membership for the input
                maxMF = (0.0, next(k for k in inMFs[inp[0] + '_' + inp[1]])
                         )  #track firing strength and MF
                for mfx in inMFs[inp[0] + '_' + inp[1]]:
                    union = fuzz.fuzzy_and(
                        inMFs[inp[0] + '_' + inp[1]][mfx][0],
                        inMFs[inp[0] + '_' + inp[1]][mfx][1],
                        dataIt[q][inp][0],
                        dataIt[q][inp][1])  #get union (AND) of two MFs
                    fs = max(union[1])  #get firing strength
                    if fs > maxMF[0]: maxMF = (fs, mfx)
                rule_ant.append((inp[0] + '_' + inp[1],
                                 maxMF[1]))  #add to list of best input MFs

            #find output MF that output belongs to with highest degree
            maxMF = (0.0, outMFs[out_name].itervalues().next()
                     )  #grab random MF to start
            for mfx in outMFs[out_name]:
                union = fuzz.fuzzy_and(
                    dataIt[2][0], dataIt[2][1], outMFs[out_name][mfx][0],
                    outMFs[out_name][mfx][1])  #get union (AND) of two MFs
                fs = max(union[1]
                         )  #get "firing strength" (yes I know it's an output)
                if fs > maxMF[0]: maxMF = (fs, mfx)

            rules.append([sorted(rule_ant), maxMF[1], maxMF[0]])  #antecedent, consequent, degree

        #METHOD 2: for creating rule base
        #create rule grid, averaging rule "degree" for each consequent. keep the consequent
        #with the highest average "degree"
        rule_grid = {}
        for rule in rules:
            if rule[2] > 0.0 and len(rule[0]) > 0:
                antX = tuple(sorted(a for a in rule[0]))
                if not antX in rule_grid:  #if rule isn't in grid add it
                    rule_grid[antX] = {
                        rule[1]: (rule[2], 1)
                    }  #add new dict with consequent giving (total degree, number of data points)
                else:
                    if not rule[1] in rule_grid[
                            antX]:  #if consequent isn't already accounted for
                        rule_grid[antX][rule[1]] = (rule[2], 1)
                    else:
                        rule_grid[antX][rule[1]] = \
                            (rule_grid[antX][rule[1]][0] + rule[2],#add to the rule degree and
                             rule_grid[antX][rule[1]][1] + 1)      #update the number of data points

        for rule in rule_grid:  #for each rule grid get the average degree for each consequent
            for cons in rule_grid[rule]:
                rule_grid[rule][cons] = (rule_grid[rule][cons][0]/ \
                                        rule_grid[rule][cons][1], rule_grid[rule][cons][1])

        for rule in rule_grid:  #for each rule grid get the average degree for each consequent
            rule_deg = 0.0
            rule_cons = ''
            for cons in rule_grid[rule]:
                if rule_grid[rule][cons][
                        0] > rule_deg:  #capture consequent with highest average "degree"
                    rule_deg = rule_grid[rule][cons][0]
                    rule_cons = cons
            rule_grid[rule] = [rule_cons, rule_deg]

        #Translate Rule Grid into NEFPROX
        system.layer2 = {}
        system.connect1to2 = {}
        system.connect2to3 = {}

        for rule in rule_grid:

            if len(system.layer2) > 0:
                nodeNo = max([int(ruleNo) for ruleNo in system.layer2
                              ]) + 1.0  #get new node number/name
            else:
                nodeNo = 0

            system.connect1to2[nodeNo] = {antIn: 0.0
                                          for antIn in rule
                                          }  #create antecedent dict for rule
            system.layer2[nodeNo] = None
            system.connect2to3[nodeNo] = {
                (outputname, rule_grid[rule][0]): None
            }

        #PARAMETER LEARNING (adjust MFs)
        trainingError = 0.0
        trainingActPre = [[], []]  #[[actuals], [predictions]]
        time1 = datetime.now()
        for i in range(
                len(trainData)
        ):  #for each learning data pair (s_i, t_i) in raw training data:

            #progress report
            time2 = datetime.now()
            if (time2 - time1).seconds > 60.0:
                print round(100.0 * (float(i) / float(len(trainData))),
                            1), '% done learning parameters.'
                time1 = datetime.now()
                #import pdb; pdb.set_trace()
                #sys.exit("Test Finish")

            #build input object
            if inDataMFs == 'sing':  #if crisp inputs use avg of original data
                inData = {
                    inp[0] + '_' + inp[1]:
                    sum(trainData[i][q][inp]) / len(trainData[i][q][inp])
                    for inp in trainData[i][q]
                }
            else:  #otherwise use input
                inData = {
                    inp[0] + '_' + inp[1]: trainData_MFs[i][q][inp]
                    for inp in trainData_MFs[i][q]
                }

            output = system.run(
                inData)  #pass input through system to get result
            output = output[output.keys()
                            [0]]  #should only  be one output value, get it.

            #get delta value: d = t_i - o_i for each output unit (should only be one)
            if not outDataMFs == 'sing' and isinstance(
                    output, list):  #if both output and data are fuzzy
                raise StandardError(
                    'Still have to convert data with output range to MF')
                err = fuzzy_error.fuzErrorAC(trainData_MFs[i], output)
            elif outDataMFs == 'sing' and isinstance(
                    output, float):  #if both output and data are crisp
                err = sum(trainData[i][2]) / len(trainData[i][2]) - output
                trainingError = trainingError + err**2
                trainingActPre[0].append(
                    sum(trainData[i][2]) / len(trainData[i][2]))
                trainingActPre[1].append(output)
            elif not outDataMFs == 'sing' and isinstance(
                    output, float):  #if both output is fuzzy and data is crisp
                raise StandardError('You have not created a case for this yet')
            elif outDataMFs == 'sing' and isinstance(
                    output, list):  #if both output is crisp and data is fuzzy
                raise StandardError('You have not created a case for this yet')

            #back propagate error through system
            system.backpropagate(err, sigma, trainData_MFs[i][2], iRange, oRange)

        trackOptInfo.trackError[0].append(
            (trainingError / len(trainData))**0.5)  #save RMS training error

        #CHECK SYSTEM ERROR: with Validation data
        sysError = 0.0
        sysActPre = [[], []]  #[[actuals], [predictions]]
        for i in range(len(valData)):  #for each learning data pair (s_i, t_i):

            #build input object
            if inDataMFs == 'sing':  #if crisp inputs use avg of original data
                inData = {
                    inp[0] + '_' + inp[1]:
                    sum(valData[i][q][inp]) / len(valData[i][q][inp])
                    for inp in valData[i][q]
                }
            else:  #otherwise use input
                inData = {
                    inp[0] + '_' + inp[1]: valData_MFs[i][q][inp]
                    for inp in valData_MFs[i][q]
                }

            output = system.run(
                inData)  #pass input through system to get result
            output = output[output.keys()
                            [0]]  #should only  be one output value, get it.

            #get delta value: d = t_i - o_i for each output unit (should only be one)
            if not outDataMFs == 'sing' and isinstance(
                    output, list):  #if both output and data are fuzzy
                raise StandardError(
                    'Still have to convert data with output range to MF')
                err = fuzzy_error.fuzErrorAC(valData_MFs[i][2], output)
            elif outDataMFs == 'sing' and isinstance(
                    output, float):  #if both output and data are crisp
                err = (sum(valData[i][2]) / len(valData[i][2]) - output)**2
                sysActPre[0].append(sum(valData[i][2]) / len(valData[i][2]))
                sysActPre[1].append(output)
            elif not outDataMFs == 'sing' and isinstance(
                    output, float):  #if both output is fuzzy and data is crisp
                raise StandardError('You have not created a case for this yet')
            elif outDataMFs == 'sing' and isinstance(
                    output, list):  #if both output is crisp and data is fuzzy
                raise StandardError('You have not created a case for this yet')

            sysError = sysError + err  #sum up error
        sysError = (sysError / len(valData)
                    )**0.5  #RMS system error over the validation set
        trackOptInfo.trackError[1].append(sysError)  #track validation error

        #ONLY TEST MODE
        if TESTMODE:
            plt.figure()
            plt.scatter(trainingActPre[0], trainingActPre[1])
            plt.scatter(sysActPre[0], sysActPre[1])
            plt.legend(["Training", "Validation"])
            plt.show()

        #check error progress
        change_sysError = (
            (sysErrorLast - sysError) / sysErrorLast
        )  #get normalized change in error (negative is an increase)

        if iteration > 1:
            if change_sysError > 0: sigma = sigma * 1.03
            elif change_sysError < 0: sigma = sigma * 0.5



        if change_sysError <= del_sysErr and \
           change_sysError >= 0:
            errConvergeCount = errConvergeCount + 1  #add to count if error change is small enough
        else:
            errConvergeCount = 0  #otherwise reset it
        """
        if change_sysError > 0: 
            errDecreaseCount = errDecreaseCount + 1
            errIncreaseCount = errIncreaseCount + 1   #add to count if error increases
            #errIncreaseCount = 0 #otherwise reset it
        else: 
            errIncreaseCount = errIncreaseCount + 1   #add to count if error increases
            errDecreaseCount = 0
            
        if errDecreaseCount >= 4: 
            sigma = sigma*1.1 #if error increases x times, increase the learning rate
            errIncreaseCount = 0
            errDecreaseCount = 0
        elif errIncreaseCount >= 4: 
            sigma = sigma*0.9 #if fluctuations or increases
            errIncreaseCount = 0
            errDecreaseCount = 0
        """

        if abs(sysError) < bestError:  #track best system
            bestSystem = copy.deepcopy(system)
            bestError = abs(sysError)

        sysErrorLast = sysError  #track last error

        iteration = iteration + 1
        trackOptInfo.learning_rate.append(sigma)  #track learning rate

        print 'system error:', round(sysError,
                                     6), '  delta system error:', round(
                                         change_sysError, 6),
        print '  decrease/increase count:', errDecreaseCount, '/', errIncreaseCount, "  learning rate:", round(
            sigma, 8)

    #capture opt info
    trackOptInfo.iterations = iteration

    return bestSystem, trackOptInfo
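
The docstring above describes several nested structures; the sketch below shows one way they might look in practice, shaped the way the training loop indexes them. Every name and number is made up for illustration only:

import numpy as np
import skfuzzy as fuzz

x = np.linspace(0.0, 100.0, 201)
# input MFs: {input_name: {ling: [x, y, params], ...}, ...}
inMFs = {'cruise_speed': {'slow': [x, fuzz.trimf(x, [0, 0, 50]), [0, 0, 50]],
                          'fast': [x, fuzz.trimf(x, [50, 100, 100]), [50, 100, 100]]}}
# output MFs: {output_name: {ling: [x, y, params], ...}}
outMFs = {'range': {'short': [x, fuzz.trimf(x, [0, 0, 50]), [0, 0, 50]],
                    'long':  [x, fuzz.trimf(x, [50, 100, 100]), [50, 100, 100]]}}
# one data item: [quant_inputs, qual_inputs, outputData], keyed by ('function', 'var') tuples
data_item = [{('cruise', 'speed'): [30.0, 40.0]}, {}, [20.0, 35.0]]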
Example #11
def train_NEFPROX(system, trainData, valData, inMFs, outMFs, iRange, oRange,
                  inDataMFs='sing', outDataMFs='sing', sigma=0.0005, 
                  nTrainMax=None, nValMax=None, maxIterations=500, errConvergeCount=5,
                  TESTMODE=False): 
    """"
    Trains a NEFPROX system (based on Nernberger, Nauck, Kruse - 
    "Neuro-fuzzy control based on the NEFCON-model: recent developments"
    and on Nauck, Kruse - "Neuro-Fuzzy Systems for Function Approximation")
    
    ------ INPUTS ------
    
    system : instance of nefprox
        NEFPROX system to train
    trainData : list 
        training data set
    valData : list
        separate validation data set... data in form: 
        [quant_inputs, qual_inputs, outputData]
        with each data item ['function', 'var'] : [min,max] 
        (inputs for system are named 'function var')
    inMFs : dict
        dictionary of input MFs: {inputname  : {ling1 : ([x,y], params), ling2 : ([x,y], params), ...},
                                  inputname2 : {ling1: ([x,y], params), ... }, ... }
    outMFs : dict
        dictionary of output MFs: {ling1 : [x,y], ling2 : [xy], ...} 
    iRange : dict
        dictionary of input ranges for MFs: {[input] : [x,y], ... }
    oRange : dict
            dictionary of output ranges for MFs: {[output] : [x,y], ... }
    inDataMFs : string
        type of MF for input data (given [min, max]), supports sing, trap and tri 
    outDataMFs : string
        type of MF for output data (given [min,max]), supports sing, trap and tri 
    sigma : float
        learning rate. modifies adjustments to MFs
    nTrainMax : int
        maximum number of training points to use
    nValMax : int 
        maximum number of validation points to use
    
    Note: As of 30May15 only setup to train fuzzy triangular MFs
    """

    q = 0 #0 for quant data, 1 for qual data
    
    out_name = outMFs.keys()[0]

    #limit amount of data:
    if nTrainMax is not None: trainData = trainData[:nTrainMax]
    if nValMax is not None:   valData   = valData[:nValMax]
    
    del_sysErr = 0.01 #percentage for system error to decrease
    sysErrorLast = 9999
    errDecreaseCount = 0
    errConvergeCount = 0
    errIncreaseCount = 0
    convergeCount = 5           #for this number of turns to converge    
    bestSystem = system         #track best system
    bestError = sysErrorLast    #track best error
    iteration = 1               #track iterations through data
   
    trackOptInfo = optInfo      #track of optimization information 
    trackOptInfo.lenTrainData = len(trainData)
    trackOptInfo.lenValidationData = len(valData)
    
    #print "BUILDING DATA MFS"
    #GET MFs FOR INPUT TRAINING DATA
    trainData_MFs = copy.deepcopy(trainData)
    
    for dataIt in trainData_MFs:
        
        #create input MFs for each input
        for inp in dataIt[q]: 
            if inDataMFs == 'sing':   #create singleton MF
                dataIt[q][inp] = sum(dataIt[q][inp])/len(dataIt[q][inp]) #get mean of range (or single value)
                x_range = [dataIt[q][inp]*0.9, dataIt[q][inp]*1.1, (dataIt[q][inp]*1.1 - dataIt[q][inp]*0.9)/100.] 
                dataIt[q][inp] = list(fuzzOps.singleton_to_fuzzy(dataIt[q][inp], x_range)) #turn singleton value to MF 
            elif inDataMFs == 'tri':   #create triangular MF (min, avg, max)
                x_range = np.arange(dataIt[q][inp][0]*0.9, dataIt[q][inp][1]*1.1, (dataIt[q][inp][1]*1.1 - dataIt[q][inp][0]*0.9)/150)
                y_vals = fuzz.trimf(x_range, [dataIt[q][inp][0], sum(dataIt[q][inp])/len(dataIt[q][inp]), dataIt[q][inp][1]])
                dataIt[q][inp] = [x_range, y_vals]
            elif inDataMFs == 'trap':  #create trapezoidal MF (min, min, max, max)
                x_range = np.arange(dataIt[q][inp][0]*0.9, dataIt[q][inp][1]*1.1, (dataIt[q][inp][1]*1.1 - dataIt[q][inp][0]*0.9)/150)
                y_vals = fuzz.trapmf(x_range, [dataIt[q][inp][0], dataIt[q][inp][0], dataIt[q][inp][1], dataIt[q][inp][1]])
                dataIt[q][inp] = [x_range, y_vals]

        #create output MFs
        if outDataMFs == 'sing':   #create singleton MF
            dataIt[2] = sum(dataIt[2])/len(dataIt[2]) #get average for singleton value
            x_range = [dataIt[2]*0.9, dataIt[2]*1.1, (dataIt[2]*1.1 - dataIt[2]*0.9)/100.] 
            dataIt[2] = list(fuzzOps.singleton_to_fuzzy(dataIt[2], x_range)) #turn singleton value to MF           
        elif outDataMFs == 'tri':   #create triangular MF (min, avg, max)
            x_range = np.arange(dataIt[2][0]*0.9, dataIt[2][1]*1.1, (dataIt[2][1]*1.1 - dataIt[2][0]*0.9)/150)
            y_vals = fuzz.trimf(x_range, [dataIt[2][0], sum(dataIt[2])/len(dataIt[2]), dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]        
        elif outDataMFs == 'trap':   #create trapezoidal MF (min, min, max, max)
            x_range = np.arange(dataIt[2][0]*0.9, dataIt[2][1]*1.1, (dataIt[2][1]*1.1 - dataIt[2][0]*0.9)/150)
            y_vals = fuzz.trapmf(x_range, [dataIt[2][0], dataIt[2][0], dataIt[2][1], dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]

    #GET MFs FOR VALIDATION DATA
    valData_MFs = copy.deepcopy(valData)
    for dataIt in valData_MFs:
        
        #create input MFs for each input
        for inp in dataIt[q]: 
            if inDataMFs == 'sing':   #create singleton MF
                dataIt[q][inp] = sum(dataIt[q][inp])/len(dataIt[q][inp]) #get mean of range (or single value)
                x_range = [dataIt[q][inp]*0.9, dataIt[q][inp]*1.1, (dataIt[q][inp]*1.1 - dataIt[q][inp]*0.9)/100.] 
                dataIt[q][inp] = list(fuzzOps.singleton_to_fuzzy(dataIt[q][inp], x_range)) #turn singleton value to MF 
            elif inDataMFs == 'tri':   #create triangular MF (min, avg, max)
                x_range = np.arange(dataIt[q][inp][0]*0.9, dataIt[q][inp][1]*1.1, (dataIt[q][inp][1]*1.1 - dataIt[q][inp][0]*0.9)/150)
                y_vals = fuzz.trimf(x_range, [dataIt[q][inp][0], sum(dataIt[q][inp])/len(dataIt[q][inp]), dataIt[q][inp][1]])
                dataIt[q][inp] = [x_range, y_vals]
            elif inDataMFs == 'trap':  #create trapezoidal MF (min, min, max, max)
                x_range = np.arange(dataIt[q][inp][0]*0.9, dataIt[q][inp][1]*1.1, (dataIt[q][inp][1]*1.1 - dataIt[q][inp][0]*0.9)/150)
                y_vals = fuzz.trapmf(x_range, [dataIt[q][inp][0], dataIt[q][inp][0], dataIt[q][inp][1], dataIt[q][inp][1]])
                dataIt[q][inp] = [x_range, y_vals]

        #create output MFs
        if outDataMFs == 'sing':   #create singleton MF
            dataIt[2] = sum(dataIt[2])/len(dataIt[2]) #get average for singleton value
            x_range = [dataIt[2]*0.9, dataIt[2]*1.1, (dataIt[2]*1.1 - dataIt[2]*0.9)/100.] 
            dataIt[2] = list(fuzzOps.singleton_to_fuzzy(dataIt[2], x_range)) #turn singleton value to MF           
        elif outDataMFs == 'tri':   #create triangular MF (min, avg, max)
            x_range = np.arange(dataIt[2][0]*0.9, dataIt[2][1]*1.1, (dataIt[2][1]*1.1 - dataIt[2][0]*0.9)/150)
            y_vals = fuzz.trimf(x_range, [dataIt[2][0], sum(dataIt[2])/len(dataIt[2]), dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]        
        elif outDataMFs == 'trap':   #create trapezoidal MF (min, min, max, max)
            x_range = np.arange(dataIt[2][0]*0.9, dataIt[2][1]*1.1, (dataIt[2][1]*1.1 - dataIt[2][0]*0.9)/150)
            y_vals = fuzz.trapmf(x_range, [dataIt[2][0], dataIt[2][0], dataIt[2][1], dataIt[2][1]])
            dataIt[2] = [x_range, y_vals]
    
    
    #Add all inputs and input MFs
    for inp in inMFs: 
        if not inp in system.layer1:
            system.layer1[inp] = None
        for ling in inMFs[inp]:
            cMF = inMFs[inp][ling]
            system.inputMFs[(inp, ling)] = cMF
    
    #Add all outputs and output MFs
    for otp in outMFs:
        if not otp in system.layer3: 
            system.layer3[otp] = None
        for ling in outMFs[otp]:
            cMF = outMFs[otp][ling]
            system.outputMFs[(otp, ling)] = cMF
    outputname = otp #should only be one output
                            
    ###### MAIN TRAINING LOOP: 
    #iterate over data set until converged or max iterations performed
    while errConvergeCount < convergeCount and iteration <= maxIterations:
        
        #print "RANDOMIZING DATA"
        trainRef = range(len(trainData)) #get indices for data
        random.shuffle(trainRef)         #shuffle indices
        trainData2 = copy.deepcopy(trainData) #copy data
        trainData_MFs2 = copy.deepcopy(trainData_MFs)
        trainData = [trainData2[i] for i in trainRef] #assign new order to data
        trainData_MFs = [trainData_MFs2[i] for i in trainRef]
    
        #print "LEARNING RULES"
        #STRUCTURE LEARNING (learn rules)  
        rules = []
        for dataIt in trainData_MFs: #for each learning data pair (s_i, t_i):
        
            rule_ant = []
            #for each input create input MF
            for inp in dataIt[q]: 
                
                #find the MF (j) that returns the maximum degree of memebership for input
                maxMF = (0.0, next(k for k in inMFs[inp[0] + '_' + inp[1]])) #track firing strength and MF
                for mfx in inMFs[inp[0] + '_' + inp[1]]: 
                    union = fuzz.fuzzy_and(inMFs[inp[0] + '_' + inp[1]][mfx][0], inMFs[inp[0] + '_' + inp[1]][mfx][1],
                                        dataIt[q][inp][0], dataIt[q][inp][1]) #get union (AND) of two MFs
                    fs = max(union[1])                                           #get firing strength
                    if fs > maxMF[0]: maxMF = (fs, mfx)
                rule_ant.append((inp[0]+'_'+inp[1], maxMF[1])) #add to list of best input MFs
            
            #find output MF that output belongs to with highest degree         
            maxMF = (0.0, outMFs[out_name].itervalues().next())  #grab random MF to start
            for mfx in outMFs[out_name]:
                union = fuzz.fuzzy_and(dataIt[2][0], dataIt[2][1], 
                                    outMFs[out_name][mfx][0], outMFs[out_name][mfx][1]) #get union (AND) of two MFs
                fs = max(union[1])  #get "firing strength" (yes I know it's an output)
                if fs > maxMF[0]: maxMF = (fs, mfx)
            
            rules.append([sorted(rule_ant), maxMF[1], maxMF[0]]) #antecedent, consequent, degree
                         
        
        #METHOD 2: for creating rule base
        #create rule grid, averaging rule "degree" for each consequent. keep the consequent
        #with the highest average "degree"
        rule_grid = {}
        for rule in rules:
            if rule[2] > 0.0 and len(rule[0]) > 0:
                antX = tuple(sorted(a for a in rule[0]))
                if not antX in rule_grid:               #if rule isn't in grid add it
                    rule_grid[antX] = {rule[1]: (rule[2], 1)} #add new dict with consequent giving (total degree, number of data points)
                else:   
                    if not rule[1] in rule_grid[antX]: #if consequent isn't already accounted for
                        rule_grid[antX][rule[1]] = (rule[2], 1) 
                    else: 
                        rule_grid[antX][rule[1]] = \
                            (rule_grid[antX][rule[1]][0] + rule[2],#add to the rule degree and
                             rule_grid[antX][rule[1]][1] + 1)      #update the number of data points
        
        for rule in rule_grid: #for each rule grid get the average degree for each consequent
            for cons in rule_grid[rule]: 
                rule_grid[rule][cons] = (rule_grid[rule][cons][0]/ \
                                        rule_grid[rule][cons][1], rule_grid[rule][cons][1])
        
        for rule in rule_grid: #for each rule grid get the average degree for each consequent
            rule_deg = 0.0
            rule_cons = ''
            for cons in rule_grid[rule]:  
                if rule_grid[rule][cons][0] > rule_deg:  #capture consequent with highest average "degree"
                    rule_deg = rule_grid[rule][cons][0]
                    rule_cons = cons
            rule_grid[rule] = [rule_cons, rule_deg] 
       
        #Translate Rule Grid into NEFPROX
        system.layer2 = {}
        system.connect1to2 = {}
        system.connect2to3 = {}
   
        for rule in rule_grid:    
           
            if len(system.layer2) > 0: nodeNo = max([int(ruleNo) for ruleNo in system.layer2]) + 1.0 #get new node number/name
            else: nodeNo = 0
            
            system.connect1to2[nodeNo] = {antIn:0.0 for antIn in rule} #create antecedent dict for rule
            system.layer2[nodeNo] = None
            system.connect2to3[nodeNo] = {(outputname, rule_grid[rule][0]): None}        

        
        #PARAMETER LEARNING (adjust MFs)
        trainingError = 0.0
        trainingActPre = [[],[]] #[[actuals], [predictions]]
        time1 = datetime.now()
        for i in range(len(trainData)): #for each learning data pair (s_i, t_i) in raw training data:
            
            #progress report
            time2 = datetime.now()
            if (time2-time1).seconds > 60.0: 
                print round(100.0*(float(i)/float(len(trainData))),1), '% done learning parameters.'
                time1=datetime.now()
                #import pdb; pdb.set_trace()
                #sys.exit("Test Finish")
            
            #build input object            
            if inDataMFs == 'sing': #if crisp inputs use avg of original data
                inData = {inp[0]+'_'+inp[1]: sum(trainData[i][q][inp])/len(trainData[i][q][inp]) for inp in trainData[i][q]} 
            else: #otherwise use input
                inData = {inp[0]+'_'+inp[1]: trainData_MFs[i][q][inp] for inp in trainData_MFs[i][q]} 
            
            output = system.run(inData) #pass input through system to get result
            output = output[output.keys()[0]] #should only  be one output value, get it.
                 
            #get delta value: d = t_i - o_i for each output unit (should only be one)
            if not outDataMFs=='sing' and isinstance(output, list):  #if both output and data are fuzzy
                raise StandardError('Still have to convert data with output range to MF')
                err = fuzzy_error.fuzErrorAC(trainData_MFs[i], output)
            elif outDataMFs=='sing' and isinstance(output, float):  #if both output and data are crisp
                err = sum(trainData[i][2])/len(trainData[i][2]) - output
                trainingError = trainingError + err**2
                trainingActPre[0].append(sum(trainData[i][2])/len(trainData[i][2]))
                trainingActPre[1].append(output)
            elif not outDataMFs=='sing' and isinstance(output, float):  #if both output is fuzzy and data is crisp
                raise StandardError('You have not created a case for this yet')
            elif outDataMFs=='sing' and isinstance(output, list):  #if both output is crisp and data is fuzzy
                raise StandardError('You have not created a case for this yet')
            
            #back propagate error through system
            system.backpropagate(err, sigma, trainData_MFs[i][2], iRange, oRange)
        
        trackOptInfo.trackError[0].append( (trainingError/len(trainData))**0.5 ) #save RMS training error
            
        #CHECK SYSTEM ERROR: with Validation data
        sysError = 0.0
        sysActPre = [[],[]] #[[actuals], [predictions]]
        for i in range(len(valData)): #for each learning data pair (s_i, t_i):

            #build input object
            if inDataMFs == 'sing': #if crisp inputs use avg of original data
                inData = {inp[0]+'_'+inp[1]: sum(valData[i][q][inp])/len(valData[i][q][inp]) for inp in valData[i][q]} 
            else: #otherwise use input
                inData = {inp[0]+'_'+inp[1]: valData_MFs[i][q][inp] for inp in valData_MFs[i][q]} 
                    
            output = system.run(inData) #pass input through system to get result
            output = output[output.keys()[0]] #should only  be one output value, get it.
            
            #get delta value: d = t_i - o_i for each output unit (should only be one)
            if not outDataMFs=='sing' and isinstance(output, list):  #if both output and data are fuzzy
                raise StandardError('Still have to convert data with output range to MF')
                err = fuzzy_error.fuzErrorAC(valData_MFs[i][2], output)
            elif outDataMFs=='sing' and isinstance(output, float):  #if both output and data are crisp
                err = (sum(valData[i][2])/len(valData[i][2]) - output)**2
                sysActPre[0].append(sum(valData[i][2])/len(valData[i][2]))
                sysActPre[1].append(output)
            elif not outDataMFs=='sing' and isinstance(output, float):  #if both output is fuzzy and data is crisp
                raise StandardError('You have not created a case for this yet')
            elif outDataMFs=='sing' and isinstance(output, list):  #if both output is crisp and data is fuzzy
                raise StandardError('You have not created a case for this yet')
                
            sysError = sysError + err #sum up error
        sysError = (sysError/len(valData))**0.5 #RMS system error over the validation set
        trackOptInfo.trackError[1].append( sysError ) #track validation error
        
        #ONLY TEST MODE
        if TESTMODE:
            plt.figure()
            plt.scatter(trainingActPre[0],trainingActPre[1])
            plt.scatter(sysActPre[0], sysActPre[1])
            plt.legend(["Training", "Validation"])
            plt.show()
            
            
            
        #check error progress
        change_sysError = ((sysErrorLast - sysError)/sysErrorLast) #get normalized change in error (negative is an increase)
        
        if iteration > 1:
            if change_sysError > 0: sigma = sigma*1.03
            elif change_sysError < 0: sigma = sigma*0.5
            
        
        
        if change_sysError <= del_sysErr and \
           change_sysError >= 0: errConvergeCount = errConvergeCount + 1   #add to count if error change is small enough
        else: errConvergeCount = 0 #otherwise reset it
        
        
        
        """
        if change_sysError > 0: 
            errDecreaseCount = errDecreaseCount + 1
            errIncreaseCount = errIncreaseCount + 1   #add to count if error increases
            #errIncreaseCount = 0 #otherwise reset it
        else: 
            errIncreaseCount = errIncreaseCount + 1   #add to count if error increases
            errDecreaseCount = 0
            
        if errDecreaseCount >= 4: 
            sigma = sigma*1.1 #if error increases x times, increase the learning rate
            errIncreaseCount = 0
            errDecreaseCount = 0
        elif errIncreaseCount >= 4: 
            sigma = sigma*0.9 #if fluctuations or increases
            errIncreaseCount = 0
            errDecreaseCount = 0
        """ 
        
        if abs(sysError) < bestError:           #track best system
            bestSystem = copy.deepcopy(system) 
            bestError = abs(sysError)

        sysErrorLast = sysError #track last error
        
        iteration = iteration + 1
        trackOptInfo.learning_rate.append(sigma) #track learning rate
        
        print 'system error:', round(sysError,6), '  delta system error:', round(change_sysError,6), 
        print '  decrease/increase count:', errDecreaseCount, '/', errIncreaseCount, "  learning rate:", round(sigma,8)
    
    #capture opt info            
    trackOptInfo.iterations = iteration
    
    return bestSystem, trackOptInfo               
Example #12
ax0.legend()

ax1.plot(x_servicio, servicio_bajo, 'b', linewidth=1.5, label='Pobre')
ax1.plot(x_servicio, servicio_medio, 'g', linewidth=1.5, label='Aceptable')
ax1.plot(x_servicio, servicio_alto, 'r', linewidth=1.5, label='Asombrosa')
ax1.set_title('Calidad de Servicio')
ax1.legend()

ax2.plot(x_tip, tip_bajo, 'b', linewidth=1.5, label='Bajo')
ax2.plot(x_tip, tip_medio, 'g', linewidth=1.5, label='Medio')
ax2.plot(x_tip, tip_alto, 'r', linewidth=1.5, label='Alto')
ax2.set_title('Cantidad de propina')
ax2.legend()

#testing the AND operation
probando_and = fuzz.fuzzy_and(x_calidad, calidad_alta, x_servicio,
                              servicio_alto)

# hide the top and right spines (looks a bit nicer)
for ax in (ax0, ax1, ax2):
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

plt.tight_layout()

#Rules were chosen to define the set, as explained below
"""
.. image:: PLOT2RST.current_figure

Fuzzy rules
Example #13
 def feedforward(self, inputs):
     """
     ------INPUTS------
     
     inputs : dict
         the set of outputs from previous nodes in form 
         {nodeName: value, nodeName: value, ...} (or inputs for input 
         nodes)   
     """
     #INPUT FEED FORWARD (fuzzify inputs)
     for inp in self.layer1:
         try:
             if inp in inputs: #check if input is given
                 if not isinstance(inputs[inp], list):
                     MF = fuzzOps.paramsToMF([inputs[inp]]) #get fuzzy MF for singleton
                     self.layer1[inp] = MF
                 else: 
                     self.layer1[inp] = inputs[inp]
             else: 
                 print "Not all inputs given!!!" 
                 self.layer3[self.layer3.keys()[0]] = None #set system output to None
         except:
             raise StandardError("NEFPROX input error!")
     
     #RULE FEED FORWARD (gets min (t-norm) firing strength of weights/MFterms and inputs)
     for rule in self.layer2: #for each rule
         for inp in self.connect1to2[rule]: #for each input in antecedent 
             fs = max(fuzz.fuzzy_and(self.inputMFs[inp][0], self.inputMFs[inp][1],
                                     self.layer1[inp[0]][0], self.layer1[inp[0]][1])[1])
             self.connect1to2[rule][inp] = fs
         self.layer2[rule] = min([self.connect1to2[rule][inp] for inp in self.connect1to2[rule]])
 
     #OUTPUT FEED FORWARD (apply minimum of firing strength and output MF (reduce output MF), then aggregate)
     outMFs = []
     for rule in self.connect2to3: #for each rule
         cons = self.connect2to3[rule].keys()[0]  #get consequent for single output
         if self.layer2[rule] > 0.0: #only for active rules (save time)
             outMF = copy.deepcopy(self.outputMFs[cons][:2])
             outMF[1] = np.asarray([ min(self.layer2[rule], outMF[1][i]) for i in range(len(outMF[1])) ])
                         #apply minimum of firing strength and output MF (reduce output MF)
             self.connect2to3[rule][cons] = outMF
             outMFs.append(outMF)
         else: #for inactive rules, applied MF is 0.0 for all 
             self.connect2to3[rule][cons] = [np.asarray([0.0, 0.0]), np.asarray([0.0,0.0])]
             
     #once all rules are reduced with MFs aggregate
     if len(outMFs) > 0: #check for no rules fired
         while len(outMFs) > 1: #get maximum (union) of all MFs (aggregation)
             outMFs0 = outMFs.pop(0)
             outMFs[0][0], outMFs[0][1] = fuzz.fuzzy_or(outMFs0[0], outMFs0[1], 
                                                         outMFs[0][0], outMFs[0][1])
         
         if   self.defuzz == None: pass
         elif self.defuzz == 'centroid': outMFs[0] = fuzz.defuzz(outMFs[0][0],outMFs[0][1],'centroid')
         elif self.defuzz == 'bisector': outMFs[0] = fuzz.defuzz(outMFs[0][0],outMFs[0][1],'bisector')
         elif self.defuzz == 'mom':      outMFs[0] = fuzz.defuzz(outMFs[0][0],outMFs[0][1],'mom')               #mean of maximum
         elif self.defuzz == 'som':      outMFs[0] = fuzz.defuzz(outMFs[0][0],outMFs[0][1],'som')               #min of maximum
         elif self.defuzz == 'lom':      outMFs[0] = fuzz.defuzz(outMFs[0][0],outMFs[0][1],'lom')               #max of maximum
         
         self.layer3[cons[0]] = outMFs[0]
         
     else:#if no rules fire, then output is None
     
         if self.defuzz == None:
             self.layer3[cons[0]] = [[0.0,0.0],[0.0,0.0]] #result 0.0 in fuzzy MF form
         else:
             self.layer3[cons[0]] = 0.0 #result 0.0 as crisp if some defuzz method specified
         
     return True
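
Condensed to plain scikit-fuzzy calls, the inference pipeline feedforward implements is min-implication per rule, max-aggregation across rules, and optional defuzzification. A hedged sketch with made-up firing strengths and output MFs:

import numpy as np
import skfuzzy as fuzz

y = np.linspace(0.0, 10.0, 201)
out_low = fuzz.trimf(y, [0, 0, 5])
out_high = fuzz.trimf(y, [5, 10, 10])

fs_rule1, fs_rule2 = 0.7, 0.3                 # rule firing strengths (layer 2 values)
clipped1 = np.fmin(fs_rule1, out_low)         # min-implication: clip each output MF
clipped2 = np.fmin(fs_rule2, out_high)
_, aggregated = fuzz.fuzzy_or(y, clipped1, y, clipped2)   # max-aggregation (union)
crisp = fuzz.defuzz(y, aggregated, 'centroid')            # defuzzify, as with self.defuzz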
Example #14
    def backpropagate(self, error, LR, data, inRanges, outRanges):
        """
        ------INPUTS------
        """
        
        
        #BACKPROP THROUGH OUTPUT NODE:
        if not isinstance(data, list):
            dataFuzz = fuzzOps.paramsToMF([data]) #get fuzzy version of data for FS
        else: 
            dataFuzz = copy.deepcopy(data)
            data = fuzz.defuzz(data[0], data[1], 'centroid') #get crisp version of data
        
        for rule in self.connect2to3:    #for each rule to output connectionMF
            if self.layer2[rule] > 0.0:   #if rule is firing > 0.0
                outKey = self.connect2to3[rule].keys()[0] #get connection (outName,ling)
                fs = max(fuzz.fuzzy_and(self.outputMFs[outKey][0], self.outputMFs[outKey][1],
                                        dataFuzz[0], dataFuzz[1])[1])
                    #get "firing strength" of individual MF (result of MF for data: W(R,y_i)(t_i))

                #GET CONSTRAINTS: 
                #Triangular: MFs must overlap (or touch other MFs)
                [minP, maxP] = outRanges[outKey[0]]
                if len(self.outputMFs[outKey][2]) == 2:
                    raise StandardError("haven't programmed this")
                if len(self.outputMFs[outKey][2]) == 3:
                    all_params = [self.outputMFs[ling][2] for ling in self.outputMFs]
                    all_params.sort(key=lambda x: x[1]) #sort params by order of b value

                    min_ps = all_params[max(0, all_params.index(self.outputMFs[outKey][2]) - 1)] #get MF just < the one changing
                    if min_ps == self.outputMFs[outKey][2]: 
                        min_ps = [minP, minP, minP]   #adjust if MF is the minimum one

                    max_ps = all_params[min(len(all_params) - 1, all_params.index(self.outputMFs[outKey][2]) + 1)] #get MF just > the one changing
                    if max_ps == self.outputMFs[outKey][2]: 
                        max_ps = [maxP, maxP, maxP]  #adjust if MF is the maximum one
                else: 
                    raise StandardError("haven't programmed this")

                if fs > 0: #for W(R,y_i)(t_i) > 0
                    if len(self.outputMFs[outKey][2]) == 2: #gaussian MF adjustment
                        raise StandardError("haven't programmed this")
                        
                    elif len(self.outputMFs[outKey][2]) == 3: #triangular MF adjustment
                        [a,b,c] = self.outputMFs[outKey][2][:] #get params
                        del_b = LR*error*(c - a)*self.layer2[rule]*(1-fs)
                        del_a = LR*(c - a)*self.layer2[rule] + del_b
                        del_c = -1*LR*(c - a)*self.layer2[rule] + del_b
                        b = min( max(b+del_b, min_ps[1]), max_ps[1] ) #bound b by nearest b's
                        a = min(b, min( max(a+del_a, min_ps[0]), min_ps[2] )) #bound a by nearest a and c and keep a < b
                        c = max(b, min( max(c+del_c, max_ps[0]), max_ps[2] )) #bound c by nearest a and c and keep c > b
                        self.outputMFs[outKey][2] = [a,b,c] #update params

                    elif len(self.outputMFs[outKey][2]) == 4: #trapezoidal MF adjustment
                        raise StandardError("haven't programmed this")

                else: #for W(R,y_i)(t_i) = 0
                    if len(self.outputMFs[outKey][2]) == 2: #gaussian MF adjustment
                        raise StandardError("haven't programmed this")
                        
                    elif len(self.outputMFs[outKey][2]) == 3: #triangular MF adjustment
                        [a,b,c] = self.outputMFs[outKey][2][:] #get params
                        del_b = LR*error*(c - a)*self.layer2[rule]*(1-fs)
                        del_a = np.sign(data-b)*LR*(c - a)*self.layer2[rule] + del_b
                        del_c = np.sign(data-b)*LR*(c - a)*self.layer2[rule] + del_b
                        b = min( max(b+del_b, min_ps[1]), max_ps[1] ) #bound b by nearest b's
                        a = min(b, min( max(a+del_a, min_ps[0]), min_ps[2] )) #bound a by nearest a and c and keep a < b
                        c = max(b, min( max(c+del_c, max_ps[0]), max_ps[2] )) #bound c by nearest a and c and keep c > b
                        self.outputMFs[outKey][2] = [a,b,c] #update params

                    elif len(self.outputMFs[outKey][2]) == 4: #trapezoidal MF adjustment
                        raise StandardError("haven't programmed this")
                        

                newMF = fuzzOps.paramsToMF(self.outputMFs[outKey][2]) #get updated MF
                self.outputMFs[outKey][0] = newMF[0]    #update MF
                self.outputMFs[outKey][1] = newMF[1]    #update MF
        
        #BACKPROP THROUGH RULE NODES:
        for rule in self.layer2:
            #get Rule Error: E_R = o_R(1-o_R) * sum(2*W(R,y)(t_i) - 1) * abs(error)
            # note: only one output node:
            outKey = self.connect2to3[rule].keys()[0] #get connection (outName,ling)
            fs = max(fuzz.fuzzy_and(self.outputMFs[outKey][0], self.outputMFs[outKey][1],
                                dataFuzz[0], dataFuzz[1])[1])
            E_R = self.layer2[rule]*(1-self.layer2[rule]) * (2*fs-1) * abs(error) #rule error
            
            for input in self.connect1to2[rule]:
                o_x = fuzz.defuzz(self.layer1[input[0]][0], self.layer1[input[0]][1], 'centroid') #crisp version of input
                
                if self.connect1to2[rule][input] > 0.0: #if FS from that input > 0

                    if fs > 0: #for W(R,y_i)(t_i) > 0
                        if len(self.inputMFs[input][2]) == 2: #gaussian MF adjustment
                            raise StandardError("haven't programmed this")
                            
                        elif len(self.inputMFs[input][2]) == 3: #triangular MF adjustment
                        
                            [a,b,c] = self.inputMFs[input][2][:] #get params
                            del_b = LR*E_R*(c - a)*(1-self.connect1to2[rule][input])*np.sign(o_x - b)
                            del_a = -LR*E_R*(c - a)*(1-self.connect1to2[rule][input])+del_b
                            del_c = LR*E_R*(c - a)*(1-self.connect1to2[rule][input])+del_b
                            #print 'LR', LR, 'E_R', E_R, 'c-a', c-a, '1-W(x,R)(ox)', (1-self.connect1to2[rule][input])
                            #print 'rule dels:', [del_a, del_b, del_c]
                            self.inputMFs[input][2] = [min(self.inputMFs[input][2][0]+del_a, self.inputMFs[input][2][1]+del_b), 
                                                       self.inputMFs[input][2][1]+del_b,
                                                       max(self.inputMFs[input][2][2]+del_c, self.inputMFs[input][2][1]+del_b) ] #update params                            
                        
                        elif len(self.inputMFs[input][2]) == 4: #trapezoidal MF adjustment
                            raise StandardError("haven't programmed this")
                            
                        newMF = fuzzOps.paramsToMF(self.inputMFs[input][2]) #get updated MF
                        self.inputMFs[input][0] = newMF[0]    #update MF
                        self.inputMFs[input][1] = newMF[1]    #update MF 
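
The parameter updates in backpropagate() all reduce to the same bounding step: shift the triangle [a, b, c] by the computed deltas, keep the peak b between the peaks of the neighbouring terms (or the variable's range) so adjacent MFs still overlap, and keep a <= b <= c. A minimal sketch of that step on made-up numbers follows; the helper name and values are hypothetical and not part of the project's API.

import numpy as np
import skfuzzy as fuzz

def bounded_triangle_update(params, deltas, lower_b, upper_b):
    """Shift a triangular MF [a, b, c] by [da, db, dc] under ordering constraints."""
    a, b, c = params
    da, db, dc = deltas
    b = min(max(b + db, lower_b), upper_b)  # bound the peak by its neighbours
    a = min(b, a + da)                      # keep a <= b
    c = max(b, c + dc)                      # keep c >= b
    return [a, b, c]

universe = np.linspace(0.0, 10.0, 101)
old_params = [2.0, 4.0, 6.0]
new_params = bounded_triangle_update(old_params, [0.1, 0.3, -0.2], lower_b=1.0, upper_b=7.0)
new_mf = fuzz.trimf(universe, new_params)   # rebuild the MF, like fuzzOps.paramsToMF above
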