def main(dataset_paths):
    for i in range(int(const.get('K'))):
        if const.get('PICKLE_LOAD'):
            car = pickleLoad('car')
        else:
            if const.get('VERBOSE'):
                print('car────────────────────────')
            car = Anfis(dataset_paths['car'], i=i)
            car.train(const.get('EPOCHS'))
            if const.get('VERBOSE'):
                printTime(car.anfis.time)

        if const.get('PICKLE_LOAD'):
            truck = pickleLoad('truck')
        else:
            if const.get('VERBOSE'):
                print('truck──────────────────────')
            truck = Anfis(dataset_paths['truck'], i=i)
            truck.train(const.get('EPOCHS'))
            if const.get('VERBOSE'):
                printTime(truck.anfis.time)

        printResult(car, 'car')
        printResult(truck, 'truck')

        if const.get('PICKLE_DUMP'):
            pickleDump(car, 'car')
            pickleDump(truck, 'truck')
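# Usage sketch (not part of the original module): main() expects a dict that
# maps each class to an iterable of dataset paths whose feature columns are
# concatenated by Anfis.__init__. The file names below are hypothetical
# placeholders.
if __name__ == '__main__':
    example_paths = {
        'car': ['datasets/car_glcm_a.csv', 'datasets/car_glcm_b.csv'],
        'truck': ['datasets/truck_glcm_a.csv', 'datasets/truck_glcm_b.csv'],
    }
    main(example_paths)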
def pickleDump(anfis, type):
    path = "{video}_{features}_{epochs}_{k}_{type}.pkl".format(
        video=const.get('VIDEO_NUM'),
        features=','.join(const.get('FEATURE')),
        epochs=const.get('EPOCHS'),
        k=const.get('K'),
        type=type,
    )
    pd.to_pickle(anfis, path)
    print("ANFIS for {0} is pickled at {1}".format(type, path))
def pickleLoad(type):
    path = "{video}_{features}_{epochs}_{k}_{type}.pkl".format(
        video=const.get('VIDEO_NUM'),
        features=','.join(const.get('FEATURE')),
        epochs=const.get('EPOCHS'),
        k=const.get('K'),
        type=type,
    )
    if not exists(path):
        print('{path} not found'.format(path=path))
        exit()
    return pd.read_pickle(path)
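# Worked example of the pickle naming scheme shared by pickleDump and
# pickleLoad. The constant values used here are hypothetical placeholders for
# whatever const provides; `pd` is pandas and `exists` is os.path.exists.
example_path = "{video}_{features}_{epochs}_{k}_{type}.pkl".format(
    video='1', features=','.join(['contrast', 'energy']),
    epochs=10, k=5, type='car')
# example_path == '1_contrast,energy_10_5_car.pkl'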
def generateMf(self, train_x, test_x):
    mf = []
    for i in range(len(train_x[0])):
        # Initial centres of the two Gaussian MFs: the feature means of the
        # training and test sets.
        mf1 = mean(train_x[:, i])
        mf2 = mean(test_x[:, i])
        # Manual offset of the initial 'contrast' centres for video 1.
        if int(const.get('VIDEO_NUM')) == 1 and const.get('FEATURE')[i] == 'contrast':
            mf1 += 30
            mf2 -= 30
        mf.append([
            ['gaussmf', {'mean': mf1, 'sigma': 10}],
            ['gaussmf', {'mean': mf2, 'sigma': 7}],
        ])
    return mf
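# generateMf() only chooses the initial parameters; the membership degree
# itself comes from the 'gaussmf' function. A minimal sketch, assuming the
# usual Gaussian definition exp(-(x - mean)^2 / (2 * sigma^2)):
import numpy as np

def gaussmf_sketch(x, mean, sigma):
    # Degree of membership of x in a Gaussian fuzzy set centred at `mean`.
    return np.exp(-((x - mean) ** 2) / (2.0 * sigma ** 2))

# gaussmf_sketch(50, mean=50, sigma=10) == 1.0 at the centre and decays
# towards 0 as x moves away from the mean.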
def printResult(anfis, name):
    accuracy = round(anfis.anfis.accuracy, 4)
    precision = round(anfis.anfis.precision, 4)
    recall = round(anfis.anfis.recall, 4)
    f_measure = calcF_measure(precision, recall)
    if const.get('VERBOSE'):
        print('{0}───────────────────────────────────'.format(name))
        print('accuracy', 'precision', 'recall', '\tf_measure', sep="\t")
        print(accuracy, precision, recall, f_measure, sep="\t\t")
        print()
    else:
        print(','.join(const.get('FEATURE')))
        print(const.get('VIDEO_NAME')[int(const.get('VIDEO_NUM'))])
        print(name)
        print(accuracy, f_measure, sep=',')
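# calcF_measure() is defined elsewhere; a minimal sketch assuming it computes
# the usual F1 score (the harmonic mean of precision and recall) with a guard
# against division by zero:
def calcF_measure_sketch(precision, recall):
    if precision + recall == 0:
        return 0.0
    return round(2 * precision * recall / (precision + recall), 4)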
def __init__(self, dataset_paths, k=5, i=0):
    train_x = []
    test_x = []
    for path in dataset_paths:
        dataset = self.loadDataset(path)
        train_data, test_data = self.splitDataset(dataset[:, 1:], k, i)
        last = train_data.shape[1] - 1
        # Feature columns are concatenated across datasets; the label column
        # is taken from the last dataset in the loop.
        train_x = concat(train_x, train_data[:, 0:last], 1)
        train_y = train_data[:, last]
        test_x = concat(test_x, test_data[:, 0:last], 1)
        test_y = test_data[:, last]
    if const.get('VERBOSE'):
        print('train data: ', train_x.shape[0])
        print('test data: ', test_x.shape[0])
        print()
    self.mfc = mf.MemFuncs(self.generateMf(train_x, test_x))
    self.anfis = anfis.ANFIS(train_x, train_y, test_x, test_y, self.mfc)
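# loadDataset, splitDataset and concat are helpers defined elsewhere. A
# minimal sketch of plausible implementations, assuming a plain k-fold split
# (fold i becomes the test set) and column-wise concatenation that tolerates
# an empty accumulator on the first call:
import numpy as np

def splitDataset_sketch(dataset, k, i):
    # Split the rows into k folds; fold i is the test set, the rest trains.
    folds = np.array_split(dataset, k)
    test_data = folds[i]
    train_data = np.concatenate(folds[:i] + folds[i + 1:])
    return train_data, test_data

def concat_sketch(acc, block, axis):
    # Start a new array on the first call, otherwise concatenate along axis.
    if len(acc) == 0:
        return block
    return np.concatenate((acc, block), axis=axis)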
def play(self, save, classify, anfises, start_from=0):
    if classify:
        if anfises is None:
            print('Error: you need to set anfis object')
            exit()
        elif anfises['car'].isTrained is False and anfises['truck'].isTrained is False:
            print('Error: you need to train anfis before')
            exit()

    # Skip ahead to the requested starting frame.
    for _ in range(start_from):
        self.moveToNextFrame()

    classifier = cv2.CascadeClassifier(const.get('CASCADE_PATH'))
    if save:
        os.makedirs(image_path(self.file_name), exist_ok=True)
        cnt = 1
    rectangle_color = None

    while self.current_color is not None:
        objects = classifier.detectMultiScale(
            self.current_color,
            scaleFactor=1.05,
            minNeighbors=2,
            minSize=(10, 10),
        )
        for obj in objects:
            (x, y) = tuple(obj[0:2])
            (w, h) = tuple(obj[2:4])
            if classify:
                # GLCM texture features of the detected patch feed both
                # ANFIS networks; an output > 0.5 counts as a positive.
                glcm = Object(self.current_gray[y:y + h, x:x + w])
                features = glcm.get(const.get('FEATURE'))
                result = [
                    twmeggs.predict(anfises['car'], np.array([features]))[0][0] > 0.5,
                    twmeggs.predict(anfises['truck'], np.array([features]))[0][0] > 0.5,
                ]
                if result == [True, False]:
                    rectangle_color = const.get('RECT_COLOR_CAR')
                elif result == [False, True]:
                    rectangle_color = const.get('RECT_COLOR_TRUCK')
            if save:
                cv2.imwrite(
                    image_path(self.file_name + '/{0}.png'.format(cnt)),
                    self.current_gray[y:y + h, x:x + w])
                cnt += 1
            if rectangle_color is not None:
                cv2.rectangle(
                    self.current_color,
                    (x, y),
                    (x + w, y + h),
                    rectangle_color,
                    2,
                )
        if cv2.waitKey(1) == ord('q'):
            break
        self.showFrame()
        self.moveToNextFrame()
    cv2.destroyAllWindows()
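# Object(...).get(const.get('FEATURE')) supplies GLCM texture features of the
# detected patch. A minimal sketch of that step, assuming scikit-image's
# graycomatrix/graycoprops (spelled greycomatrix/greycoprops before
# skimage 0.19); the property names come from the FEATURE constant, e.g.
# 'contrast' or 'energy'.
from skimage.feature import graycomatrix, graycoprops

def glcm_features_sketch(gray_patch, feature_names):
    # One GLCM at distance 1 and angle 0; one scalar per requested property.
    glcm = graycomatrix(gray_patch, distances=[1], angles=[0],
                        levels=256, symmetric=True, normed=True)
    return [graycoprops(glcm, name)[0, 0] for name in feature_names]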
def main(path=const.get('VIDEO_PATH'), save=False, classify=False, anfises=None):
    video = Video(path)
    video.play(save=bool(save), classify=bool(classify), anfises=anfises)
    video.close()
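# Usage sketch: replay the default video with classification enabled, reusing
# networks produced by the training entry point. `car` and `truck` are
# assumed to be trained Anfis wrappers, so their inner ANFIS objects are
# passed through to play():
#
#   main(save=False, classify=True,
#        anfises={'car': car.anfis, 'truck': truck.anfis})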
def train(self, epochs=5, tolerance=1e-5, initialGamma=1000, k=0.01):
    # Jang's hybrid off-line training
    start = time.time()
    convergence = False
    epoch = 1
    while (epoch < epochs) and (convergence is False):
        # layer four: forward pass
        [layerFour, wSum, w] = forwardHalfPass(self, self.trainX)

        # layer five: least squares estimate of the consequent parameters
        layerFive = np.array(self.LSE(layerFour, self.trainY, initialGamma))
        self.consequents = layerFive
        # np.dot: inner product
        layerFive = np.dot(layerFour, layerFive)

        # error
        error = np.mean((self.trainY - layerFive.T) ** 2)
        if const.get('VERBOSE'):
            if epoch > 1:
                diff = error - self.errors[-1]
                print('{epoch} error: {error} ({diff})'.format(
                    epoch=epoch, error=error, diff=diff))
            else:
                print('{epoch} error: {error}'.format(epoch=epoch, error=error))
        self.errors = np.append(self.errors, error)
        if error < self.min_error:
            self.min_error = error
        if error < tolerance:
            convergence = True

        # back propagation of the premise (membership function) parameters
        if convergence is False:
            cols = list(range(len(self.trainX[0, :])))
            # dE_dAlpha = [
            #     [array([a, b]), array([c, d])],
            #     [array([e, f]), array([g, h])]
            # ]
            dE_dAlpha = list(
                backprop(self, colX, cols, wSum, w, layerFive)
                for colX in range(self.trainX.shape[1]))
            # eta: learning rate
            eta = self.calcEta(k, dE_dAlpha)

            # handling of variables with a different number of MFs
            dAlpha = copy.deepcopy(dE_dAlpha)
            if not self.memFuncsHomo:
                for x in range(len(dE_dAlpha)):
                    for y in range(len(dE_dAlpha[x])):
                        for z in range(len(dE_dAlpha[x][y])):
                            dAlpha[x][y][z] = -eta * dE_dAlpha[x][y][z]
            else:
                # dAlpha = [
                #     [[-eta*a, -eta*b], [-eta*c, -eta*d]],
                #     [[-eta*e, -eta*f], [-eta*g, -eta*h]]
                # ]
                dAlpha = -eta * np.array(dE_dAlpha)

            for i in range(len(self.memFuncs)):
                for MFs in range(len(self.memFuncsByVariable[i])):
                    # paramList = ['mean', 'sigma']
                    paramList = sorted(self.memFuncs[i][MFs][1])
                    for param in range(len(paramList)):
                        # update the membership function parameters
                        self.memFuncs[i][MFs][1][paramList[param]] += dAlpha[i][MFs][param]

        epoch += 1

    self.fittedValues = predict(self, self.testX)
    self.aggregate()
    self.time = round(time.time() - start, 2)
    self.isTrained = True
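# The backward pass adjusts the 'mean' and 'sigma' of every Gaussian
# membership function. A minimal sketch of the standard partial derivatives
# such a gradient step relies on (what backprop presumably evaluates for
# 'gaussmf' parameters):
import numpy as np

def gaussmf_gradients_sketch(x, mean, sigma):
    mu = np.exp(-((x - mean) ** 2) / (2.0 * sigma ** 2))
    d_mu_d_mean = mu * (x - mean) / sigma ** 2        # partial w.r.t. mean
    d_mu_d_sigma = mu * (x - mean) ** 2 / sigma ** 3  # partial w.r.t. sigma
    return d_mu_d_mean, d_mu_d_sigma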