def tune_noise_classifier():
    """Grid-search the NoiseClassifier's tuning parameters.

    Loads the Kylberg dataset at fixed settings, converts each image to a
    BM3DELBPImage, then evaluates every combination of BM3D sigma and
    homomorphic-filter (cutoff, a, b) values in a multiprocessing Pool.
    Each result is appended to a CSV log and the top three F1 scores are
    printed at the end.
    """
    GlobalConfig.set('dataset', 'kylberg')
    GlobalConfig.set('ECS', True)
    GlobalConfig.set('algorithm', 'NoiseClassifier')
    GlobalConfig.set('scale', 0.5)
    GlobalConfig.set(
        'CWD',
        r'\\filestore.soton.ac.uk\users\ojvl1g17\mydocuments\COMP3200-Texture-Classification'
    )
    #GlobalConfig.set('CWD', os.getcwd())
    GlobalConfig.set('folds', 10)
    cores = psutil.cpu_count()
    GlobalConfig.set('cpu_count', cores)

    dataset = DatasetManager.KylbergTextures(num_classes=28, data_ratio=0.5)
    images = dataset.load_data()
    gc.collect()

    # Wrap every loaded image in a BM3DELBPImage
    bm3d_images = [BM3DELBP.BM3DELBPImage(image) for image in images]
    print("Image dataset loaded, loaded {} images".format(len(images)))

    noise_classifier = NoiseClassifier()
    cross_validator = dataset.get_cross_validator()

    # Parameter grids to search over
    bm3d_sigma = [10, 30, 40, 50]
    homomorphic_cutoff = [0.1, 0.5, 5, 10]
    homomorphic_a = [0.5, 0.75, 1.0]
    homomorphic_b = [0.1, 0.5, 1.0, 1.25]

    # Every combination of settings, as (sigma, cutoff, a, b) tuples
    settings_jobs = [
        (sigma_val, cutoff, a, b)
        for sigma_val in bm3d_sigma
        for cutoff in homomorphic_cutoff
        for a in homomorphic_a
        for b in homomorphic_b
    ]

    results = []  # List of tuples (F1, sigma_val, cutoff, a, b)
    out_csv = os.path.join(GlobalConfig.get('CWD'), 'NoiseClassifierTuning',
                           'Results.txt')

    with Pool(processes=GlobalConfig.get('cpu_count'),
              maxtasksperchild=50) as pool:
        # Evaluate each settings tuple against the shared classifier/dataset
        for result in tqdm.tqdm(pool.istarmap(
                do_classification,
                zip(settings_jobs, repeat(noise_classifier),
                    repeat(cross_validator), repeat(bm3d_images))),
                total=len(settings_jobs),
                desc='NoiseClassifier tuning'):
            f1, sigma, cutoff, a, b = result
            # Log to the CSV file, writing the headings row first if the
            # file does not exist yet
            if os.path.isfile(out_csv):
                with open(out_csv, 'a', encoding="utf-8",
                          newline='') as resultsfile:
                    writer = csv.writer(resultsfile)
                    writer.writerow([f1, sigma, cutoff, a, b])
            else:
                with open(out_csv, 'w', encoding="utf-8",
                          newline='') as resultsfile:
                    writer = csv.writer(resultsfile)
                    writer.writerow(['f1', 'sigma_psd', 'cutoff', 'a', 'b'])
                    writer.writerow([f1, sigma, cutoff, a, b])
            results.append(result)

    # Sort largest to smallest, by F1 score
    results.sort(key=lambda tup: tup[0], reverse=True)
    print("Finished tuning parameters.")
    print("The top 3 results were:")
    for f1, sigma, cutoff, a, b in (results[0], results[1], results[2]):
        print("F1: {}, sigma_val: {}, cutoff_freq: {}, a: {}, b: {}".format(
            f1, sigma, cutoff, a, b))
def main():
    """Entry point: parse CLI args, load the dataset, compute featurevectors,
    run cross-validated classification, and write out the results.

    Raises:
        ValueError: on an invalid/unhandled argument value, an unconfigured
            or unknown dataset, or an invalid algorithm choice.
    """
    # Parse Args.
    # 'scale' allows the image_scaled scale to be set. Eg: 0.25, 0.5, 1.0
    argList = sys.argv[1:]
    shortArg = 'a:d:t:s:S:k:rn:i:me'
    longArg = [
        'algorithm=', 'dataset=', 'train-ratio=', 'scale=', 'test-scale=',
        'folds=', 'rotations', 'noise=', 'noise-intensity=', 'multiprocess',
        'example', 'data-ratio=', 'mrlbp-classifier=', 'noise-train', 'ecs',
        'debug'
    ]

    valid_algorithms = [
        'RLBP', 'MRLBP', 'MRELBP', 'BM3DELBP', 'NoiseClassifier'
    ]
    valid_datasets = ['kylberg']
    valid_noise = ['gaussian', 'speckle', 'salt-pepper']
    valid_mrlbp_classifiers = ['svm', 'knn']

    try:
        args, vals = getopt.getopt(argList, shortArg, longArg)
        for arg, val in args:
            if arg in ('-a', '--algorithm'):
                if val in valid_algorithms:
                    print('Using algorithm:', val)
                    GlobalConfig.set("algorithm", val)
                else:
                    raise ValueError(
                        'Invalid algorithm configured, choose one of the following:',
                        valid_algorithms)
            elif arg in ('-d', '--dataset'):
                if val in valid_datasets:
                    print("Using dataset:", val)
                    GlobalConfig.set("dataset", val)
                else:
                    raise ValueError(
                        'Invalid dataset configured, choose one of the following:',
                        valid_datasets)
            # BUG FIX: the declared long option is 'train-ratio=' (see
            # longArg above); this branch previously matched '--train-test',
            # so '--train-ratio' fell through to the unhandled-argument error.
            elif arg in ('-t', '--train-ratio'):
                if 0 < float(val) <= 1.0:
                    print('Using train-ratio ratio of', val)
                    GlobalConfig.set('train_ratio', float(val))
                else:
                    raise ValueError(
                        'Train-test ratio must be 0 < train-test <= 1.0')
            elif arg in ('-s', '--scale'):
                if 0 < float(val) <= 1.0:
                    print('Using training image scale:', val)
                    GlobalConfig.set('scale', float(val))
                else:
                    raise ValueError('Scale must be 0 < scale <= 1.0')
            elif arg in ('-S', '--test-scale'):
                if 0 < float(val) <= 1.0:
                    print('Using testing image scale:', val)
                    GlobalConfig.set('test_scale', float(val))
                else:
                    raise ValueError('Test scale must be 0 < scale <= 1.0')
            elif arg in ('-k', '--folds'):
                print('Doing {} folds'.format(val))
                GlobalConfig.set("folds", int(val))
            elif arg in ('-r', '--rotations'):
                print('Using rotated image_scaled sources')
                GlobalConfig.set("rotate", True)
            elif arg in ('-n', '--noise'):
                if val in valid_noise:
                    print('Applying noise:', val)
                    GlobalConfig.set("noise", val)
                else:
                    raise ValueError(
                        'Invalid noise type, choose one of the following:',
                        valid_noise)
            elif arg in ('-i', '--noise-intensity'):
                print('Using noise intensity (sigma / ratio) of:', val)
                GlobalConfig.set("noise_val", float(val))
            elif arg in ('-m', '--multiprocess'):
                cores = psutil.cpu_count()
                print('Using {} processor cores for computing featurevectors'.
                      format(cores))
                GlobalConfig.set('multiprocess', True)
                GlobalConfig.set('cpu_count', cores)
            elif arg in ('-e', '--example'):
                print('Generating algorithm example image_scaled')
                GlobalConfig.set('examples', True)
            elif arg == '--data-ratio':
                if 0 < float(val) <= 1.0:
                    print('Using dataset ratio:', val)
                    GlobalConfig.set('data_ratio', float(val))
                else:
                    raise ValueError('Data ratio must be 0 < ratio <= 1.0')
            elif arg == '--mrlbp-classifier':
                if val in valid_mrlbp_classifiers:
                    print(
                        "MRLBP algorithm (if configured) will use {} classifier"
                        .format(val))
                    GlobalConfig.set('mrlbp_classifier', val)
                else:
                    raise ValueError(
                        'Invalid classifier chosen for mrlbp, choose one of the following:',
                        valid_mrlbp_classifiers)
            elif arg == '--noise-train':
                print(
                    "Applying noise to the training dataset as well as the test dataset"
                )
                GlobalConfig.set('train_noise', True)
            elif arg == '--ecs':
                # Raw string avoids the invalid '\L' escape warning; the
                # printed characters are unchanged.
                print(r"Loading dataset from C:\Local")
                GlobalConfig.set('ECS', True)
            elif arg == '--debug':
                print("Running in debug mode")
                GlobalConfig.set('debug', True)
            else:
                raise ValueError('Unhandled argument provided:', arg)
    except getopt.error as err:
        # Best-effort: report the parse error and continue with defaults.
        print(str(err))

    # Resolve the working directory (network share on ECS machines)
    if GlobalConfig.get('ECS'):
        GlobalConfig.set(
            'CWD',
            r'\\filestore.soton.ac.uk\users\ojvl1g17\mydocuments\COMP3200-Texture-Classification'
        )
    else:
        GlobalConfig.set('CWD', os.getcwd())

    if GlobalConfig.get('examples'):
        write_examples()

    # Load configured Dataset
    if GlobalConfig.get('dataset') == 'kylberg':
        if GlobalConfig.get('debug'):
            # To save time in debug mode, only load one class and load a
            # smaller proportion of it (25% of samples)
            kylberg = DatasetManager.KylbergTextures(
                num_classes=2, data_ratio=GlobalConfig.get('data_ratio'))
        else:
            kylberg = DatasetManager.KylbergTextures(
                num_classes=28, data_ratio=GlobalConfig.get('data_ratio'))
        # Load Dataset & Cross Validator
        dataset = kylberg.load_data()
        cross_validator = kylberg.get_cross_validator()
        print("Dataset loaded")
    elif GlobalConfig.get('dataset') is None:
        raise ValueError('No Dataset configured')
    else:
        raise ValueError('Invalid dataset')

    if GlobalConfig.get('rotate'):
        dataset_folder = GlobalConfig.get('dataset') + '-rotated'
    else:
        dataset_folder = GlobalConfig.get('dataset')

    out_folder = os.path.join(GlobalConfig.get('CWD'), 'out',
                              GlobalConfig.get('algorithm'), dataset_folder)

    # Initialise algorithm
    if GlobalConfig.get('algorithm') == 'RLBP':
        print("Applying RLBP algorithm")
        algorithm = RLBP.RobustLBP()
    elif GlobalConfig.get('algorithm') == 'MRLBP':
        print("Applying MRLBP algorithm")
        algorithm = RLBP.MultiresolutionLBP(p=[8, 16, 24], r=[1, 2, 3])
    elif GlobalConfig.get('algorithm') == 'MRELBP':
        print("Applying MRELBP algorithm")
        algorithm = MRELBP.MedianRobustExtendedLBP(r1=[2, 4, 6, 8],
                                                   p=8,
                                                   w_center=3,
                                                   w_r1=[3, 5, 7, 9])
    elif GlobalConfig.get('algorithm') == 'BM3DELBP':
        print("Applying BM3DELBP algorithm")
        algorithm = BM3DELBP.BM3DELBP()
    elif GlobalConfig.get('algorithm') == 'NoiseClassifier':
        # Noise Classifier is used in BM3DELBP algorithm usually, this
        # allows for benchmarking of the classifier alone
        algorithm = NoiseClassifier.NoiseClassifier()
    else:
        raise ValueError('Invalid algorithm choice')

    # Get the Training out directory (i.e. Images without
    # scaling/rotation/noise)
    train_out_dir = os.path.join(
        out_folder, algorithm.get_outdir(noisy_image=False,
                                         scaled_image=False))

    # Get the Testing out directory (i.e. Images with scaling/rotation/noise)
    noisy_image = GlobalConfig.get('noise') is not None
    scaled_image = GlobalConfig.get('test_scale') is not None
    test_out_dir = os.path.join(
        out_folder, algorithm.get_outdir(noisy_image, scaled_image))

    # Out paths for the noise classifier
    noise_out_dir = os.path.join(
        GlobalConfig.get('CWD'), 'out', 'NoiseClassifier', dataset_folder,
        "scale-{}".format(int(GlobalConfig.get('scale') * 100)))
    test_noise_out_dir = os.path.join(
        GlobalConfig.get('CWD'), 'out', 'NoiseClassifier', dataset_folder,
        algorithm.get_outdir(noisy_image, scaled_image))

    print("Replacing DatasetManager.Image with BM3DELBPImages")
    # Convert DatasetManager.Image into BM3DELBP.BM3DELBPImage
    if GlobalConfig.get('algorithm') == 'NoiseClassifier' or GlobalConfig.get(
            'algorithm') == 'BM3DELBP':
        for index, img in enumerate(dataset):
            dataset[index] = BM3DELBP.BM3DELBPImage(img)
            # Also convert rotated images if necessary
            if img.test_rotations is not None:
                for index, rotated_img in enumerate(img.test_rotations):
                    img.test_rotations[index] = BM3DELBP.BM3DELBPImage(
                        rotated_img)

    if GlobalConfig.get('multiprocess'):
        # Pair each image with its index so pool workers can report back
        # which dataset slot they processed
        for index, img in enumerate(dataset):
            dataset[index] = (index, img)
        # Recycle workers periodically when rotations inflate memory use
        if GlobalConfig.get('rotate'):
            maxtasks = 50
        else:
            maxtasks = None
        if GlobalConfig.get(
                'algorithm') == 'NoiseClassifier' or GlobalConfig.get(
                    'algorithm') == 'BM3DELBP':
            with Pool(processes=GlobalConfig.get('cpu_count'),
                      maxtasksperchild=maxtasks) as pool:
                # Generate image noise featurevectors
                for index, image in tqdm.tqdm(pool.istarmap(
                        describe_noise_pool,
                        zip(dataset, repeat(noise_out_dir),
                            repeat(test_noise_out_dir))),
                        total=len(dataset),
                        desc='Noise Featurevectors'):
                    dataset[index] = image
        else:
            with Pool(processes=GlobalConfig.get('cpu_count'),
                      maxtasksperchild=maxtasks) as pool:
                # Generate featurevectors
                for index, image in tqdm.tqdm(pool.istarmap(
                        describe_image_pool,
                        zip(repeat(algorithm), dataset, repeat(train_out_dir),
                            repeat(test_out_dir))),
                        total=len(dataset),
                        desc='Texture Featurevectors'):
                    dataset[index] = image
    else:
        # Process the images without using multiprocessing Pools
        if GlobalConfig.get(
                'algorithm') == 'NoiseClassifier' or GlobalConfig.get(
                    'algorithm') == 'BM3DELBP':
            for index, img in enumerate(dataset):
                # Generate image noise featurevectors
                describe_noise(img, noise_out_dir, test_noise_out_dir)
        else:
            print("BEGINNING TIMER:")
            start = timer()
            for index, img in enumerate(dataset):
                # Generate featurevetors
                describe_image(algorithm, img, train_out_dir, test_out_dir)
            end = timer()
            print("TIME TAKEN:", end - start)

    # Train models and perform predictions
    if GlobalConfig.get('algorithm') == 'RLBP':
        predictor = RLBP.RobustLBPPredictor(dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'MRLBP':
        print("Performing MRLBP Classification")
        predictor = RLBP.MultiresolutionLBPPredictor(dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'MRELBP':
        print("Performing MRELBP Classification")
        predictor = MRELBP.MedianRobustExtendedLBPPredictor(
            dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'BM3DELBP':
        print("Performing BM3DELBP Classification")
        predictor = BM3DELBP.BM3DELBPPredictor(dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'NoiseClassifier':
        print("Applying noise classifier")
        predictor = BM3DELBP.NoiseTypePredictor(dataset, cross_validator)
    else:
        raise ValueError('Invalid algorithm choice')

    # Get the test label & test prediction for every fold of cross validation
    y_test, y_predicted = predictor.begin_cross_validation()
    if GlobalConfig.get('algorithm') == 'NoiseClassifier':
        if GlobalConfig.get('noise') is None:
            classes = ['no-noise', 'gaussian', 'speckle', 'salt-pepper']
        else:
            classes = ['gaussian', 'speckle', 'salt-pepper']
    else:
        classes = kylberg.classes

    # Display confusion matrix
    ClassificationUtils.pretty_print_conf_matrix(
        y_test,
        y_predicted,
        classes,
        title='{} Confusion Matrix'.format(GlobalConfig.get('algorithm')),
        out_dir=test_out_dir)

    # Display classification report
    ClassificationUtils.make_classification_report(y_test, y_predicted,
                                                   classes, test_out_dir)
class MainWindow(QMainWindow):
    """Main window of the Don't Starve Together dedicated-server manager.

    Left column: five save-slot ("cluster") buttons plus archive and tool
    buttons. Right side: a stacked layout switching between the cluster tab
    and the settings widget.
    """

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setupUi()
        self.initData()

    def setupUi(self):
        """Build the fixed-size, centred two-pane window layout."""
        self.setWindowTitle("饥荒联机版服务器管理工具")
        # Set the initial window size
        self.setFixedSize(1085, 700)
        # Centre the window on first show
        self.center()
        # Overall layout: left / right panes
        pagelayout = QGridLayout()

        # --- Top-left pane ---
        top_left_frame = QFrame(self)
        top_left_frame.setFrameShape(QFrame.StyledPanel)
        # Vertical button layout
        top_button_layout = QVBoxLayout(top_left_frame)
        # Five save-slot buttons
        self.cluster_btns = {}
        for b_index in range(5):
            self.cluster_btns[b_index] = QPushButton(top_left_frame)
            self.cluster_btns[b_index].setFixedSize(180, 30)
            self.cluster_btns[b_index].setText("存档槽 " + str(b_index + 1))
            # cluster_btns[b_index].setEnabled(False)
            # Buttons carry a 1-based slot index used by set_cluster()
            self.cluster_btns[b_index].index = b_index + 1
            self.cluster_btns[b_index].clicked.connect(self.set_cluster)
            top_button_layout.addWidget(self.cluster_btns[b_index])
        # Delete-save button
        delete_cluster_btn = QPushButton(top_left_frame)
        delete_cluster_btn.setFixedSize(180, 30)
        delete_cluster_btn.setText("删除存档")
        top_button_layout.addWidget(delete_cluster_btn)
        delete_cluster_btn.clicked.connect(self.deleteCluster)
        # Export-remote-save button (not yet wired to a handler)
        export_remote_cluster_btn = QPushButton(top_left_frame)
        export_remote_cluster_btn.setText("导出远程存档")
        export_remote_cluster_btn.setFixedSize(180, 30)
        top_button_layout.addWidget(export_remote_cluster_btn)
        # Import-local-save button (not yet wired to a handler)
        import_local_cluster_btn = QPushButton(top_left_frame)
        import_local_cluster_btn.setText("导入本地存档")
        import_local_cluster_btn.setFixedSize(180, 30)
        top_button_layout.addWidget(import_local_cluster_btn)

        # --- Bottom-left pane ---
        bottom_left_frame = QFrame(self)
        bottom_left_frame.setFrameShape(QFrame.StyledPanel)
        # Vertical button layout
        bottom_button_layout = QVBoxLayout(bottom_left_frame)
        # Console button
        console_btn = QPushButton(bottom_left_frame)
        console_btn.setText("控制台")
        console_btn.setFixedSize(180, 30)
        bottom_button_layout.addWidget(console_btn)
        # Settings button
        settings_btn = QPushButton(bottom_left_frame)
        settings_btn.setText("软件设置")
        settings_btn.setFixedSize(180, 30)
        settings_btn.clicked.connect(self.soft_settings)
        bottom_button_layout.addWidget(settings_btn)
        # Browse-online-servers button
        browser_online_server_btn = QPushButton(bottom_left_frame)
        browser_online_server_btn.setText("浏览在线服务器")
        browser_online_server_btn.setFixedSize(180, 30)
        bottom_button_layout.addWidget(browser_online_server_btn)
        # Help-and-about button
        help_and_about_btn = QPushButton(bottom_left_frame)
        help_and_about_btn.setText("使用帮助和关于")
        help_and_about_btn.setFixedSize(180, 30)
        bottom_button_layout.addWidget(help_and_about_btn)
        # Spacer pushes the buttons to the top
        spacerItem = QSpacerItem(20, 20, QSizePolicy.Minimum,
                                 QSizePolicy.Expanding)
        bottom_button_layout.addItem(spacerItem)

        # --- Right pane ---
        right_frame = QFrame(self)
        right_frame.setFrameShape(QFrame.StyledPanel)
        # Stacked layout: index 0 = cluster tab, index 1 = settings widget
        self.right_layout = QStackedLayout(right_frame)
        self.settings_widget = SettingsWidget()
        self.cluster_tab = MainTab()
        self.right_layout.addWidget(self.cluster_tab)
        self.right_layout.addWidget(self.settings_widget)

        # Split the panes
        splitterV = QSplitter(Qt.Vertical)
        splitterV.addWidget(top_left_frame)
        splitterV.addWidget(bottom_left_frame)
        # Fix top-left height
        top_left_frame.setFixedHeight(330)
        splitterH = QSplitter(Qt.Horizontal)
        splitterH.addWidget(splitterV)
        splitterH.addWidget(right_frame)
        # Fix left-column width
        top_left_frame.setFixedWidth(200)
        # Install the layout as the central widget
        widget = QWidget()
        pagelayout.addWidget(splitterH)
        widget.setLayout(pagelayout)
        self.setCentralWidget(widget)

    # Button handler: show the settings page
    def soft_settings(self):
        self.right_layout.setCurrentIndex(1)

    # Refresh save-slot button styling: active slot normal, others grayed
    def refresh_cluster_btn_state(self, index):
        for i in self.cluster_btns:
            if index == self.cluster_btns[i].index:
                self.cluster_btns[i].setStyleSheet("")
            else:
                self.cluster_btns[i].setStyleSheet("color:gray")

    # Select a save slot: persist the choice and reload the cluster tabs
    def set_cluster(self):
        self.current_cluster_index = self.sender().index
        self.tempconfig.set("TEMP", "cluster_index",
                            str(self.current_cluster_index))
        self.tempconfig.save(TEMP_FILE)
        self.refresh_cluster_btn_state(self.current_cluster_index)
        self.right_layout.setCurrentIndex(0)
        self.mk_cluster_dir()
        self.cluster_tab.cluster_settings_tab.current_cluster_file = os.path.join(
            self.current_cluster_folder, "cluster.ini")
        self.cluster_tab.cluster_settings_tab.read_cluster_data(
            self.cluster_tab.cluster_settings_tab.current_cluster_file)
        self.cluster_tab.cluster_settings_tab.setServerIP(
            self.cluster_tab.cluster_settings_tab.masterip,
            self.cluster_tab.cluster_settings_tab.getServerIP())
        self.cluster_tab.shard_settings_tab.initShardTab()

    def deleteCluster(self):
        """Ask for confirmation, then wipe and re-initialise the current slot."""
        cindex = self.current_cluster_index
        delm = QMessageBox.warning(self, "删除警告",
                                   "你确定要删除存档槽" + str(cindex) + "?",
                                   QMessageBox.Yes | QMessageBox.No,
                                   QMessageBox.No)
        if delm == QMessageBox.Yes:
            sdir = os.path.join(CLUSTER_DIR, "Cluster_" + str(cindex))
            if os.path.exists(sdir):
                shutil.rmtree(sdir)
            self.right_layout.setCurrentIndex(0)
            self.mk_cluster_dir()
            self.cluster_tab.setCurrentIndex(0)
            self.cluster_tab.cluster_settings_tab.current_cluster_file = os.path.join(
                self.current_cluster_folder, "cluster.ini")
            self.cluster_tab.cluster_settings_tab.read_cluster_data(
                self.cluster_tab.cluster_settings_tab.current_cluster_file)
            self.cluster_tab.cluster_settings_tab.setServerIP(
                self.cluster_tab.cluster_settings_tab.masterip,
                self.cluster_tab.cluster_settings_tab.getServerIP())
            self.cluster_tab.shard_settings_tab.initShardTab()
            QMessageBox.information(self, "删除完毕",
                                    "存档槽" + str(cindex) + "已删除并重置!",
                                    QMessageBox.Yes)

    def mk_cluster_dir(self):
        """Ensure the folder for the current cluster slot exists."""
        self.current_cluster_folder = os.path.join(
            CLUSTER_DIR, "Cluster_" + str(self.current_cluster_index))
        if not os.path.exists(self.current_cluster_folder):
            os.mkdir(self.current_cluster_folder)

    def init_cluster_data(self, index):
        """Load the cluster file for the current slot into the settings tab."""
        self.mk_cluster_dir()
        self.right_layout.setCurrentIndex(0)
        self.refresh_cluster_btn_state(self.current_cluster_index)
        self.cluster_tab.cluster_settings_tab.current_cluster_file = os.path.join(
            self.current_cluster_folder, "cluster.ini")
        self.cluster_tab.cluster_settings_tab.read_cluster_data(
            self.cluster_tab.cluster_settings_tab.current_cluster_file)

    def initDir(self):
        """Create the root/cluster directories and a default temp config."""
        if not os.path.exists(ROOT_DIR):
            os.mkdir(ROOT_DIR)
        if not os.path.exists(CLUSTER_DIR):
            os.mkdir(CLUSTER_DIR)
        if not os.path.exists(TEMP_FILE):
            self.tempconfig = GlobalConfig(TEMP_FILE)
            self.tempconfig.add_section("TEMP")
            self.tempconfig.set("TEMP", "cluster_index", "1")
            self.tempconfig.save(TEMP_FILE)

    def initData(self):
        """Restore the last-used cluster slot (default 1) and load its data."""
        self.initDir()
        self.tempconfig = GlobalConfig(TEMP_FILE)
        if os.path.exists(TEMP_FILE):
            self.current_cluster_index = int(
                self.tempconfig.get("TEMP", "cluster_index"))
        else:
            self.current_cluster_index = 1
        self.init_cluster_data(self.current_cluster_index)

    # Centre the window on the screen
    def center(self):
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        # BUG FIX: QWidget.move() requires int coordinates; the original
        # passed floats from '/ 2', which raises TypeError under PyQt5 on
        # Python 3. Floor division keeps the same position with int args.
        self.move((screen.width() - size.width()) // 2,
                  (screen.height() - size.height()) // 2)