def mnist_test():
    """Evaluate a trained MNIST model on the held-out test set.

    Reads the config path (-c) and model weight path (-m) from the CLI;
    prints usage help and exits when the configuration is invalid.
    """
    print('[INFO] 解析配置…')
    parser = None
    config = None
    model_path = None
    try:
        args, parser = get_test_args()
        config = process_config(args.config)
        model_path = args.model
    except Exception as e:
        print('[Exception] 配置无效, %s' % e)
        if parser:
            parser.print_help()
        print('[Exception] 参考: python main_test.py -c configs/simple_mnist_config.json '
              '-m simple_mnist.weights.10-0.24.hdf5')
        exit(0)

    np.random.seed(47)  # fixed seed for reproducibility

    print('[INFO] 加载数据…')
    dl = MnistDL()
    # BUGFIX(perf): fetch the test split once — the original called
    # get_test_data() twice, loading the dataset twice.
    test_pair = dl.get_test_data()
    test_data = test_pair[0]
    print(test_data.shape)
    test_label = test_pair[1]
    print(test_label.shape)

    print('[INFO] 测试模型…')
    infer = MnistInfer(model_path, config)
    result = infer.model.evaluate(test_data, test_label,
                                  batch_size=config.batch_size)
    print('[INFO] loss: %.4f, accuracy: %.4f' % (result[0], result[1]))
    print('[INFO] 测试完成…')
def main_train():
    """Train the MXNet triplet model with a fixed config path.

    CLI parsing was already disabled in the original; the dead
    commented-out block has been removed.
    :return: None
    """
    # BUGFIX(compat): single-argument print() calls behave identically
    # under Python 2 (parenthesized expression) and are valid Python 3,
    # unlike the original py2-only `print x` statements.
    print('[INFO] 解析配置...')
    config = process_config('configs/triplet_config.json')

    print('[INFO] 加载数据...')
    dl = TripletDL(config=config)

    print('[INFO] 构造网络...')
    model = TripletModelMxnet(config=config)

    print('[INFO] 训练网络...')
    trainer = TripletTrainerMxnet(
        model=model.model,
        data=[dl.get_train_data(), dl.get_test_data()],
        config=config)
    trainer.train()

    print('[INFO] 训练完成...')
def main_test():
    """Predict a single MNIST sample with a trained SimpleMnist model.

    Reads -c (config) and -m (model weights) from the CLI; prints usage
    help and exits on invalid configuration.
    """
    print('[INFO] 解析配置...')
    parser = None
    config = None
    model_path = None
    try:
        args, parser = get_test_args()
        config = process_config(args.config)
        model_path = args.model
    except Exception as e:
        print('[Exception] 配置无效, %s' % e)
        if parser:
            parser.print_help()
        print('[Exception] 参考: python main_test.py -c configs/simple_mnist_config.json '
              '-m simple_mnist.weights.10-0.24.hdf5')
        exit(0)

    np.random.seed(47)  # fixed random seed

    print('[INFO] 加载数据...')
    dl = SimpleMnistDL()
    # BUGFIX(perf): load the test split once — the original called
    # get_test_data() twice.
    test_pair = dl.get_test_data()
    test_data = np.expand_dims(test_pair[0][0], axis=0)  # add batch dim
    test_label = np.argmax(test_pair[1][0])  # one-hot -> class index

    print('[INFO] 预测数据...')
    infer = SimpleMnistInfer(model_path, config)
    infer_label = np.argmax(infer.predict(test_data))
    print('[INFO] 真实Label: %s, 预测Label: %s' % (test_label, infer_label))
    print('[INFO] 预测完成...')
def test_vgg_manga():
    """Evaluate the VGG manga face model on the test-data generator."""
    manga_dir = 'manga109_frame_face'
    print('[INFO] 解析配置…')
    parser = None
    config = None
    model_path = None
    try:
        args, parser = get_test_args()
        config = process_config(args.config)
        model_path = args.model
    except Exception as err:
        print('[Exception] 配置无效, %s' % err)
        if parser:
            parser.print_help()
        print('[Exception] 参考: python main_test.py -c configs/simple_mnist_config.json '
              '-m simple_mnist.weights.10-0.24.hdf5')
        exit(0)

    np.random.seed(47)

    print('[INFO] 加载数据…')
    loader = VGGMangaDL(config=config, manga_dir=manga_dir)
    test_gen = loader.get_test_data()

    print('[INFO] 测试模型…')
    infer = VGGMangaInfer(model_path, config)
    scores = infer.model.evaluate_generator(test_gen)
    print(infer.model.metrics_names)
    print(scores)
    print('[INFO] 测试完成…')
def main_train():
    """Train the SimpleMnist model.

    Reads -c (config) from the CLI; prints usage help and exits on
    invalid configuration.
    :return: None
    """
    # BUGFIX(compat): parenthesized single-argument print() works
    # identically on Python 2 and is valid Python 3, unlike the
    # original py2-only `print x` statements.
    print('[INFO] 解析配置...')
    parser = None
    config = None
    try:
        args, parser = get_train_args()
        config = process_config(args.config)
    except Exception as e:
        print('[Exception] 配置无效, %s' % e)
        if parser:
            parser.print_help()
        print('[Exception] 参考: python main_train.py -c configs/simple_mnist_config.json')
        exit(0)

    print('[INFO] 加载数据...')
    dl = SimpleMnistDL(config=config)

    print('[INFO] 构造网络...')
    model = SimpleMnistModel(config=config)

    print('[INFO] 训练网络...')
    trainer = SimpleMnistTrainer(
        model=model.model,
        data=[dl.get_train_data(), dl.get_test_data()],
        config=config)
    trainer.train()

    print('[INFO] 训练完成...')
def test_main():
    """Predict one test sample with a hard-coded SimpleMnist checkpoint."""
    # BUGFIX(compat): py2-only `print x` statements replaced with
    # parenthesized print() (identical on py2, valid py3).
    print('[INFO] 解析配置...')
    config = process_config('configs/simple_mnist_config.json')

    print('[INFO] 加载数据...')
    dl = DclLoader()
    # BUGFIX(perf): load the test split once — the original called
    # get_test_data() twice.
    test_pair = dl.get_test_data()
    test_data = np.expand_dims(test_pair[0][0], axis=0)  # add batch dim
    test_label = np.argmax(test_pair[1][0])  # one-hot -> class index

    print('[INFO] 预测数据...')
    infer = SimpleMnistInfer("simple_mnist.weights.16-0.19.hdf5", config)
    infer_label = np.argmax(infer.predict(test_data))
    print('[INFO] 真实Label: %s, 预测Label: %s' % (test_label, infer_label))
    print('[INFO] 预测完成...')
def test_main():
    """Predict one MNIST test sample with a saved example checkpoint."""
    # Disable HTTPS certificate verification so the dataset download
    # succeeds on hosts with missing CA bundles.
    ssl._create_default_https_context = ssl._create_unverified_context

    print('[INFO] 解析配置...')
    config = process_config('config/simple_mnist_config.json')

    print('[INFO] 加载数据...')
    dl = MnistDLExample()
    # BUGFIX(perf): fetch the test split once — the original called
    # get_test_data() twice.
    test_pair = dl.get_test_data()
    test_data = np.expand_dims(test_pair[0][0], axis=0)  # add batch dim
    test_label = np.argmax(test_pair[1][0])  # one-hot -> class index

    print('[INFO] 预测数据...')
    infer = MnistInferExample("simple_mnist.weights.10-0.21.hdf5", config)
    infer_label = np.argmax(infer.predict(test_data))
    print('[INFO] 真实Label: %s, 预测Label: %s' % (test_label, infer_label))
    print('[INFO] 预测完成...')
def train_vgg_manga():
    """Train the manga FaceNet model with the configured backbone."""
    print('[INFO] 解析配置…')
    parser = None
    config = None
    model_path = None
    try:
        args, parser = get_train_args()
        config = process_config(args.config)
        model_path = args.pre_train
    except Exception as err:
        print('[Exception] 配置无效, %s' % err)
        if parser:
            parser.print_help()
        print(
            '[Exception] 参考: python main_train.py -c configs/simple_mnist_config.json'
        )
        exit(0)

    np.random.seed(47)

    print('[INFO] 加载数据…')
    loader = FaceNetDL(config=config)

    print('[INFO] 构造网络…')
    # Dispatch table instead of the if/elif chain; unknown backbones
    # fall through to the multi-layer CNN message, as before.
    backbone_msgs = {
        'vgg': '[INFO] 使用 VGG 作为骨架',
        'alexnet': '[INFO] 使用 AlexNet 作为骨架',
    }
    print(backbone_msgs.get(config.backbone, '[INFO] 使用多层 CNN 作为骨架'))

    # NOTE(review): the CLI default for --pre_train is presumably the
    # literal string 'None' — confirm against get_train_args().
    model_kwargs = {'config': config}
    if model_path != 'None':
        model_kwargs['model_path'] = model_path
    model = MangaFaceNetModel(**model_kwargs)

    print('[INFO] 训练网络')
    trainer = VGGMangaTrainer(
        model=model.model,
        data=[loader.get_train_data(), loader.get_validation_data()],
        config=config)
    trainer.train()

    print('[INFO] 训练完成…')
def test_main():
    """Run SR inference over every test path listed in the config.

    Builds the network named by config['trainer']['net'] via dynamic
    import (wrapped in DataParallel on multi-GPU hosts), then predicts
    each configured test set.
    """
    print('[INFO] Retrieving configuration...')
    parser = None
    config = None
    set_random_seed(0)
    try:
        args, parser = get_test_args()
        # args.config = 'experiments/wmcnn/wmcnn.json'
        # args.config = 'configs/stacksr.json'
        # args.config = 'configs/stacksr_3vdsr.json'
        config = process_config(args.config, False)
    except Exception as e:
        print('[Exception] Configuration is invalid, %s' % e)
        if parser:
            parser.print_help()
        print(
            '[Exception] Refer to: python main_train.py -c configs/lapsrn.json'
        )
        exit(0)
    print('[INFO] Building graph...')
    try:
        # Dynamically import models.<net>.Net as named in the config.
        Net = importlib.import_module('models.{}'.format(
            config['trainer']['net'])).Net
        model = Net(config=config['model'])
        # model = Net()
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        # print_network(model)
    except ModuleNotFoundError:
        raise RuntimeWarning(
            "The model name is incorrect or does not exist! Please check!")
    for path in config['test_data_loader']['test_path']:
        # NOTE(review): test_config aliases config['test_data_loader'],
        # so the assignment below REPLACES the 'test_path' list with the
        # current path string. The for-loop is unaffected (it holds a
        # reference to the original list object), but the testset=
        # argument below then receives the current path string rather
        # than the list — presumably intentional; confirm.
        test_config = config['test_data_loader']
        test_config['test_path'] = path
        print('[INFO] Loading data...')
        dl = TestImageLoader(config=test_config)
        print('[INFO] Predicting...')
        infer = SRInfer(model, config['trainer'])
        infer.predict(dl.get_test_mat(),
                      testset=config['test_data_loader']['test_path'],
                      upscale=config['test_data_loader']['upscale'])
def train_vgg_mnist():
    """Train a randomly-initialized VGG16 binary classifier on manga data."""
    # BUGFIX(log): this step parses the configuration; the original
    # printed the "loading data" message here as well as below.
    print('[INFO] 解析配置…')
    config_str = 'configs/try_vgg_manga.json'
    config = process_config(config_str)
    np.random.seed(47)  # fixed seed for reproducibility

    print('[INFO] 加载数据…')
    dl = FaceNetDL(config=config)

    # VGG16 backbone without the top classifier; single-channel 224x224 input.
    base_model = tf.keras.applications.vgg16.VGG16(weights=None,
                                                   include_top=False,
                                                   input_shape=(224, 224, 1))
    x = base_model.output
    x = tf.keras.layers.Flatten()(x)
    # NOTE(review): sigmoid on a 2-unit layer combined with
    # categorical_crossentropy is unusual (softmax is typical) — confirm.
    predictions = tf.keras.layers.Dense(2, activation='sigmoid')(x)
    model = tf.keras.Model(inputs=base_model.input, outputs=predictions)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('[INFO] 训练网络')
    model_save_path = 'experiments/try_vgg_manga/checkpoints/'
    mkdir_if_not_exist(model_save_path)
    # Keep only the checkpoint with the lowest training loss.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        os.path.join(model_save_path, 'mnist_weights.hdf5'),
        verbose=1,
        save_weights_only=False,
        monitor='loss',
        mode='min',
        save_best_only=True)
    model.fit_generator(dl.get_train_data(),
                        epochs=config.num_epochs,
                        verbose=2,
                        validation_data=dl.get_validation_data(),
                        callbacks=[cp_callback])
    print('[INFO] 训练完成…')

    model.evaluate_generator(dl.get_test_data())  # NOTE: result is discarded
    print('[INFO] 测试完成…')
def train_vgg_manga():
    """Train the simple VGG manga model, optionally from pre-trained weights."""
    manga_dir = 'manga109_frame_face'
    print('[INFO] 解析配置…')
    parser = None
    config = None
    model_path = None
    try:
        args, parser = get_train_args()
        config = process_config(args.config)
        model_path = args.pre_train
    except Exception as err:
        print('[Exception] 配置无效, %s' % err)
        if parser:
            parser.print_help()
        print(
            '[Exception] 参考: python main_train.py -c configs/simple_mnist_config.json'
        )
        exit(0)

    np.random.seed(47)

    print('[INFO] 加载数据…')
    loader = VGGMangaDL(config=config, manga_dir=manga_dir)

    print('[INFO] 构造网络…')
    # NOTE(review): the CLI default for --pre_train is presumably the
    # literal string 'None' — confirm against get_train_args().
    model_kwargs = {'config': config}
    if model_path != 'None':
        model_kwargs['model_path'] = model_path
    model = VGGMangaSimpleModel(**model_kwargs)

    print('[INFO] 训练网络')
    trainer = VGGMangaTrainer(
        model=model.model,
        data=[loader.get_train_data(), loader.get_validation_data()],
        config=config)
    trainer.train()

    print('[INFO] 训练完成…')
def main_train():
    """Train the example MNIST model with a fixed config path.

    Reference (NumPy FutureWarning):
    https://stackoverflow.com/questions/48340392/futurewarning-conversion-of-the-second-argument-of-issubdtype-from-float-to
    """
    # Skip HTTPS certificate verification so the dataset download works
    # on hosts with missing CA bundles.
    ssl._create_default_https_context = ssl._create_unverified_context

    print('[INFO] 解析配置...')
    config = process_config('config/simple_mnist_config.json')
    np.random.seed(47)  # fixed random seed

    print('[INFO] 加载数据...')
    loader = MnistDLExample(config=config)

    print('[INFO] 构造网络...')
    net = MnistModelExample(config=config)

    print('[INFO] 训练网络...')
    trainer = MnistTrainerExample(
        model=net.model,
        data=[loader.get_train_data(), loader.get_test_data()],
        config=config)
    trainer.train()

    print('[INFO] 训练完成...')
def main_test():
    """Predict one MNIST sample with the model weights given on the CLI.

    Reads -c (config) and -m (model weights); prints usage help and
    exits on invalid configuration.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    print('[INFO] 解析配置...')
    parser = None
    config = None
    model_path = None
    try:
        args, parser = get_test_args()
        config = process_config(args.config)
        model_path = args.model
    except Exception as e:
        print('[Exception] 配置无效, %s' % e)
        if parser:
            parser.print_help()
        print(
            '[Exception] 参考: python main_test.py -c config/simple_mnist_config.json '
            '-m simple_mnist.weights.10-0.24.hdf5')
        exit(0)

    np.random.seed(47)  # fixed random seed

    print('[INFO] 加载数据...')
    dl = MnistDLExample()
    # BUGFIX(perf): fetch the test split once — the original called
    # get_test_data() twice, loading the dataset twice.
    test_pair = dl.get_test_data()
    test_data = np.expand_dims(test_pair[0][0], axis=0)  # add batch dim
    test_label = np.argmax(test_pair[1][0])  # one-hot -> class index

    print('[INFO] 预测数据...')
    infer = MnistInferExample(model_path, config)
    infer_label = np.argmax(infer.predict(test_data))
    print('[INFO] 真实Label: %s, 预测Label: %s' % (test_label, infer_label))
    print('[INFO] 预测完成...')
def main_test():
    """Evaluate triplet embedding distances with a fixed config path."""
    # BUGFIX(compat): py2-only `print x` statements replaced with
    # parenthesized print() (identical on py2, valid py3); dead
    # commented-out CLI parsing removed.
    print('[INFO] 解析配置...')
    config = process_config('configs/triplet_config.json')

    print('[INFO] 加载数据...')
    print('[INFO] 预测数据...')
    infer = TripletInfer(config=config)
    infer.default_dist()
    infer.test_dist()
    print('[INFO] 预测完成...')
def main_train():
    """Train the DCL model with a fixed config path.

    :return: None
    """
    # BUGFIX(compat): py2-only `print x` statements replaced with
    # parenthesized print() (identical on py2, valid py3); dead
    # commented-out CLI parsing removed.
    print('[INFO] 解析配置...')
    config = process_config('configs/dcl_config.json')
    np.random.seed(47)  # fixed random seed

    print('[INFO] 加载数据...')
    dl = DclLoader(config=config)

    print('[INFO] 构造网络...')
    model = DclModel(config=config)

    print('[INFO] 训练网络...')
    trainer = DclTrainer(model=model.model,
                         data=[dl.get_train_data(), dl.get_test_data()],
                         config=config)
    trainer.train()

    print('[INFO] 训练完成...')
def train_main():
    """Train the configured SR network and record the best result.

    Parses -c (config), copies the config into the experiment directory,
    builds the model, trains it, then writes performance.txt and a config
    JSON updated with the best checkpoint path.
    :return: None
    """
    print('[INFO] Retrieving configuration...')
    parser = None
    args = None
    config = None
    # TODO: modify the path of best checkpoint after training
    try:
        args, parser = get_train_args()
        config = process_config(args.config)
        # Keep a copy of the config next to the experiment outputs.
        shutil.copy2(args.config, os.path.join("experiments", config['exp_name']))
    except Exception as e:
        print('[Exception] Configuration is invalid, %s' % e)
        if parser:
            parser.print_help()
        print('[Exception] Refer to: python main_train.py -c configs/wmcnn.json')
        exit(0)

    print('[INFO] Loading data...')
    dl = ImageLoader(config=config['train_data_loader'])

    print('[INFO] Building graph...')
    try:
        # Dynamically import models.<net>.Net as named in the config.
        Net = importlib.import_module('models.{}'.format(config['trainer']['net'])).Net
        model = Net(config=config['model'])
        print_network(model)
    except ModuleNotFoundError as e:
        # NOTE(review): raising a Warning subclass as an exception is
        # unusual, but the type is preserved because callers may catch it.
        raise RuntimeWarning(
            "The model name is incorrect or does not exist! Please check!") from e

    print('[INFO] Training the graph...')
    # Synchronous CUDA kernel launches make device errors easier to locate.
    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
    trainer = SRTrainer(
        model=model,
        data={'train': dl.get_wmcnn_hdf5_data(), 'test': dl.get_test_data()},
        config=config['trainer'])
    highest_score, best_model = trainer.train()

    with open(os.path.join("experiments", config['exp_name'], 'performance.txt'), 'w') as f:
        # BUGFIX(idiom): write() instead of writelines() — writelines()
        # iterated the score string character by character.
        f.write(str(highest_score))

    # Persist the config with the best checkpoint path filled in.
    json_file = os.path.join("./experiments", config['exp_name'],
                             os.path.basename(args.config))
    with open(json_file, 'w') as file_out:
        config['trainer']['checkpoint'] = best_model
        json.dump(config, file_out, indent=2)

    print('[INFO] Training is completed.')
def train_main():
    """Train the configured SR network (multi-GPU aware) and record results.

    Parses -c (config), copies it into the experiment directory, builds
    the model (DataParallel when several GPUs exist), trains, then writes
    performance.txt and a config JSON updated with the best checkpoint.
    :return: None
    """
    print('[INFO] Retrieving configuration...')
    parser = None
    args = None
    config = None
    # TODO: modify the path of best checkpoint after training
    try:
        args, parser = get_train_args()
        config = process_config(args.config)
        # Keep a copy of the config next to the experiment outputs.
        shutil.copy2(args.config, os.path.join("experiments", config['exp_name']))
    except Exception as e:
        print('[Exception] Configuration is invalid, %s' % e)
        if parser:
            parser.print_help()
        print(
            '[Exception] Refer to: python main_train.py -c configs/rrgun.json')
        exit(0)

    print('[INFO] Loading data...')
    # Autotune cuDNN conv algorithms; pays off with fixed input shapes.
    torch.backends.cudnn.benchmark = True
    dl = ImageLoader(config=config['train_data_loader'])

    print('[INFO] Building graph...')
    try:
        # Dynamically import models.<net>.Net as named in the config.
        Net = importlib.import_module('models.{}'.format(
            config['trainer']['net'])).Net
        model = Net(config=config['model'])
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        print_network(model)
    except ModuleNotFoundError as e:
        # NOTE(review): raising a Warning subclass as an exception is
        # unusual, but the type is preserved because callers may catch it.
        raise RuntimeWarning(
            "The model name is incorrect or does not exist! Please check!") from e

    print('[INFO] Training the graph...')
    trainer = SRTrainer(
        model=model,
        data={'train': dl.get_hdf5_sample_data(), 'test': dl.get_test_data()},
        config=config['trainer'])
    highest_score, best_model = trainer.train()

    with open(os.path.join("experiments", config['exp_name'], 'performance.txt'), 'w') as f:
        # BUGFIX(idiom): write() instead of writelines() — writelines()
        # iterated the score string character by character.
        f.write(str(highest_score))

    # Persist the config with the best checkpoint path filled in.
    json_file = os.path.join("./experiments", config['exp_name'],
                             os.path.basename(args.config))
    with open(json_file, 'w') as file_out:
        config['trainer']['checkpoint'] = best_model
        json.dump(config, file_out, indent=2)

    print('[INFO] Training is completed.')
warnings.filterwarnings("ignore", category=UndefinedMetricWarning) precision, recall, f_score, support = precision_recall_fscore_support( val_y, prd_y) for p, r, f, s in zip(precision, recall, f_score, support): print( " — val_f1: % 0.4f — val_pre: % 0.4f — val_rec % 0.4f - ins %s" % (f, p, r, s)) if __name__ == "__main__": from utils.config_utils import process_config, get_test_args from models.auto_encoder_nn_model import AutoEncoderNN from data_loaders.data_load_cotton import CottonDL config = process_config( '/home/zhenye/Documents/modified_code/configs/cotton_ann.json') # 建立模型 model = AutoEncoderNN(config) # 获取数据 data_loader = CottonDL(config) train_data_x, train_data_y = data_loader.get_train_data_load() test_data_x, test_data_y = data_loader.get_test_data_load() validation_data_x, validation_data_y = data_loader.get_validation_data_load( ) data = [[train_data_x, train_data_y], [test_data_x, test_data_y], [validation_data_x, validation_data_y]] # 进行训练 trainer = AutoEncoderTrainer(model=model, data=data, config=config)
def train_vgg_mnist():
    """Build and train a small VGG-style CNN on the FaceNet MNIST data."""
    print('[INFO] 加载数据…')
    config = process_config('configs/vgg_mnist_config.json')
    dl = FaceNetDL(config=config)

    def _conv_block(tensor, filters, idx):
        # conv -> BN -> ReLU -> 2x2 max-pool; layer names match the
        # original hand-written stack ('conv1'/'bn1'/'pool1', ...).
        tensor = tf.keras.layers.Convolution2D(
            filters, (3, 3), padding='same', name='conv%d' % idx)(tensor)
        tensor = tf.keras.layers.BatchNormalization(name='bn%d' % idx)(tensor)
        tensor = tf.keras.layers.ReLU()(tensor)
        return tf.keras.layers.MaxPool2D(
            (2, 2), strides=(2, 2), padding='same', name='pool%d' % idx)(tensor)

    net_in = tf.keras.Input(shape=(config.input_shape, config.input_shape, 1))
    net = _conv_block(net_in, 32, 1)
    net = _conv_block(net, 48, 2)
    net = _conv_block(net, 64, 3)
    net = tf.keras.layers.Flatten(name='fl')(net)
    net = tf.keras.layers.Dense(3168, activation='relu', name='fc1')(net)
    net = tf.keras.layers.Dense(2, activation='softmax')(net)

    model = tf.keras.Model(inputs=net_in, outputs=net)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print('[INFO] 训练网络')
    model_save_path = 'experiments/mnist_facenet/checkpoints'
    mkdir_if_not_exist(model_save_path)
    # Keep only the checkpoint with the lowest validation loss.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        os.path.join(model_save_path, 'mnist_facenet_weights.hdf5'),
        verbose=1,
        save_weights_only=False,
        monitor='val_loss',
        mode='min',
        save_best_only=True)
    model.fit_generator(dl.get_train_data(),
                        epochs=config.num_epochs,
                        validation_data=dl.get_validation_data(),
                        callbacks=[cp_callback])
    print('[INFO] 训练完成…')

    print('[INFO] 测试模型…')
    model.evaluate_generator(dl.get_test_data())
    print('[INFO] 测试完成…')