Code Example #1
import os
import shutil

import paddlehub as hub

# `logger` and `is_path_valid` used below are helpers defined elsewhere in the original project.


def finetune(args):
    # Load the PaddleHub pretrained model (defaults to mobilenet)
    module = hub.Module(name=args.module)
    input_dict, output_dict, program = module.context(trainable=True)

    # Download the dataset and use ImageClassificationReader to read it
    dataset = hub.dataset.Flowers()
    data_reader = hub.reader.ImageClassificationReader(
        image_width=module.get_expected_image_width(),
        image_height=module.get_expected_image_height(),
        images_mean=module.get_pretrained_images_mean(),
        images_std=module.get_pretrained_images_std(),
        dataset=dataset)

    # The feature map output of the pretrained network (e.g. resnet_v2_101_imagenet)
    feature_map = output_dict["feature_map"]

    img = input_dict["image"]
    feed_list = [img.name]

    # Select the fine-tune strategy, set up the config, and fine-tune
    strategy = hub.DefaultFinetuneStrategy(learning_rate=args.learning_rate)
    config = hub.RunConfig(
        use_cuda=True,
        num_epoch=args.epochs,
        batch_size=args.batch_size,
        checkpoint_dir=args.checkpoint_dir,
        strategy=strategy)

    # Construct transfer learning network
    task = hub.ImageClassifierTask(
        data_reader=data_reader,
        feed_list=feed_list,
        feature=feature_map,
        num_classes=dataset.num_labels,
        config=config)

    # Load model parameters from the given path, if one is provided
    if args.model_path != "":
        with task.phase_guard(phase="train"):
            task.init_if_necessary()
            task.load_parameters(args.model_path)
            logger.info("PaddleHub has loaded model from %s" % args.model_path)

    # Finetune by PaddleHub's API
    task.finetune()
    # Evaluate by PaddleHub's API
    run_states = task.eval()
    # Get acc score on dev
    eval_avg_score, eval_avg_loss, eval_run_speed = task._calculate_metrics(
        run_states)

    # Move ckpt/best_model to the defined saved parameters directory
    best_model_dir = os.path.join(config.checkpoint_dir, "best_model")
    if is_path_valid(args.saved_params_dir) and os.path.exists(best_model_dir):
        shutil.copytree(best_model_dir, args.saved_params_dir)
        shutil.rmtree(config.checkpoint_dir)

    # acc on dev will be used by auto finetune
    hub.report_final_result(eval_avg_score["acc"])
Code Example #2
def recognize():
    global flag
    module = hub.Module(name="resnet_v2_50_imagenet")
    dataset = DemoDataset()

    data_reader = hub.reader.ImageClassificationReader(
        image_width=module.get_expected_image_width(),
        image_height=module.get_expected_image_height(),
        images_mean=module.get_pretrained_images_mean(),
        images_std=module.get_pretrained_images_std(),
        dataset=dataset)

    config = hub.RunConfig(
        use_cuda=False,  # whether to use GPU for training; defaults to False
        num_epoch=5,  # number of fine-tune epochs
        checkpoint_dir="cv_finetune_turtorial_demo",  # path to save model checkpoints; generated automatically if not specified
        batch_size=10,  # training batch size; if using a GPU, adjust it to your setup
        eval_interval=10,  # evaluation interval; by default the validation set is evaluated every 100 steps
        strategy=hub.finetune.strategy.DefaultFinetuneStrategy())  # fine-tune optimization strategy
        #strategy=hub.finetune.strategy.AdamWeightDecayStrategy())

    input_dict, output_dict, program = module.context(trainable=True)
    img = input_dict["image"]
    feature_map = output_dict["feature_map"]
    feed_list = [img.name]

    task = hub.ImageClassifierTask(
        data_reader=data_reader,
        feed_list=feed_list,
        feature=feature_map,
        num_classes=dataset.num_labels,
        config=config)

    label_map = dataset.label_dict()
    #run_states = task.finetune_and_eval()
    while True:
        if flag == 1:
            # Image to classify (a frame captured elsewhere in the project)
            data = []
            data.append("/home/xmy/PycharmProjects/test/paddle/proj3_recognizeMyself/temp_out/cap.jpg")
            index = 0
            run_states = task.predict(data=data)
            results = [run_state.run_results for run_state in run_states]

            for batch_result in results:
                # Take the class with the highest score for each image in the batch
                batch_result = np.argmax(batch_result, axis=2)[0]
                for result in batch_result:
                    index += 1
                    # Map the predicted class index to its label
                    result = label_map[result]
                    #print("input %i is %s, and the predict result is %s" %
                    #      (index, data[index - 1], result))

            # Activate a window with wmctrl depending on which label was recognized
            if "科比" in result:
                os.system("wmctrl -a \"pycharm\"")
            elif "库里" in result:
                os.system("wmctrl -a \"chrome\"")
            flag = 0
Code Example #3
def predict(args):
    # Load the PaddleHub pretrained model
    module = hub.Module(name=args.module)
    input_dict, output_dict, program = module.context(trainable=True)

    # Download dataset
    if args.dataset.lower() == "flowers":
        dataset = hub.dataset.Flowers()
    elif args.dataset.lower() == "dogcat":
        dataset = hub.dataset.DogCat()
    elif args.dataset.lower() == "indoor67":
        dataset = hub.dataset.Indoor67()
    elif args.dataset.lower() == "food101":
        dataset = hub.dataset.Food101()
    elif args.dataset.lower() == "stanforddogs":
        dataset = hub.dataset.StanfordDogs()
    else:
        raise ValueError("%s dataset is not defined" % args.dataset)

    # Use ImageClassificationReader to read dataset
    data_reader = hub.reader.ImageClassificationReader(
        image_width=module.get_expected_image_width(),
        image_height=module.get_expected_image_height(),
        images_mean=module.get_pretrained_images_mean(),
        images_std=module.get_pretrained_images_std(),
        dataset=dataset)

    feature_map = output_dict["feature_map"]

    # Set up the feed list for the data feeder
    feed_list = [input_dict["image"].name]

    # Set up the running config for the PaddleHub Finetune API
    config = hub.RunConfig(
        use_data_parallel=False,
        use_cuda=args.use_gpu,
        batch_size=args.batch_size,
        checkpoint_dir=args.checkpoint_dir,
        strategy=hub.finetune.strategy.DefaultFinetuneStrategy())

    # Define an image classification fine-tuning task by PaddleHub's API
    task = hub.ImageClassifierTask(
        data_reader=data_reader,
        feed_list=feed_list,
        feature=feature_map,
        num_classes=dataset.num_labels,
        config=config)

    data = ["./test/test_img_daisy.jpg", "./test/test_img_roses.jpg"]
    print(task.predict(data=data, return_result=True))
Code Example #4
File: main.py  Project: yinyiyu/four-beauties
def human_classfication(data):
    '''
    Classify a face image with the previously fine-tuned model.
    :param data: path(s) of the image(s) to classify
    :return: label of the face (who the person is)
    '''
    module = hub.Module(name="resnet_v2_18_imagenet")
    dataset = DemoDataset()

    # Model construction
    data_reader = hub.reader.ImageClassificationReader(
        image_width=module.get_expected_image_width(),
        image_height=module.get_expected_image_height(),
        images_mean=module.get_pretrained_images_mean(),
        images_std=module.get_pretrained_images_std(),
        dataset=dataset)

    config = hub.RunConfig(
        use_cuda=False,  # whether to use GPU for training; defaults to False
        num_epoch=4,  # number of fine-tune epochs
        checkpoint_dir="cv_finetune",  # path to save model checkpoints; generated automatically if not specified
        batch_size=10,  # training batch size; if using a GPU, adjust it to your setup
        eval_interval=10,  # evaluation interval; by default the validation set is evaluated every 100 steps
        strategy=hub.finetune.strategy.DefaultFinetuneStrategy()
    )  # fine-tune optimization strategy
    # Assemble the FinetuneTask
    input_dict, output_dict, program = module.context(trainable=True)
    img = input_dict["image"]
    feature_map = output_dict["feature_map"]
    feed_list = [img.name]

    task = hub.ImageClassifierTask(data_reader=data_reader,
                                   feed_list=feed_list,
                                   feature=feature_map,
                                   num_classes=dataset.num_labels,
                                   config=config)

    task.load_checkpoint()

    # ##--------------开始预测

    label_map = dataset.label_dict()
    run_states = task.predict(data=data)
    results = [run_state.run_results for run_state in run_states]
    for batch_result in results:
        # Take the class with the highest score for each image in the batch
        batch_result = np.argmax(batch_result, axis=2)[0]
        for result in batch_result:
            # Return the label of the first image (who the person is)
            return label_map[result]
Code Example #5
def finetune(args):
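    # Load the PaddleHub pretrained module selected by args.module and get its context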
    module = hub.Module(name=args.module)
    input_dict, output_dict, program = module.context(trainable=True)

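    # Pick the dataset requested by args.dataset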
    if args.dataset.lower() == "flowers":
        dataset = hub.dataset.Flowers()
    elif args.dataset.lower() == "dogcat":
        dataset = hub.dataset.DogCat()
    elif args.dataset.lower() == "indoor67":
        dataset = hub.dataset.Indoor67()
    elif args.dataset.lower() == "food101":
        dataset = hub.dataset.Food101()
    elif args.dataset.lower() == "stanforddogs":
        dataset = hub.dataset.StanfordDogs()
    else:
        raise ValueError("%s dataset is not defined" % args.dataset)

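    # Use ImageClassificationReader to read the dataset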
    data_reader = hub.reader.ImageClassificationReader(
        image_width=module.get_expected_image_width(),
        image_height=module.get_expected_image_height(),
        images_mean=module.get_pretrained_images_mean(),
        images_std=module.get_pretrained_images_std(),
        dataset=dataset)

    feature_map = output_dict["feature_map"]

    img = input_dict["image"]
    feed_list = [img.name]

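    # Set up the running config for the PaddleHub Finetune API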
    config = hub.RunConfig(
        use_data_parallel=args.use_data_parallel,
        use_pyreader=args.use_pyreader,
        use_cuda=args.use_gpu,
        num_epoch=args.num_epoch,
        batch_size=args.batch_size,
        enable_memory_optim=False,
        checkpoint_dir=args.checkpoint_dir,
        strategy=hub.finetune.strategy.DefaultFinetuneStrategy())

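    # Construct the transfer-learning task, then fine-tune and periodically evaluate it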
    task = hub.ImageClassifierTask(
        data_reader=data_reader,
        feed_list=feed_list,
        feature=feature_map,
        num_classes=dataset.num_labels,
        config=config)
    task.finetune_and_eval()
Code Example #6
# Since this dataset defines a binary classification task, while the downloaded classification module is a 1000-class model trained on ImageNet, we need a simple fine-tuning step to turn the model into a binary classifier:
#
# 1. Get the module's context, including its input and output variables as well as the Paddle Program;
# 2. Find the feature extraction layer, feature_map, among the output variables;
# 3. Attach a fully connected layer after feature_map to build the Task.

# In[16]:

input_dict, output_dict, program = module.context(trainable=True)
img = input_dict["image"]
feature_map = output_dict["feature_map"]
feed_list = [img.name]

task = hub.ImageClassifierTask(data_reader=data_reader,
                               feed_list=feed_list,
                               feature=feature_map,
                               num_classes=dataset.num_labels,
                               config=config)

# ### Step 5: Start fine-tuning
#
# We use the `finetune_and_eval` interface to train the model. During fine-tuning, this interface periodically evaluates the model, so we can see how performance changes over the course of training.

# In[22]:

run_states = task.finetune_and_eval()

# ### Step 6: Prediction
#
# Once fine-tuning is complete, we use the model for prediction. First, fetch the test images with the following command.
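#
# Below is a minimal sketch of the prediction call itself. The image paths are placeholders for the downloaded test images; `return_result=True` makes `task.predict` return the predicted labels directly, as in Code Example #3.

# In[ ]:

data = ["./test_image_1.jpg", "./test_image_2.jpg"]  # placeholder paths for the downloaded test images
print(task.predict(data=data, return_result=True))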