def main(fp_data: str, fp_dict: str, coarse_grained: bool):
    """SA with a rule-based model.

    :param fp_data: File path of the test data.
    :param fp_dict: File path of the sa dict.
    :param coarse_grained: Whether to analyze in a coarse-grained (binary SA) way.
    :return: None.
    """

    # Reads data from the given dataset.
    reviews, trgs = utils.read_csv(fp_csv=fp_data, ignore_first_line=True)

    # Constructs a senti dict.
    senti_dict = SentiDict(fp_dalian=fp_dict)
    # Builds a senti model.
    senti_model = SentiModel(senti_dict=senti_dict)

    # Prediction.
    outs = senti_model.analyze(texts=reviews, coarse_grained=coarse_grained)

    # Evaluation.
    evaluate(trgs=trgs,
             outs=outs,
             detailed=True,
             fp_save=f"rule_based_{time.strftime('%m%d_%H%M')}.report")
Example #2
def quantize_inout(flcheck=None) -> None:
    """

    :param flcheck: a list, which length is 2 * num_layer, first num_layer items is fraction_length, other is is_quantization.
    """
    print('\n-- Start quantizing input and output. --')
    # If flcheck is given, resume fraction_length and is_quantization from it.
    if flcheck:
        half = len(flcheck) // 2
        fraction_length = flcheck[:half]
        is_quantization = flcheck[half:]

    # process() presumably returns the index of the first layer still to quantize.
    for layer in range(process(is_quantization), len(is_quantization)):
        layer_name = '.'.join(params[layer][0].split('.')[0:-1])
        print('\n-- Quantizing layer:{}\'s inout --'.format(layer_name))
        acc_max = 0  # init_acc
        # Enable input/output quantization for the current layer.
        is_quantization[layer] = 1
        fl_tmp = fraction_length.copy()
        for fl in range(bit_width):  # try every candidate fraction length
            print('-- Trying fraction length: {} --'.format(fl))
            fl_tmp[layer] = int(fl)
            # Instantiate the model with the current settings.
            model = Net(bit_width=bit_width,
                        fraction_length=fl_tmp,
                        is_quantization=is_quantization)
            model.load_state_dict(state)
            acc_inout_eval = evaluate(model, data_loader)
            # Keep this fl if it yields the best accuracy so far.
            if acc_inout_eval > acc_max:
                fraction_length[layer] = fl
            acc_max = max(acc_max, acc_inout_eval)
            print('-- layer: {}, fl: {}, acc: {}% --'.format(
                layer_name, fl, round(acc_inout_eval * 100, 2)))
        # Save a checkpoint of the search progress.
        save_fl(fraction_length + is_quantization, inout_fl_path)

        print('-- layer: {}, best_fl: {}, acc_max: {}% --\n'.format(
            layer_name, int(fraction_length[layer]), round(acc_max * 100, 2)))

    # -------    test section    -------
    print('\n -- Testing --')
    bool_q = numpy.ones_like(fraction_length)  # all-ones array: quantize every layer
    model = Net(bit_width=bit_width,
                fraction_length=fraction_length,
                is_quantization=bool_q)
    model.load_state_dict(state)
    acc_inout_eval = evaluate(model, data_loader)
    print('-- Quantize inout is done, best accuracy is {}% --\n'.format(
        round(acc_inout_eval * 100, 2)))
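
save_fl and the flcheck layout are repo-internal; a minimal sketch under the assumption that the checkpoint is simply the concatenated list serialized to disk (names and format are assumptions):

import json

def save_fl(fl_list, path):
    """Persist fraction_length + is_quantization as one flat JSON list (assumed format)."""
    with open(path, "w") as f:
        json.dump([int(v) for v in fl_list], f)

def load_fl(path):
    """Load a saved list; pass it back in via quantize_inout(flcheck=load_fl(path))."""
    with open(path) as f:
        return json.load(f)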
Example #3
def quantize_param():
    """Search, per layer, the fraction length that best quantizes the parameters."""
    # Instantiate the model before quantization starts.
    model = Net(bit_width=bit_width,
                fraction_length=fraction_length,
                is_quantization=is_quantization)
    print('\n-- Starting Quantize parameter. --')
    device = torch.device(
        "cuda:0") if torch.cuda.is_available() else torch.device("cpu")
    # Nested loop over all parameter groups: a layer's parameters are
    # quantized together as one group, not one tensor at a time.
    for layer in range(len(params)):
        layer_name = '.'.join(params[layer][0].split('.')[0:-1])
        print('\n-- Quantizing {}\'s parameter --'.format(layer_name))
        acc_max = 0  # init_acc
        # Try every fraction length (fl).
        for fl in range(bit_width):
            print('-- Trying fraction length: {} --'.format(fl))
            acc_eval = test_param(layer, fl, device)
            # If accuracy matches or beats the best so far, record this fl;
            # otherwise restore the best parameters into the temp state so
            # later layers build on the best result.
            if acc_eval >= acc_max:
                result_param[layer] = [fl, round(acc_eval * 100, 2)]  # [fl, accuracy]
            else:
                # Fetch this group's parameters so we can restore them.
                for key in params[layer]:
                    param_recover = state[key].clone()
                    # Re-quantize using the best recorded fraction length.
                    param_recover = float2fixed(param_recover.float(),
                                                bit_width,
                                                result_param[layer][0])
                    # Put the best parameters back into the temp state.
                    state_best[key] = param_recover
            # Keep acc_max at the best accuracy seen so far.
            acc_max = max(acc_max, acc_eval)
            print('-- layer: {}, fl: {}, acc: {}% --'.format(
                layer_name, fl, round(acc_eval * 100, 2)))
        print('-- layer: {}, best_fl: {}, acc_max: {}% --\n'.format(
            layer_name, result_param[layer][0], result_param[layer][1]))

    # -------    test section    -------
    final_state = state.copy()
    # Quantize the pre-trained model with the best strategy found above.
    for index, layer in enumerate(result_param):
        # layer is the record [best_fl, acc_max] for this layer.
        for key in params[index]:
            param = state[key].clone()
            param = float2fixed(param.float(), bit_width, layer[0])
            final_state[key] = param
    model.load_state_dict(final_state)  # eval
    acc_eval = evaluate(model, data_loader, device)  # get eval accuracy
    print('-- Quantize parameter is done, best accuracy is {}% --\n'.format(
        round(acc_eval * 100, 2)))

    # -------    saving quantized model    -------
    print("\n-- Saving quantized model to {} --\n".format(param_saving_path))
    torch.save(final_state, param_saving_path)  # save the parameters quantized under the best strategy
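
float2fixed is defined elsewhere in the repo; a plausible minimal version, assuming symmetric signed fixed-point with bit_width total bits (one sign bit) and fl fractional bits:

import torch

def float2fixed(tensor: torch.Tensor, bit_width: int, fl: int) -> torch.Tensor:
    """Round to the fixed-point grid 2**-fl and clamp to the representable range."""
    scale = 2.0 ** fl
    qmax = 2 ** (bit_width - 1) - 1  # largest signed integer code
    qmin = -2 ** (bit_width - 1)     # smallest signed integer code
    fixed = torch.clamp(torch.round(tensor * scale), qmin, qmax)
    return fixed / scale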
Example #4
def f_eval():
    """Evaluate the pre-trained model once as a baseline before quantization."""
    # Instantiate the model before quantization starts.
    model = Net(bit_width=bit_width,
                fraction_length=fraction_length,
                is_quantization=is_quantization)
    model.load_state_dict(state)
    dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Measure accuracy before quantization begins.
    print('\n-- Starting First Eval. -- ')
    acc = evaluate(model, data_loader, dev)
    print('-- Original pre-trained model\'s accuracy is {}% --\n'.format(
        round(acc * 100, 2)))
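
evaluate is shared by the quantization snippets above; a minimal sketch for top-1 classification accuracy, assuming data_loader yields (inputs, labels) batches:

import torch

def evaluate(model, data_loader, device=torch.device("cpu")) -> float:
    """Return top-1 accuracy of model over data_loader (minimal sketch)."""
    model.to(device)
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            preds = model(inputs).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    return correct / total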
def postprocess_for_test(output, checkpoint_dir: str,
                         id2label_dict: Dict[int, str], labels: List[str]):
    """Generation & evaluation."""
    trgs = output.label_ids
    outs = np.argmax(output.predictions, axis=1)
    probs = np.max(output.predictions, axis=1)

    result_file_path = os.path.join(
        checkpoint_dir, f"pretrain_based_{time.strftime('%m%d_%H%M')}.out")
    with open(result_file_path, "w") as f:
        for trg, out, prob in zip(trgs, outs, probs):
            trg = id2label_dict[trg]
            out = id2label_dict[out]

            line = f"{trg}\t{out}\t{prob:.2f}"
            print(line)
            f.write(line + "\n")

    metrics_file_path = os.path.join(
        checkpoint_dir, f"pretrain_based_{time.strftime('%m%d_%H%M')}.report")
    trgs = [id2label_dict[trg] for trg in trgs]
    outs = [id2label_dict[out] for out in outs]
    evaluate(trgs=trgs, outs=outs, detailed=True, fp_save=metrics_file_path)
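
One caveat: output.predictions from a Hugging Face-style Trainer are typically raw logits, so the prob column written above is a logit value rather than a probability. If calibrated probabilities are wanted, a row-wise softmax first would do it (a sketch, assuming logits):

import numpy as np

def to_probs(logits: np.ndarray) -> np.ndarray:
    """Row-wise softmax: turn (n_samples, n_classes) logits into probabilities."""
    shifted = logits - logits.max(axis=1, keepdims=True)  # subtract max for stability
    exp = np.exp(shifted)
    return exp / exp.sum(axis=1, keepdims=True)

# probs = np.max(to_probs(output.predictions), axis=1)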
Example #6
def test_param(layer, fl, device):
    """Quantize one layer's parameters with fraction length fl and return eval accuracy."""
    model = Net(bit_width=bit_width,
                fraction_length=fraction_length,
                is_quantization=is_quantization)
    for key in params[layer]:  # each key names one parameter tensor of the layer
        # Extract that parameter tensor from the pre-trained state.
        param = state[key].clone()
        # Quantize it with the candidate fraction length.
        param = float2fixed(param.float(), bit_width, fl)
        # Overwrite this tensor in the temp state.
        state_best[key] = param
    # Load the modified state into the model.
    model.load_state_dict(state_best)
    model.to(device)
    # Compute accuracy with these quantized parameters.
    acc_eval = evaluate(model, data_loader, device)
    return acc_eval
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        # run evaluation
        average_precisions = evaluate(self.generator,
                                      self.model,
                                      iou_threshold=self.iou_threshold,
                                      score_threshold=self.score_threshold,
                                      max_detections=self.max_detections,
                                      save_path=self.save_path)

        # compute per class average precision
        total_instances = []
        precisions = []
        for label, (average_precision,
                    num_annotations) in average_precisions.items():
            if self.verbose == 1:
                print(
                    '{:.0f} instances of class'.format(num_annotations),
                    self.generator.label_to_name(label),
                    'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)
        if self.weighted_average:
            self.mean_ap = sum([
                a * b for a, b in zip(total_instances, precisions)
            ]) / sum(total_instances)
        else:
            self.mean_ap = sum(precisions) / sum(x > 0
                                                 for x in total_instances)

        if self.tensorboard is not None and self.tensorboard.writer is not None:
            import tensorflow as tf
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = self.mean_ap
            summary_value.tag = "mAP"
            self.tensorboard.writer.add_summary(summary, epoch)

        logs['mAP'] = self.mean_ap

        if self.verbose == 1:
            print('mAP: {:.4f}'.format(self.mean_ap))
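
The two branches above differ only in how the per-class APs are combined; a tiny worked example with made-up numbers:

# Hypothetical results: {label: (average_precision, num_annotations)}
average_precisions = {0: (0.90, 80), 1: (0.50, 20), 2: (0.00, 0)}

instances = [n for _, n in average_precisions.values()]
precisions = [ap for ap, _ in average_precisions.values()]

# weighted_average=True: classes with more instances count more.
weighted = sum(ap * n for ap, n in zip(precisions, instances)) / sum(instances)
# weighted_average=False: plain mean over classes that have instances.
unweighted = sum(precisions) / sum(n > 0 for n in instances)

print(weighted)    # (0.9*80 + 0.5*20 + 0.0*0) / 100 = 0.82
print(unweighted)  # (0.9 + 0.5 + 0.0) / 2 = 0.70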
Example #8
def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # make save path if it doesn't exist
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # optionally load config parameters
    if args.config:
        args.config = read_config_file(args.config)

    # create the generator
    generator = create_generator(args)

    # optionally load anchor parameters
    anchor_params = None
    if args.config and 'anchor_parameters' in args.config:
        anchor_params = parse_anchor_parameters(args.config)

    # load the model
    print('Loading model, this may take a second...')
    model = models.load_model(args.model, backbone_name=args.backbone)

    # optionally convert the model
    if args.convert_model:
        model = models.convert_model(model, anchor_params=anchor_params)

    # print model summary
    # print(model.summary())

    # start evaluation
    if args.dataset_type == 'coco':
        from ..utils.coco_eval import evaluate_coco
        evaluate_coco(generator, model, args.score_threshold)
    else:
        average_precisions = evaluate(
            generator,
            model,
            iou_threshold=args.iou_threshold,
            score_threshold=args.score_threshold,
            max_detections=args.max_detections,
            save_path=args.save_path
        )

        # print evaluation
        total_instances = []
        precisions = []
        for label, (average_precision, num_annotations) in average_precisions.items():
            print('{:.0f} instances of class'.format(num_annotations),
                  generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
            total_instances.append(num_annotations)
            precisions.append(average_precision)

        if sum(total_instances) == 0:
            print('No test instances found.')
            return

        print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))
        print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))
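
For reference, this main mirrors keras-retinanet's evaluation script; a hypothetical invocation (subcommand layout and paths are assumptions, check parse_args for the real interface):

# python evaluate.py csv val_annotations.csv classes.csv snapshots/model.h5 \
#     --convert-model --score-threshold 0.05 --save-path /tmp/detections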