Example #1
def prepare_quantizable_module(
    module: torch.nn.Module,
    input_args: Union[torch.Tensor, Sequence[Any]],
    export_folder: str,
    state_dict_file: Optional[str] = None,
    quant_mode: int = 1,
    device: torch.device = torch.device("cuda")
) -> Tuple[torch.nn.Module, Graph]:
    """Parse `module` into an NNDCT graph, rebuild it as a quantizable
    module, and connect the two; optionally load weights from
    `state_dict_file` first."""

    nndct_utils.create_work_dir(export_folder)

    if isinstance(state_dict_file, str):
        state_dict = torch.load(state_dict_file)
        module.load_state_dict(state_dict)

    export_file = os.path.join(export_folder,
                               module._get_name() + TorchSymbol.SCRIPT_SUFFIX)

    # switch to specified device
    module, input_args = to_device(module, input_args, device)

    # parse origin module to graph
    NndctScreenLogger().info(f"=>Parsing {module._get_name()}...")
    graph = parse_module(module, input_args)
    NndctScreenLogger().info(
        f"=>Quantizable module is generated. ({export_file})")
    # recreate quantizable module from graph
    quant_module = recreate_nndct_module(graph, True, export_file).to(device)
    quant_module.train(mode=module.training)
    # hook module with graph
    connect_module_with_graph(quant_module, graph)

    return quant_module, graph
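
For orientation, a minimal call sketch, assuming prepare_quantizable_module from this example is in scope; the toy model, export folder, and CPU device are illustrative assumptions, not from the source:

# Minimal usage sketch, assuming prepare_quantizable_module (Example #1)
# is importable; the toy model and export folder are illustrative only.
import torch

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
dummy_input = torch.randn(1, 3, 32, 32)
quant_module, graph = prepare_quantizable_module(
    model,
    dummy_input,
    export_folder="./quantize_result",
    device=torch.device("cpu"))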
Example #2
def parse_module(module: torch.nn.Module,
                 input_args: Union[torch.Tensor, Sequence[Any]],
                 enable_opt: bool = True,
                 graph_name: Optional[str] = None) -> Graph:
  """Parse `module` into a Graph, optionally fusing BatchNorm into Conv."""
  parser = TorchParser()
  graph = parser(module._get_name() if graph_name is None else graph_name,
                 module, input_args)
  if enable_opt:
    optimizer = NndctOptimizer(use_quant=True, model_type=FrameworkType.TORCH)
    graph = optimizer.optimize(graph, commands=['FuseBnToConv'])
  return graph
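
A short sketch of how this variant might be called so that the 'FuseBnToConv' pass has something to fold; the Conv-BN toy model below is an assumption for illustration, not from the source:

# Sketch: a Conv-BN model for parse_module (Example #2) to optimize via
# the 'FuseBnToConv' command; the model below is illustrative only.
import torch

class ConvBN(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 16, 3, bias=False)
        self.bn = torch.nn.BatchNorm2d(16)

    def forward(self, x):
        return self.bn(self.conv(x))

graph = parse_module(ConvBN().eval(), torch.randn(1, 3, 32, 32))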
Example #3
def parse_module(module: torch.nn.Module,
                 input_args: Union[torch.Tensor, Sequence[Any]],
                 enable_opt: bool = True,
                 graph_name: Optional[str] = None) -> Graph:
    """Parse `module` into a quantization graph; optionally replace ReLU6
    (for equalization) and run the quantization optimizer."""
    if NndctOption.nndct_equalization.value:
        if NndctOption.nndct_relu6_replace.value == 'reluk':
            replace_relu6_with_reluk(module)
        elif NndctOption.nndct_relu6_replace.value == 'relu':
            replace_relu6_with_relu(module)
    parser = TorchParser()
    graph = parser(module._get_name() if graph_name is None else graph_name,
                   module, input_args)
    if enable_opt:
        optimizer = QuantOptimizer()
        graph = optimizer(graph)
    if NndctOption.nndct_parse_debug.value >= 3:
        NndctDebugLogger.write(f"nndct quant graph:\n{graph}")
    return graph
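
The ReLU6 replacement is delegated to helpers not shown here; below is a minimal sketch of what such a replacement could look like. This is an assumption about their behavior, not the library's actual code:

# Hedged sketch of what replace_relu6_with_relu might do: swap ReLU6
# children for plain ReLU in place so equalization is not capped at 6.
# Illustration only; not the library's implementation.
import torch

def replace_relu6_with_relu_sketch(module: torch.nn.Module) -> None:
    for name, child in module.named_children():
        if isinstance(child, torch.nn.ReLU6):
            setattr(module, name, torch.nn.ReLU(inplace=child.inplace))
        else:
            replace_relu6_with_relu_sketch(child)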
Example #4
def add_layer_saturation(layer: torch.nn.Module,
                         eig_vals: Optional[np.ndarray] = None,
                         n_iter: Optional[int] = None,
                         method: str = 'cumvar99'):
    """Log a layer's saturation and intrinsic dimensionality to TensorBoard."""
    training_state = get_training_state(layer)
    layer_type = layer._get_name().lower()

    if eig_vals is None:
        eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
    if n_iter is None:
        n_iter = layer.forward_iter
    nr_eig_vals = get_explained_variance(eig_vals)

    layer_name = layer.name + (f'_{layer.conv_method}'
                               if layer_type == 'conv2d' else '')
    if method == 'cumvar99':
        saturation = get_layer_saturation(nr_eig_vals, layer.out_features)
        layer.writer.add_scalar(
            f'{training_state}-{layer_name}-percent_saturation-{method}',
            saturation, n_iter)
    elif method == 'simpson_di':
        saturation = get_eigenval_diversity_index(eig_vals)
        layer.writer.add_scalar(
            f'{training_state}-{layer_name}-percent_saturation-{method}',
            saturation, n_iter)
    elif method == 'all':
        cumvar99_saturation = get_layer_saturation(nr_eig_vals,
                                                   layer.out_features)
        layer.writer.add_scalar(
            f'{training_state}-{layer_name}-percent_saturation-cumvar99',
            cumvar99_saturation, n_iter)
        simpson_di_saturation = get_eigenval_diversity_index(eig_vals)
        saturation = simpson_di_saturation
        layer.writer.add_scalar(
            f'{training_state}-{layer_name}-percent_saturation-simpson_di',
            simpson_di_saturation, n_iter)
    else:
        # Guard against an unbound `saturation` at the return below.
        raise ValueError(f"Unknown saturation method: {method}")
    layer.writer.add_scalar(
        f'{training_state}-{layer_name}-intrinsic_dimensionality', nr_eig_vals,
        n_iter)
    return eig_vals, saturation
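
The 'cumvar99' helpers (get_explained_variance, get_layer_saturation) are not shown; here is a self-contained sketch of the idea, counting the eigenvalues needed to explain 99% of the variance and normalizing by layer width. The helper names and exact semantics are assumed:

# Self-contained sketch of the 'cumvar99' saturation idea; the helpers'
# exact semantics in the snippet above are assumed, not known.
import numpy as np

def explained_variance_dims(eig_vals: np.ndarray, threshold: float = 0.99) -> int:
    # Number of leading eigenvalues needed to reach `threshold` cumulative variance.
    vals = np.sort(eig_vals)[::-1]
    cumvar = np.cumsum(vals) / vals.sum()
    return int(np.searchsorted(cumvar, threshold) + 1)

def layer_saturation_sketch(n_dims: int, out_features: int) -> float:
    # Saturation as a percentage of the layer's output width.
    return 100.0 * n_dims / out_features

eig = np.array([5.0, 2.0, 1.0, 0.5, 0.01])
print(layer_saturation_sketch(explained_variance_dims(eig), out_features=5))  # 80.0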