Example #1
0
def process_oneshot_mutations(base_model: nn.Module, evaluator: Evaluator):
    """Wrap a raw ``base_model`` into a ``graph.Model`` for one-shot strategies.

    The wrapping is admittedly hacky, but the strategy interface requires a
    ``graph.Model``, so the raw module is simply stashed on ``python_object``.
    Returns the wrapped model and an empty mutator list.
    """
    wrapped = Model(_internal=True)
    wrapped.python_object = base_model
    # The evaluator is attached by the caller after this function returns,
    # so it is deliberately not set here.
    return wrapped, []
Example #2
0
def process_inline_mutation(model: Model) -> Optional[List[Mutator]]:
    """Collect mutators for all inline mutation APIs found in ``model``.

    Covers InputChoice, ValueChoice (standalone and as operator parameters),
    layer-choice cells and repeat cells. Returns ``None`` when the model
    contains no mutable node.
    """
    mutators: List[Mutator] = []

    for group in _group_by_label(model.get_nodes_by_type('__torch__.nni.retiarii.nn.pytorch.api.InputChoice')):
        assert _is_all_equal(n.operation.parameters['n_candidates'] for n in group) and \
            _is_all_equal(n.operation.parameters['n_chosen'] for n in group), \
            'Input choice with the same label must have the same number of candidates.'
        mutators.append(InputChoiceMutator(group))

    for group in _group_by_label(model.get_nodes_by_type('__torch__.nni.retiarii.nn.pytorch.api.ValueChoice')):
        assert _is_all_equal(n.operation.parameters['candidates'] for n in group), \
            'Value choice with the same label must have the same candidates.'
        mutators.append(ValueChoiceMutator(group, group[0].operation.parameters['candidates']))

    # ValueChoice instances used as operator arguments (e.g. Conv2d(out_channels=...))
    param_choices = [
        (node, name)
        for node in model.get_nodes()
        for name, choice in node.operation.parameters.items()
        if isinstance(choice, ValueChoice)
    ]
    for group in _group_parameters_by_label(param_choices):
        assert _is_all_equal([node.operation.parameters[name].candidates for node, name in group]), \
            'Value choice with the same label must have the same candidates.'
        head_node, head_name = group[0]
        mutators.append(ParameterChoiceMutator(group, head_node.operation.parameters[head_name].candidates))

    # apply layer choice at last as it will delete some nodes
    for group in _group_by_label(n for n in model.get_nodes_by_type('_cell')
                                 if n.operation.parameters.get('mutation') == 'layerchoice'):
        assert _is_all_equal(len(n.operation.parameters['candidates']) for n in group), \
            'Layer choice with the same label must have the same number of candidates.'
        mutators.append(LayerChoiceMutator(group))

    for group in _group_by_label(n for n in model.get_nodes_by_type('_cell')
                                 if n.operation.parameters.get('mutation') == 'repeat'):
        assert _is_all_equal(n.operation.parameters['max_depth'] for n in group) and \
            _is_all_equal(n.operation.parameters['min_depth'] for n in group), \
            'Repeat with the same label must have the same number of candidates.'
        mutators.append(RepeatMutator(group))

    return mutators or None
Example #3
0
    def mutate(self, model: Model) -> None:
        """Apply the leaf value-choice decisions recorded in history to the graph."""
        # Decisions made by leaf mutators, e.g. {"label1": "cat", "label2": 123}.
        decisions = {
            mut.mutator.label: mut.samples[0]
            for mut in model.history
            if isinstance(mut.mutator, ParameterChoiceLeafMutator)
        }

        for node, argname in self.nodes:
            # argname is the location of the argument, e.g.
            # Conv2d(out_channels=nn.ValueChoice([1, 2, 3])) => argname = "out_channels"
            value_choice: ValueChoiceX = node.operation.parameters[argname]

            # Substitute every leaf of the ValueChoiceX computation graph,
            # then evaluate the expression to a concrete value.
            leaf_values = [decisions[c.label] for c in value_choice.inner_choices()]
            result_value = value_choice.evaluate(leaf_values)

            # Write the concrete value back via graph mutation primitives.
            target = cast(Node, model.get_node_by_name(node.name))
            target.update_operation(
                target.operation.type,
                {**target.operation.parameters, argname: result_value})
Example #4
0
    def mutate(self, model: Model) -> None:
        """Re-create the evaluator with the sampled evaluator-side value choices."""
        # Collect the decision recorded by each corresponding leaf mutator.
        decisions = {
            mut.mutator.label: mut.samples[0]
            for mut in model.history
            if isinstance(mut.mutator, EvaluatorValueChoiceLeafMutator)
        }
        model.evaluator = self._mutate_traceable_object(model.evaluator, decisions)
Example #5
0
 def mutate(self, model: Model):
     """Draw the choices for this label; no graph effect, only history recording."""
     for node in model.get_nodes_by_label(self.label):
         n_chosen = self.number_of_chosen(node)
         if n_chosen is None:
             # FIXME This is a hack to make choice align with the previous format
             # For example, it will convert [False, True, True] into [1, 2].
             picked = [cand for cand in self.candidates(node) if self.choice([False, True])]
             self._cur_samples = picked
         else:
             # fixed number of picks: just sample n_chosen times
             for _ in range(n_chosen):
                 self.choice(self.candidates(node))
         break  # only the first node carrying this label matters
Example #6
0
    def mutate(self, model: Model):
        """Assemble a NAS-Bench-101 cell from the decisions already in history.

        Reads the samples recorded by earlier mutators in ``model.history``,
        validates the edge budget, builds the connection matrix plus the
        operation list, and prunes the cell.

        Raises:
            InvalidMutation: if the sampled cell exceeds ``max_num_edges``,
                or ``prune`` finds the cell degenerate.
        """
        max_num_edges = cast(int, None)
        for node in model.get_nodes_by_label(self.label):
            max_num_edges = node.operation.parameters['max_num_edges']
            break
        assert max_num_edges is not None
        # label -> recorded samples of the mutator that produced them
        mutation_dict = {mut.mutator.label: mut.samples for mut in model.history}
        num_nodes = mutation_dict[f'{self.label}/num_nodes'][0]
        adjacency_list = [mutation_dict[f'{self.label}/input{i}'] for i in range(1, num_nodes)]
        # generator expression: no throwaway list inside sum()
        if sum(len(e) for e in adjacency_list) > max_num_edges:
            # max_num_edges is an upper bound, not an exact expectation
            raise InvalidMutation(f'Expected at most {max_num_edges} edges, found: {adjacency_list}')
        matrix = _NasBench101CellFixed.build_connection_matrix(adjacency_list, num_nodes)

        # first/last rows are the fixed IN/OUT nodes
        operations = ['IN'] + [mutation_dict[f'{self.label}/op{i}'][0] for i in range(1, num_nodes - 1)] + ['OUT']
        assert len(operations) == len(matrix)
        matrix, operations = prune(matrix, operations)  # possible to raise InvalidMutation inside

        # NOTE: a hack to maintain a clean copy of what nasbench101 cell looks like
        self._cur_samples = {}
        for i in range(1, len(matrix)):
            if i + 1 < len(matrix):
                self._cur_samples[f'op{i}'] = operations[i]
            self._cur_samples[f'input{i}'] = [k for k in range(i) if matrix[k, i]]
        self._cur_samples = [self._cur_samples]  # by design, _cur_samples is a list of samples
Example #7
0
def extract_mutation_from_pt_module(
        pytorch_model: nn.Module) -> Tuple[Model, Optional[List[Mutator]]]:
    """Wrap *pytorch_model* in a graph ``Model`` and collect its mutators.

    Walks the module hierarchy, registering one hidden graph node per mutable
    construct (hyper-parameter spec, LayerChoice, InputChoice, ValueChoice,
    NasBench101Cell), then groups nodes sharing a label/type into mutators.

    Returns:
        ``(model, mutators)``; ``mutators`` is ``None`` when the module
        contains nothing mutable.

    Raises:
        ValueError: if the model has init parameters but is not decorated
            with ``@model_wrapper``.
        NotImplementedError: if a ``Placeholder`` module is present.
    """
    model = Model(_internal=True)
    graph = Graph(model, uid(), '_model', _internal=True)._register()
    model.python_class = pytorch_model.__class__
    # __init__ taking parameters beyond `self` means traced kwargs are needed,
    # and those only exist when the model was wrapped with @model_wrapper.
    if len(inspect.signature(model.python_class.__init__).parameters) > 1:
        if not is_model_wrapped(pytorch_model):
            raise ValueError(
                'Please annotate the model with @model_wrapper decorator in python execution mode '
                'if your model has init parameters.')
        model.python_init_params = cast(dict, pytorch_model.trace_kwargs)
    else:
        model.python_init_params = {}

    # hyper-parameter choice
    namespace: ModelNamespace = cast(ModelNamespace,
                                     pytorch_model._model_namespace)
    for param_spec in namespace.parameter_specs:
        # only categorical "choice" specs are supported here
        assert param_spec.categorical and param_spec.type == 'choice'
        node = graph.add_node(f'param_spec_{param_spec.name}',
                              'ModelParameterChoice',
                              {'candidates': param_spec.values})
        node.label = param_spec.name

    for name, module in pytorch_model.named_modules():
        # tricky case: value choice that serves as parameters are stored in traced arguments
        if is_basic_unit(module):
            trace_kwargs = cast(Dict[str, Any], module.trace_kwargs)
            for key, value in trace_kwargs.items():
                if isinstance(value, ValueChoiceX):
                    # a composed value choice may hold several leaves;
                    # register one node per inner (leaf) choice
                    for i, choice in enumerate(value.inner_choices()):
                        node = graph.add_node(
                            f'{name}.init.{key}.{i}', 'ValueChoice',
                            {'candidates': choice.candidates})
                        node.label = choice.label

        if isinstance(module, (LayerChoice, InputChoice, ValueChoice)):
            # TODO: check the label of module and warn if it's auto-generated
            pass
        if isinstance(module, LayerChoice):
            node = graph.add_node(name, 'LayerChoice',
                                  {'candidates': module.names})
            node.label = module.label
        if isinstance(module, InputChoice):
            node = graph.add_node(name, 'InputChoice', {
                'n_candidates': module.n_candidates,
                'n_chosen': module.n_chosen
            })
            node.label = module.label
        if isinstance(module, ValueChoiceX):
            # expand a (possibly composed) value choice into its leaves
            for i, choice in enumerate(module.inner_choices()):
                node = graph.add_node(f'{name}.{i}', 'ValueChoice',
                                      {'candidates': choice.candidates})
                node.label = choice.label
        if isinstance(module, NasBench101Cell):
            node = graph.add_node(name, 'NasBench101Cell',
                                  {'max_num_edges': module.max_num_edges})
            node.label = module.label
        if isinstance(module, Placeholder):
            raise NotImplementedError(
                'Placeholder is not supported in python execution mode.')

    model.status = ModelStatus.Frozen
    if not graph.hidden_nodes:
        # nothing mutable was registered
        return model, None

    mutators = []
    mutators_final = []
    for nodes in _group_by_label_and_type(graph.hidden_nodes):
        # nodes sharing a label must be interchangeable: same type and params
        label = nodes[0].label
        assert label is not None, f'label of {nodes[0]} can not be None.'
        assert _is_all_equal(map(lambda n: n.operation.type, nodes)), \
            f'Node with label "{label}" does not all have the same type.'
        assert _is_all_equal(map(lambda n: n.operation.parameters, nodes)), \
            f'Node with label "{label}" does not agree on parameters.'
        if nodes[0].operation.type == 'NasBench101Cell':
            # The mutation of Nas-bench-101 is special, and has to be done lastly.
            mutators_final.append(NasBench101Mutator(label))
        else:
            mutators.append(ManyChooseManyMutator(label))
    return model, mutators + mutators_final
Example #8
0
def process_inline_mutation(model: Model) -> Optional[List[Mutator]]:
    """Collect mutators for every inline mutation API found in ``model``.

    Covers InputChoice, standalone ValueChoice, ValueChoice compositions used
    as operator arguments, layer-choice cells and repeat cells.

    Returns:
        The list of mutators to apply, or ``None`` if nothing is mutable.
    """
    applied_mutators = []

    ic_nodes = _group_by_label(
        model.get_nodes_by_type(
            '__torch__.nni.retiarii.nn.pytorch.api.InputChoice'))
    for node_list in ic_nodes:
        assert _is_all_equal(map(lambda node: node.operation.parameters['n_candidates'], node_list)) and \
            _is_all_equal(map(lambda node: node.operation.parameters['n_chosen'], node_list)), \
            'Input choice with the same label must have the same number of candidates.'
        mutator = InputChoiceMutator(node_list)
        applied_mutators.append(mutator)

    vc_nodes = _group_by_label(
        model.get_nodes_by_type(
            '__torch__.nni.retiarii.nn.pytorch.api.ValueChoice'))
    for node_list in vc_nodes:
        assert _is_all_equal(map(lambda node: node.operation.parameters['candidates'], node_list)), \
            'Value choice with the same label must have the same candidates.'
        mutator = ValueChoiceMutator(
            node_list, node_list[0].operation.parameters['candidates'])
        applied_mutators.append(mutator)

    # `pc_nodes` are arguments of basic units. They can be compositions.
    pc_nodes: List[Tuple[Node, str, ValueChoiceX]] = []
    for node in model.get_nodes():
        # arguments used in operators like Conv2d
        # argument `valuechoice` used in generated repeat cell
        for name, choice in node.operation.parameters.items():
            if isinstance(choice, ValueChoiceX):
                # e.g., (conv_node, "out_channels", ValueChoice([1, 3]))
                pc_nodes.append((node, name, choice))

    # Break `pc_nodes` down to leaf value choices. They should be what we want to sample.
    leaf_value_choices: Dict[str, List[Any]] = {}
    for _, __, choice in pc_nodes:
        for inner_choice in choice.inner_choices():
            if inner_choice.label not in leaf_value_choices:
                leaf_value_choices[
                    inner_choice.label] = inner_choice.candidates
            else:
                # leaves sharing a label must offer identical candidate lists
                assert leaf_value_choices[inner_choice.label] == inner_choice.candidates, \
                    'Value choice with the same label must have the same candidates, but found ' \
                    f'{leaf_value_choices[inner_choice.label]} vs. {inner_choice.candidates}'

    # one leaf mutator per distinct label samples the raw decisions
    for label, candidates in leaf_value_choices.items():
        applied_mutators.append(ParameterChoiceLeafMutator(candidates, label))

    # in the end, add another parameter choice mutator for "real" mutations
    if pc_nodes:
        applied_mutators.append(
            ParameterChoiceMutator([(node, name)
                                    for node, name, _ in pc_nodes]))

    # apply layer choice at last as it will delete some nodes
    lc_nodes = _group_by_label(
        filter(
            lambda d: d.operation.parameters.get('mutation') == 'layerchoice',
            model.get_nodes_by_type('_cell')))
    for node_list in lc_nodes:
        assert _is_all_equal(map(lambda node: len(node.operation.parameters['candidates']), node_list)), \
            'Layer choice with the same label must have the same number of candidates.'
        mutator = LayerChoiceMutator(node_list)
        applied_mutators.append(mutator)

    repeat_nodes = _group_by_label(
        filter(lambda d: d.operation.parameters.get('mutation') == 'repeat',
               model.get_nodes_by_type('_cell')))
    for node_list in repeat_nodes:
        # this check is not completely reliable, because it only checks max and min
        assert _is_all_equal(map(lambda node: node.operation.parameters['max_depth'], node_list)) and \
            _is_all_equal(map(lambda node: node.operation.parameters['min_depth'], node_list)), \
            'Repeat with the same label must have the same candidates.'
        mutator = RepeatMutator(node_list)
        applied_mutators.append(mutator)

    if applied_mutators:
        return applied_mutators
    return None
Example #9
0
def extract_mutation_from_pt_module(
        pytorch_model: nn.Module) -> Tuple[Model, Optional[List[Mutator]]]:
    """Wrap *pytorch_model* in a graph ``Model`` and collect its mutators.

    Registers one hidden graph node per mutable module (LayerChoice,
    InputChoice, ValueChoice, Repeat, NasBench101Cell), then groups nodes
    sharing a label/type into mutators.

    Returns:
        ``(model, mutators)``; ``mutators`` is ``None`` when nothing mutable
        was found.

    Raises:
        ValueError: if the model takes init parameters but was not decorated
            with ``@model_wrapper``.
        NotImplementedError: if a ``Placeholder`` module is present.
    """
    model = Model(_internal=True)
    graph = Graph(model, uid(), '_model', _internal=True)._register()
    model.python_class = pytorch_model.__class__
    # __init__ with parameters beyond `self` requires traced kwargs, which
    # only exist when the model was wrapped with @model_wrapper.
    if len(inspect.signature(model.python_class.__init__).parameters) > 1:
        if not getattr(pytorch_model, '_nni_model_wrapper', False):
            raise ValueError(
                'Please annotate the model with @model_wrapper decorator in python execution mode '
                'if your model has init parameters.')
        model.python_init_params = pytorch_model.trace_kwargs
    else:
        model.python_init_params = {}

    for name, module in pytorch_model.named_modules():
        # tricky case: value choice that serves as parameters are stored in traced arguments
        if is_basic_unit(module):
            for key, value in module.trace_kwargs.items():
                if isinstance(value, ValueChoice):
                    node = graph.add_node(name + '.init.' + key, 'ValueChoice',
                                          {'candidates': value.candidates})
                    node.label = value.label

        if isinstance(module, (LayerChoice, InputChoice, ValueChoice)):
            # TODO: check the label of module and warn if it's auto-generated
            pass
        if isinstance(module, LayerChoice):
            node = graph.add_node(name, 'LayerChoice',
                                  {'candidates': module.names})
            node.label = module.label
        if isinstance(module, InputChoice):
            node = graph.add_node(name, 'InputChoice', {
                'n_candidates': module.n_candidates,
                'n_chosen': module.n_chosen
            })
            node.label = module.label
        if isinstance(module, ValueChoice):
            node = graph.add_node(name, 'ValueChoice',
                                  {'candidates': module.candidates})
            node.label = module.label
        # a Repeat is only registered when its depth range is valid (min <= max)
        if isinstance(module, Repeat) and module.min_depth <= module.max_depth:
            node = graph.add_node(name, 'Repeat', {
                'candidates':
                list(range(module.min_depth, module.max_depth + 1))
            })
            node.label = module.label
        if isinstance(module, NasBench101Cell):
            node = graph.add_node(name, 'NasBench101Cell',
                                  {'max_num_edges': module.max_num_edges})
            node.label = module.label
        if isinstance(module, Placeholder):
            raise NotImplementedError(
                'Placeholder is not supported in python execution mode.')

    model.status = ModelStatus.Frozen
    if not graph.hidden_nodes:
        # nothing mutable was registered
        return model, None

    mutators = []
    mutators_final = []
    for nodes in _group_by_label_and_type(graph.hidden_nodes):
        # nodes sharing a label must be interchangeable: same type and params
        assert _is_all_equal(map(lambda n: n.operation.type, nodes)), \
            f'Node with label "{nodes[0].label}" does not all have the same type.'
        assert _is_all_equal(map(lambda n: n.operation.parameters, nodes)), \
            f'Node with label "{nodes[0].label}" does not agree on parameters.'
        if nodes[0].operation.type == 'NasBench101Cell':
            # NAS-Bench-101 mutation is special and must run after all others
            mutators_final.append(NasBench101Mutator(nodes[0].label))
        else:
            mutators.append(ManyChooseManyMutator(nodes[0].label))
    return model, mutators + mutators_final