Example 1
    def __init__(self, op_desc:OpDesc, arch_params: Optional[ArchParams],
                 reduction:bool, affine:bool):
        super().__init__()

        # assume last PRIMITIVE is 'none' (this is used for finalize)
        assert PetridishOp.PRIMITIVES[-1] == 'none'

        # create one edge per input state for this op;
        # each edge holds all N primitives
        self._edges = nn.ModuleList()

        for i in range(op_desc.in_len):
            # edge contains all primitives with alphas
            edge = nn.ModuleList()
            self._edges.append(edge)

            # the stride may differ for each input, so make a copy of
            # the params and set the stride for this particular input
            params = deepcopy(op_desc.params)
            params['stride'] = op_desc.params['_strides'][i]

            # create primitives for the edge
            for primitive in PetridishOp.PRIMITIVES:
                primitive_op = Op.create(OpDesc(primitive, params=params,
                                                in_len=1, trainables=None),
                                        affine=affine, arch_params=None)
                # wrap the primitive with StopGradient
                op = nn.Sequential(StopGradient(), primitive_op)
                edge.append(op)

        # TODO: check with Dey: do we really need StopForwardReductionOp
        #   or StopGradientReductionOp? These two only make sense
        #   for cell stems.
        # NOTE: Consider the case where prev_prev is normal and prev is reduction:
        # then s_0 is twice as big in each dimension as s_1 and the number of channels
        # won't match, so you have to use StopGradientReductionOp on s_1 to make it match.
        self._sf = StopForward()

        # we do this at the end so that we can capture all arch params registered by
        # any previous child modules
        self._setup_arch_params(arch_params, op_desc.in_len)
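To make the role of the per-edge alphas registered above concrete, here is a minimal sketch of a DARTS-style forward pass in which each edge blends its primitives with a softmax over that edge's alphas; the function name, its arguments, and the blending details are illustrative assumptions, not the actual PetridishOp.forward.

import torch
import torch.nn.functional as F

def mixed_forward_sketch(edges, edge_alphas, inputs, stop_forward):
    # edges: one nn.ModuleList of primitive ops per input state
    # edge_alphas: one 1-D tensor per edge, with one entry per primitive
    # inputs: one input tensor per edge
    out = 0.0
    for edge, alphas, x in zip(edges, edge_alphas, inputs):
        weights = F.softmax(alphas, dim=0)  # blend weights for this edge
        out = out + sum(w * op(x) for w, op in zip(weights, edge))
    # apply StopForward to the summed result, mirroring self._sf above
    return stop_forward(out)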
Example 2
    def __init__(self, op_desc: OpDesc, arch_params: Optional[ArchParams],
                 affine: bool):
        super().__init__()

        # assume last PRIMITIVE is 'none'
        assert XnasOp.PRIMITIVES[-1] == 'none'

        self._ops = nn.ModuleList()
        for primitive in XnasOp.PRIMITIVES:
            op = Op.create(OpDesc(primitive,
                                  op_desc.params,
                                  in_len=1,
                                  trainables=None),
                           affine=affine,
                           arch_params=None)
            self._ops.append(op)

        # used to capture gradients at this non-leaf node
        self._grad = None

        # we do this at the end so that we can capture all arch params registered by
        # any previous child modules
        self._setup_arch_params(arch_params)
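The self._grad slot above relates to the standard PyTorch mechanism for capturing the gradient of a non-leaf tensor; the following generic sketch shows that mechanism (a tensor hook) and is not XnasOp's actual forward/backward code.

import torch

x = torch.randn(2, 3, requires_grad=True)
mid = x * 2                                        # non-leaf (intermediate) tensor
grads = {}
mid.register_hook(lambda g: grads.update(grad=g))  # hook fires during backward
mid.sum().backward()
print(grads['grad'].shape)                         # torch.Size([2, 3])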
Example 3
    def __init__(self, op_desc: OpDesc, arch_params: Optional[ArchParams],
                 affine: bool):
        super().__init__()

        # assume last PRIMITIVE is 'none'
        assert DivOp.PRIMITIVES[-1] == 'none'

        conf = get_conf()
        trainer = conf['nas']['search']['divnas']['archtrainer']
        finalizer = conf['nas']['search']['finalizer']

        if trainer == 'noalpha' and finalizer == 'default':
            raise NotImplementedError(
                'noalpha trainer is not implemented for the default finalizer')

        if trainer != 'noalpha':
            self._setup_arch_params(arch_params)
        else:
            self._alphas = None

        self._ops = nn.ModuleList()
        for primitive in DivOp.PRIMITIVES:
            op = Op.create(OpDesc(primitive,
                                  op_desc.params,
                                  in_len=1,
                                  trainables=None),
                           affine=affine,
                           arch_params=None)
            self._ops.append(op)

        # various state variables for diversity
        self._collect_activations = False
        self._forward_counter = 0
        self._batch_activs = None
        self._indices_of_notallowed()
        self._create_mapping_valid_to_orig()
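For reference, a hypothetical config fragment with the key nesting that DivOp reads above; the value strings are placeholders (the real archai YAML contains many more settings), and the only constraint visible here is that the 'noalpha' trainer cannot be combined with the 'default' finalizer.

conf_sketch = {
    'nas': {
        'search': {
            'divnas': {'archtrainer': 'bilevel'},  # placeholder; any value other than 'noalpha' keeps arch params
            'finalizer': 'divnas',                 # placeholder; 'default' requires an alpha-based trainer
        }
    }
}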
Example 4
    def pre_build(self, conf_model_desc: Config) -> None:
        Op.register_op(
            'div_op', lambda op_desc, arch_params, affine: DivOp(
                op_desc, arch_params, affine))
Example 5
    def register_ops(self) -> None:
        Op.register_op(
            'gs_op', lambda op_desc, arch_params, affine: GsOp(
                op_desc, arch_params, affine))
Example 6
def _stem_reductions(stems: List[OpDesc]) -> List[int]:
    # create stem ops to find out reduction factors
    ops = [Op.create(stem, affine=False) for stem in stems]
    assert all(isinstance(op, StemBase) for op in ops)
    return [op.reduction for op in ops]
Example 7
    def pre_build(self, conf_model_desc: Config) -> None:
        Op.register_op(
            'nasbench101_op',
            lambda op_desc, arch_params, affine: NasBench101Op(
                op_desc, arch_params, affine))
Example 8
    def register_ops(self) -> None:
        Op.register_op(
            'nasbench101_op',
            lambda op_desc, arch_params, affine: NasBench101Op(
                op_desc, arch_params, affine))
Example 9
    def register_ops(self) -> None:
        Op.register_op(
            'mixed_op', lambda op_desc, arch_params, affine: MixedOp(
                op_desc, arch_params, affine))
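As a hedged usage sketch, an op registered this way would later be constructed through Op.create with the registered name, mirroring the Op.create calls in the earlier examples; the params contents below are placeholders.

op_desc = OpDesc('mixed_op', params={'stride': 1}, in_len=1, trainables=None)
op = Op.create(op_desc, affine=True, arch_params=None)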