Example 1
    def add_super_layer(self, C_cur, C_p, reduction_cur=False, cell_num=3):
        cells = nn.ModuleList()
        reduction_idx = cell_num - 1  # the last cell of the stage performs the reduction

        for i in range(cell_num):
            if i == reduction_idx and reduction_cur:
                C_cur *= 2
                reduction = True
            else:
                reduction = False

            if reduction:
                cell = ResNetBasicblock(C_p, C_cur, 2)
            else:
                cell = SearchCell(C_p, C_cur, 1, self.n_nodes,
                                  self.search_space, self.bn_affine,
                                  self.track_running_stats)
                if self.num_edge is None:
                    self.num_edge, self.edge2index = cell.num_edges, cell.edge2index
                else:
                    assert self.num_edge == cell.num_edges and self.edge2index == cell.edge2index, 'invalid {:} vs. {:}.'.format(
                        self.num_edge, cell.num_edges)

            cells.append(cell)
            C_p = cell.out_dim

        return cells
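A minimal caller sketch for this helper, assuming a NAS-Bench-201-style macro layout of three stages (the stage widths, `N`, and the surrounding attribute names are assumptions, not part of the snippet above):

    # hypothetical caller: three stages, doubling the width at each stage boundary
    C, N = 16, 5
    self.num_edge, self.edge2index = None, None
    cells = nn.ModuleList()
    cells.extend(self.add_super_layer(C, C, reduction_cur=True, cell_num=N + 1))
    cells.extend(self.add_super_layer(2 * C, 2 * C, reduction_cur=True, cell_num=N + 1))
    cells.extend(self.add_super_layer(4 * C, 4 * C, reduction_cur=False, cell_num=N))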
Example 2
    def add_super_layer(self,
                        C_cur,
                        C_p,
                        C_pp,
                        reduction_p=False,
                        reduction_cur=False,
                        cell_num=3,
                        is_slim=False):
        cells = nn.ModuleList()
        # with a ResNet-style stem the first cell of the stage downsamples;
        # otherwise the last cell does
        if self.res_stem:
            reduction_idx = 0
        else:
            reduction_idx = cell_num - 1

        for i in range(cell_num):
            if i == reduction_idx and reduction_cur:
                C_cur *= 2
                reduction = True
            else:
                reduction = False
            cell = SearchCell(self.n_nodes, C_pp, C_p, C_cur, reduction_p,
                              reduction, is_slim)
            reduction_p = reduction
            cells.append(cell)
            C_cur_out = C_cur * self.n_nodes
            C_pp, C_p = C_p, C_cur_out

        return cells
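The only structural difference from Example 1's variant is where the stride-2 cell sits within a stage; a short illustration for cell_num=3, plus a hypothetical call (all argument values are illustrative only):

    # res_stem=True : reduction_idx = 0            -> [reduce, normal, normal]
    # res_stem=False: reduction_idx = cell_num - 1 -> [normal, normal, reduce]
    stage = self.add_super_layer(C_cur=32, C_p=64, C_pp=64,
                                 reduction_p=False, reduction_cur=True,
                                 cell_num=3, is_slim=False)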
Example 3
    def __init__(self,
                 C_in,
                 C,
                 n_classes,
                 n_layers,
                 n_nodes=4,
                 stem_multiplier=3):
        """
        Args:
            C_in: # of input channels
            C : # of starting model channels
            n_classes: # of classes
            n_layers: # of layers
            n_nodes: # of intermediate nodes in Cell
            stem_multiplier: the stem widens C to stem_multiplier * C
        """
        super().__init__()
        self.C_in = C_in
        self.C = C
        self.n_classes = n_classes
        self.n_layers = n_layers

        C_cur = stem_multiplier * C
        self.stem = nn.Sequential(nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
                                  nn.BatchNorm2d(C_cur))

        # for the first cell, the stem output is used for both s0 and s1
        # [!] C_pp and C_p are output channel sizes, but C_cur is an input channel size.

        C_pp, C_p, C_cur = C_cur, C_cur, C

        self.cells = nn.ModuleList()
        reduction_p = False
        for i in range(n_layers):

            # with exactly two layers, make the first cell normal and the second
            # a reduction (the 1/3-2/3 rule below would otherwise reduce at layer 0)
            if n_layers == 2:
                if i == 1:
                    C_cur *= 2
                    reduction = True
                else:
                    reduction = False

            # Halve the feature map and double the channels at 1/3 and 2/3 of the depth.
            elif i in [n_layers // 3, 2 * n_layers // 3]:
                C_cur *= 2
                reduction = True
            else:
                reduction = False

            cell = SearchCell(n_nodes, C_pp, C_p, C_cur, reduction_p,
                              reduction)
            reduction_p = reduction
            self.cells.append(cell)
            C_cur_out = C_cur * n_nodes
            C_pp, C_p = C_p, C_cur_out

        self.gap = nn.AdaptiveAvgPool2d(1)
        self.linear = nn.Linear(C_p, n_classes)
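A forward pass consistent with this constructor could look as follows; this is a sketch assuming DARTS-style cells that take the two previous feature maps plus architecture weights (the `weights_normal`/`weights_reduce` arguments and the `cell.reduction` attribute are assumptions, not shown above):

    def forward(self, x, weights_normal, weights_reduce):
        s0 = s1 = self.stem(x)  # the stem feeds both cell inputs
        for cell in self.cells:
            weights = weights_reduce if cell.reduction else weights_normal
            s0, s1 = s1, cell(s0, s1, weights)
        out = self.gap(s1)
        out = out.view(out.size(0), -1)  # flatten for the classifier
        return self.linear(out)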
Example 4
    def __init__(self,
                 config,
                 C_in,
                 C,
                 n_classes,
                 n_layers,
                 n_nodes=4,
                 stem_multiplier=3):
        """
        Args:
            C_in: # of input channels
            C: # of starting model channels
            n_classes: # of classes
            n_layers: # of layers
            n_nodes: # of intermediate nodes in Cell
            stem_multiplier: the stem widens C to stem_multiplier * C
        """
        super().__init__()
        self.voice_attention_model = Voice_Attention(config.vocab_size, config)
        self.C_in = C_in
        self.C = C
        self.n_classes = n_classes
        self.n_layers = n_layers

        C_cur = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.ConstantPad2d((1, 1, (3 - 1) * 1, 0), 0.0),  # (left, right, top, bottom)
            nn.Conv2d(C_in,
                      C_cur,
                      kernel_size=3,
                      stride=1,
                      padding=0,
                      bias=False), nn.BatchNorm2d(C_cur))

        # for the first cell, the stem output is used for both s0 and s1
        # [!] C_pp and C_p are output channel sizes, but C_cur is an input channel size.
        C_pp, C_p, C_cur = C_cur, C_cur, C

        self.cells = nn.ModuleList()
        reduction_p = False
        for i in range(n_layers):
            # Halve the feature map and double the channels at 1/3 and 2/3 of the depth.
            if i in [n_layers // 3, 2 * n_layers // 3]:
                C_cur *= 2
                reduction = True
            else:
                reduction = False

            cell = SearchCell(n_nodes, C_pp, C_p, C_cur, reduction_p,
                              reduction)
            reduction_p = reduction
            self.cells.append(cell)
            C_cur_out = C_cur * n_nodes
            C_pp, C_p = C_p, C_cur_out

        self.gap = nn.AdaptiveAvgPool2d((200, 1))
        self.linear = nn.Linear(C_p, n_classes)
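The asymmetric ConstantPad2d in this stem pads the height axis on one side only (top = (3 - 1) * 1 = 2, bottom = 0), so the 3x3 convolution preserves both spatial sizes while looking only "backwards" along height, presumably the time axis in this audio model. A quick standalone shape check (the input sizes are illustrative):

    import torch
    import torch.nn as nn

    pad = nn.ConstantPad2d((1, 1, 2, 0), 0.0)   # (left, right, top, bottom)
    conv = nn.Conv2d(1, 8, kernel_size=3, stride=1, padding=0, bias=False)
    x = torch.randn(1, 1, 200, 40)              # (batch, C_in, time, features)
    print(conv(pad(x)).shape)                   # torch.Size([1, 8, 200, 40])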
Example 5
    def __init__(self,
                 C_in,
                 C,
                 n_classes,
                 n_layers,
                 criterion,
                 n_nodes=4,
                 stem_multiplier=3):
        """
        Args:
            C_in: # of input channels
            C: # of starting model channels
            n_classes: # of classes
            n_layers: # of layers
            n_nodes: # of intermediate nodes in Cell
            stem_multiplier: the stem widens C to stem_multiplier * C
        """
        super().__init__()
        self.C_in = C_in
        self.C = C
        self.n_classes = n_classes
        self.n_layers = n_layers
        self.n_nodes = n_nodes
        self.criterion = criterion

        C_cur = stem_multiplier * C
        self.stem = nn.Sequential(nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
                                  nn.BatchNorm2d(C_cur))

        # for the first cell, the stem output is used for both s0 and s1
        # [!] C_pp and C_p are output channel sizes, but C_cur is an input channel size.
        C_pp, C_p, C_cur = C_cur, C_cur, C

        self.cells = nn.ModuleList()
        reduction_p = False
        for i in range(n_layers):
            # Halve the feature map and double the channels at 1/3 and 2/3 of the depth.
            if i in [n_layers // 3, 2 * n_layers // 3]:
                C_cur *= 2
                reduction = True
            else:
                reduction = False

            cell = SearchCell(n_nodes, C_pp, C_p, C_cur, reduction_p,
                              reduction)
            reduction_p = reduction
            self.cells.append(cell)
            C_cur_out = C_cur * n_nodes
            C_pp, C_p = C_p, C_cur_out

        self.gap = nn.AdaptiveAvgPool2d(1)
        self.linear = nn.Linear(C_p, n_classes)

        # initialize architect parameters: alphas
        self._init_alphas()
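`_init_alphas` is not shown above; a minimal sketch of the usual DARTS-style initialization (the `PRIMITIVES` operation list and the small-Gaussian init are assumptions, and the standard `import torch` is assumed alongside `torch.nn as nn`):

    def _init_alphas(self):
        n_ops = len(PRIMITIVES)  # assumed global list of candidate operations
        self.alpha_normal = nn.ParameterList()
        self.alpha_reduce = nn.ParameterList()
        for i in range(self.n_nodes):
            # node i has i + 2 incoming edges: two cell inputs plus prior nodes
            self.alpha_normal.append(
                nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))
            self.alpha_reduce.append(
                nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))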
Example 6
    def __init__(self,
                 C_in,
                 C,
                 n_classes,
                 n_layers,
                 n_nodes=4,
                 stem_multiplier=3):
        """
        Args:
            C_in: # of input channels
            C: # of starting model channels
            n_classes: # of classes
            n_layers: # of layers
            n_nodes: # of intermediate nodes in Cell
            stem_multiplier: unused here; the stem width is fixed at 32
        """
        super().__init__()
        self.C_in = C_in
        self.C = C
        self.n_classes = n_classes
        self.n_layers = n_layers

        C_cur = 32  # fixed stem width; stem_multiplier is not used in this variant
        self.stem = nn.Sequential(
            nn.BatchNorm2d(C_in),
            nn.Conv2d(C_in, C_cur, 5, 2, 2, bias=False),
            nn.BatchNorm2d(C_cur),
            nn.ReLU(),
            nn.Conv2d(C_cur, C_cur, 3, 2, 1, bias=False),
            nn.BatchNorm2d(C_cur),
            nn.ReLU(),
        )

        C_pp, C_p, C_cur = C_cur, C_cur, C_cur

        self.cells = nn.ModuleList()
        reduction_p = False
        for i in range(n_layers):
            if i in [1 * n_layers // 6, 3 * n_layers // 6, 5 * n_layers // 6]:
                C_cur *= 2
                reduction = True
            else:
                reduction = False
            cell = SearchCell(n_nodes, C_pp, C_p, C_cur, reduction_p,
                              reduction)
            reduction_p = reduction
            self.cells.append(cell)
            C_cur_out = C_cur * n_nodes
            C_pp, C_p = C_p, C_cur_out
        self.gap = nn.Sequential(nn.Conv2d(C_p, 512, 3, 2, 1, bias=False),
                                 nn.BatchNorm2d(512), nn.AdaptiveAvgPool2d(1))
        self.linear = nn.Linear(512, n_classes)
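Unlike the CIFAR-style variants above, this stem downsamples by 4x and the head convolution by another 2x; together with the three in-network reductions the total stride is 64. A quick arithmetic check:

    # stride budget: stem 2 * 2, three reduction cells 2**3, head conv 2
    total_stride = (2 * 2) * (2 ** 3) * 2
    print(total_stride)  # 64, so a 256x256 input reaches the pool at 4x4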
Example 7
    def __init__(self,
                 C_in,
                 C,
                 n_classes,
                 n_layers,
                 n_nodes=4,
                 stem_multiplier=3,
                 bn_momentum=0.1):
        """
        Args:
            C_in: # of input channels
            C: # of starting model channels
            n_classes: # of classes
            n_layers: # of layers
            n_nodes: # of intermediate nodes in Cell
            stem_multiplier: the stem widens C to stem_multiplier * C
        """
        super().__init__()
        self.C_in = C_in
        self.C = C
        self.n_classes = n_classes
        self.n_layers = n_layers

        C_cur = stem_multiplier * C
        self.stem = nn.Sequential(
            nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
            nn.BatchNorm2d(C_cur, momentum=bn_momentum),
            # modification: the stem stacks two 3x3 convolutions
            nn.Conv2d(C_cur, C_cur, 3, 1, 1, bias=False),
            nn.BatchNorm2d(C_cur, momentum=bn_momentum),
        )

        # we discard the skip connection between cells
        # [!] C_p is an output channel size, but C_cur is an input channel size.
        C_p, C_cur = C_cur, C

        self.cells = nn.ModuleList()
        for i in range(n_layers):
            # No reduction
            cell = SearchCell(n_nodes, C_p, C_cur, bn_momentum=bn_momentum)
            self.cells.append(cell)
            C_cur_out = C_cur * n_nodes
            C_p = C_cur_out

        self.gap = nn.AdaptiveAvgPool2d(1)
        self.linear = nn.Linear(C_p, n_classes)
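Since the inter-cell skip connection is dropped, each cell sees a single predecessor; a forward sketch under that assumption (the per-cell `weights` argument is assumed):

    def forward(self, x, weights):
        s = self.stem(x)
        for cell in self.cells:
            s = cell(s, weights)  # single input; no (s0, s1) pair
        out = self.gap(s)
        out = out.view(out.size(0), -1)
        return self.linear(out)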
Example 8
    def __init__(self, in_dim, out_dim, n_layers, n_nodes=4):
        """
        Args:
            in_dim: # of input features
            out_dim: # of classes
            n_layers: # of layers
            n_nodes: # of intermediate nodes in Cell
        """
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.n_layers = n_layers

        self.cells = nn.ModuleList()
        for i in range(n_layers):
            cell = SearchCell(in_dim, n_nodes)
            in_dim = cell.out_features
            self.cells.append(cell)
        self.linear = nn.Linear(in_dim, self.out_dim)
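This is the fully connected counterpart of the convolutional networks above; a forward sketch on the same assumptions (a per-cell `weights` argument, with `cell(x, weights)` returning the widened feature vector):

    def forward(self, x, weights):
        for cell in self.cells:
            x = cell(x, weights)
        return self.linear(x)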
Example 9
    def __init__(
        self,
        C_in,
        C,
        n_classes,
        n_layers,
        n_nodes=4,
        stem_multiplier=3,
        imagenet_mode=False,
    ):
        """
        Args:
            C_in: # of input channels
            C: # of starting model channels
            n_classes: # of classes
            n_layers: # of layers
            n_nodes: # of intermediate nodes in Cell
            stem_multiplier: the stem widens C to stem_multiplier * C
        """
        super().__init__()
        self.C_in = C_in
        self.C = C
        self.n_classes = n_classes
        self.n_layers = n_layers
        self.imagenet_mode = imagenet_mode

        if imagenet_mode:
            self.stem0 = nn.Sequential(
                nn.Conv2d(C_in,
                          C // 2,
                          kernel_size=3,
                          stride=2,
                          padding=1,
                          bias=False),
                nn.BatchNorm2d(C // 2),
                nn.ReLU(inplace=True),
                nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(C),
            )

            self.stem1 = nn.Sequential(
                nn.ReLU(inplace=True),
                nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(C),
            )
            C_pp, C_p, C_cur = C, C, C
        else:
            C_cur = stem_multiplier * C
            self.stem = nn.Sequential(
                nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
                nn.BatchNorm2d(C_cur))
            # for the first cell, the stem output is used for both s0 and s1
            # [!] C_pp and C_p are output channel sizes, but C_cur is an input channel size.
            C_pp, C_p, C_cur = C_cur, C_cur, C

        self.cells = nn.ModuleList()
        reduction_p = imagenet_mode  # stem1 already downsampled, so s1 counts as reduced
        for i in range(n_layers):
            # Halve the feature map and double the channels at 1/3 and 2/3 of the depth.
            if i in [n_layers // 3, 2 * n_layers // 3]:
                C_cur *= 2
                reduction = True
            else:
                reduction = False

            cell = SearchCell(n_nodes, C_pp, C_p, C_cur, reduction_p,
                              reduction)
            reduction_p = reduction
            self.cells.append(cell)
            C_cur_out = C_cur * n_nodes
            C_pp, C_p = C_p, C_cur_out

        self.gap = nn.AdaptiveAvgPool2d(1)
        self.linear = nn.Linear(C_p, n_classes)
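In `imagenet_mode` the two stems replace the single stem when producing the first pair of cell inputs; a forward sketch with the same assumed cell interface as in Example 3:

    def forward(self, x, weights_normal, weights_reduce):
        if self.imagenet_mode:
            s0 = self.stem0(x)
            s1 = self.stem1(s0)
        else:
            s0 = s1 = self.stem(x)
        for cell in self.cells:
            weights = weights_reduce if cell.reduction else weights_normal
            s0, s1 = s1, cell(s0, s1, weights)
        out = self.gap(s1)
        return self.linear(out.view(out.size(0), -1))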