Example #1
                C, C_out, 1, stride, 0, affine=affine))
        elif downsample == "avgpool":
            self.skip_op = Identity() if stride == 1 else ResNetDownSample(
                stride)

    def forward(self, inputs):
        # Two binary convs, each with its own shortcut: the first is summed with
        # the (possibly strided) skip path, the second with an identity residual.
        inner = self.op_1(inputs) + self.skip_op(inputs)
        out = self.op_2(inner) + inner
        return out


register_primitive(
    "bireal_resnet_block",
    lambda C, C_out, stride, affine: BinaryResNetBlock(C,
                                                       C_out,
                                                       stride=stride,
                                                       affine=affine,
                                                       block="bireal",
                                                       act=BirealActivation()),
)
register_primitive(
    "xnor_resnet_block",
    lambda C, C_out, stride, affine: BinaryResNetBlock(
        C, C_out, stride=stride, affine=affine, block="xnor", fp32_act=False),
)
register_primitive(
    "dorefa_resnet_block",
    lambda C, C_out, stride, affine: BinaryResNetBlock(
        C, C_out, stride=stride, affine=affine, block="dorefa"),
)
register_primitive(
Example #2
        # Mutate a single connection: pick a cell group, one of its non-init
        # nodes, and one incoming edge, then shift that edge's op index by a
        # non-zero offset modulo num_op_choices so the op always changes.
        child = parent.arch.copy()
        change_cell_idx = random.randint(0, self.num_cell_groups - 1)
        change_node_idx = random.randint(self.num_init_nodes,
                                         self._num_nodes - 1)
        change_node_from = random.randint(0, change_node_idx - 1)
        old = child[change_cell_idx][change_node_idx][change_node_from]
        offset = random.randint(1, self.num_op_choices - 1)
        child[change_cell_idx][change_node_idx][change_node_from] = (
            old + offset) % self.num_op_choices
        return DenseRobRollout(child, self)

    @classmethod
    def supported_rollout_types(cls):
        return ["dense_rob"]


class ResSepConv(nn.Module):
    """Separable conv summed with a residual path (FactorizedReduce when strided)."""

    def __init__(self, C_in, C_out, kernel_size, stride, padding):
        super(ResSepConv, self).__init__()
        self.conv = SepConv(C_in, C_out, kernel_size, stride, padding)
        self.res = Identity() if stride == 1 else FactorizedReduce(
            C_in, C_out, stride)

    def forward(self, x):
        return self.conv(x) + self.res(x)


register_primitive(
    "ResSepConv",
    lambda C, C_out, stride, affine: ResSepConv(C, C_out, 3, stride, 1))
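
# Illustrative sketch (not in the original file): a quick shape check for the
# primitive registered above; the channel and input sizes are assumptions.
import torch

_op = ResSepConv(C_in=16, C_out=16, kernel_size=3, stride=1, padding=1)
_out = _op(torch.randn(1, 16, 32, 32))  # residual path is Identity() since stride == 1
assert _out.shape == (1, 16, 32, 32)
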
Example #3
from aw_nas.ops import register_primitive
from aw_nas.final.base import FinalModel

skip_connect_2 = (lambda C, C_out, stride, affine: ops.FactorizedReduce(
    C, C_out, stride=stride, affine=affine)
                  if stride == 2 else ops.ReLUConvBN(C, C_out, 1, 1, 0))

# skip_connect_2 = (
#     lambda C, C_out, stride, affine: ops.FactorizedReduce(
#         C, C_out, stride=stride, affine=affine
#     )
#     if stride == 2
#     else (ops.Identity() if C == C_out else nn.Conv2d(C, C_out, 1, 1, 0))
# )

register_primitive("skip_connect_2", skip_connect_2)


class DenseRobFinalModel(FinalModel):
    NAME = "dense_rob_final_model"

    SCHEDULABLE_ATTRS = ["dropout_path_rate"]

    def __init__(
        self,
        search_space,
        device,
        genotypes,
        num_classes=10,
        init_channels=36,
        stem_multiplier=1,
Example #4
        self.op = nn.Sequential(
            nn.BatchNorm2d(C_in * expansion, affine=affine),
            XNORConv2d(C_in, C_in * expansion, kernel_size=kernel_size, stride=stride,
                       dilation=dilation, padding=padding, groups=group),
            nn.ReLU(inplace=False),
        )

    def forward(self, x):
        # FIXME: not sure how to add shortcut with reduction of dilated conv
        return self.op(x) + self.shortcut(x)

    def forward_one_step(self, context=None, inputs=None):
        return self.op.forward_one_step(context, inputs)

register_primitive("xnor_conv_3x3",
                   lambda C, C_out, stride, affine: XNORGroupConv(
                       C, C_out, 3, stride, 1, affine=affine, group=1),
)

register_primitive("xnor_conv_3x3_noskip",
                   lambda C, C_out, stride, affine: XNORGroupConv(
                       C, C_out, 3, stride, 1, affine=affine, group=1, shortcut=False),
)
register_primitive("cond_xnor_conv_3x3_noskip",
                   lambda C, C_out, stride, affine: XNORGroupConv(
                       C, C_out, 3, stride, 1, affine=affine, group=1, shortcut=False)\
                   if stride > 1 or C != C_out else Identity()
)

register_primitive("xnor_conv_5x5",
                   lambda C, C_out, stride, affine: XNORGroupConv(
                       C, C_out, 5, stride, 2, affine=affine, group=1),
Example #5
            out = self.inv_bottleneck(out)
        out = self.depth_wise(out)
        if self.se:
            out = self.se(out)
        out = self.point_linear(out)
        if self.shortcut is not None:
            if drop_connect_rate > 0:
                out = drop_connect(out,
                                   p=drop_connect_rate,
                                   training=self.training)
            out = out + self.shortcut(inputs)
        return out


register_primitive(
    "mobilenet_block_6_relu6", lambda C, C_out, stride, affine: MobileNetBlock(
        6, C, C_out, stride, affine, True))
register_primitive(
    "mobilenet_block_1_relu6", lambda C, C_out, stride, affine: MobileNetBlock(
        1, C, C_out, stride, affine, True))
register_primitive(
    "mobilenet_block_6", lambda C, C_out, stride, affine: MobileNetBlock(
        6, C, C_out, stride, affine))
register_primitive(
    "mobilenet_block_6_5x5", lambda C, C_out, stride, affine: MobileNetBlock(
        6, C, C_out, stride, affine, 5))
register_primitive(
    "mobilenet_block_3", lambda C, C_out, stride, affine: MobileNetBlock(
        3, C, C_out, stride, affine))
register_primitive(
    "mobilenet_block_3_5x5", lambda C, C_out, stride, affine: MobileNetBlock(
Example #6
# register the ops for layer2 ss
import torch
import torch.nn as nn
from aw_nas.ops import register_primitive

register_primitive(
    "none_for_layer2",
    lambda C, C_out, stride, affine: ZeroLayer2(C, C_out, stride),
)


class ZeroLayer2(nn.Module):
    def __init__(self, C_in, C_out, stride):
        super(ZeroLayer2, self).__init__()
        self.C_in = C_in
        self.C_out = C_out
        self.stride = stride

    def forward(self, x):
        # Emit an all-zero feature map with C_out channels and the spatial size
        # downsampled by the stride (the "none" choice for the layer2 search space).
        shape = list(x.shape)
        shape[1] = self.C_out
        shape[2] = shape[2] // self.stride
        shape[3] = shape[3] // self.stride
        return torch.zeros(size=shape, dtype=x.dtype, device=x.device)
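
# Illustrative sketch (not in the original file): the zero op keeps the batch
# dimension, rewrites the channel count to C_out, and downsamples spatially by
# the stride. Input sizes here are assumptions.
_x = torch.randn(2, 16, 32, 32)
_z = ZeroLayer2(16, 32, stride=2)(_x)
assert _z.shape == (2, 32, 16, 16) and _z.abs().sum().item() == 0.0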


from aw_nas.btcs.layer2 import (
    search_space,
    controller,
    final_model,
    bi_final_model,
Example #7
    "reduction_op_type": "factorized",
    # "layer_order": "conv_bn_relu",
    "layer_order": "bn_conv_relu",
    "binary_conv_cfgs": {
        "bi_w_scale": 1,
        "bi_act_method": 0,
        "bias": False,
    },
}

register_primitive(
    "xnor_resnet_block",
    lambda C, C_out, stride, affine: BinaryResNetBlock(
        C,
        C_out,
        stride=stride,
        affine=affine,
        relu=True,
        downsample="conv",
        binary_cfgs=binary_cfgs,
    ),
)

bireal_binary_cfgs = {
    "shortcut": True,
    "shortcut_op_type": "simple",
    "reduction_op_type": "conv",
    "layer_order": "conv_bn_relu",
    "binary_conv_cfgs": {
        "bi_w_scale": 3,
        "bi_act_method": 3,
        "bias": False,
Example #8
def block_4(C_in, C_out, stride, affine):
    return FBNetBlock(C_in, C_out, 5, stride, 1./2)

def block_5(C_in, C_out, stride, affine):
    return FBNetBlock(C_in, C_out, 7, stride, 1./2)

def block_6(C_in, C_out, stride, affine):
    return FBNetBlock(C_in, C_out, 3, stride, 1./4)

def block_7(C_in, C_out, stride, affine):
    return FBNetBlock(C_in, C_out, 5, stride, 1./4)

def block_8(C_in, C_out, stride, affine):
    return FBNetBlock(C_in, C_out, 7, stride, 1./4)

register_primitive("block_0", block_0)
register_primitive("block_1", block_1)
register_primitive("block_2", block_2)
register_primitive("block_3", block_3)
register_primitive("block_4", block_4)
register_primitive("block_5", block_5)
register_primitive("block_6", block_6)
register_primitive("block_7", block_7)
register_primitive("block_8", block_8)

class CosineDecayLR(_LRScheduler):
    def __init__(self, optimizer, T_max, alpha=1e-4,
                 t_mul=2, lr_mul=0.9,
                 last_epoch=-1,
                 warmup_step=300,
                 logger=None):
Example #9
    return MobileBlock(C_in, C_out, 7, stride, 1. / 3)


def Mobileblock_6(C_in, C_out, stride, affine):
    return MobileBlock(C_in, C_out, 3, stride, 1. / 6)


def Mobileblock_7(C_in, C_out, stride, affine):
    return MobileBlock(C_in, C_out, 5, stride, 1. / 6)


def Mobileblock_8(C_in, C_out, stride, affine):
    return MobileBlock(C_in, C_out, 7, stride, 1. / 6)


register_primitive("Mobileblock_0", Mobileblock_0)
register_primitive("Mobileblock_1", Mobileblock_1)
register_primitive("Mobileblock_2", Mobileblock_2)
register_primitive("Mobileblock_3", Mobileblock_3)
register_primitive("Mobileblock_4", Mobileblock_4)
register_primitive("Mobileblock_5", Mobileblock_5)
register_primitive("Mobileblock_6", Mobileblock_6)
register_primitive("Mobileblock_7", Mobileblock_7)
register_primitive("Mobileblock_8", Mobileblock_8)
register_primitive("Resblock_0", Resblock_0)
register_primitive("Resblock_1", Resblock_1)
register_primitive("Resblock_2", Resblock_2)
register_primitive("Resblock_3", Resblock_3)
register_primitive("Resblock_4", Resblock_4)
register_primitive("Resblock_5", Resblock_5)
register_primitive("Resblock_6", Resblock_6)