def __init__(self, InChannel, OutChannel, growRate, nConvLayers, kSize=3):
    """Initialize Block.

    :param InChannel: channel number of input
    :type InChannel: int
    :param OutChannel: channel number of output
    :type OutChannel: int
    :param growRate: growth rate of block
    :type growRate: int
    :param nConvLayers: the number of convolution layers
    :type nConvLayers: int
    :param kSize: kernel size of convolution operation
    :type kSize: int
    """
    super(Cont_RDB, self).__init__()
    self.InChan = InChannel
    self.OutChan = OutChannel
    self.G = growRate
    self.C = nConvLayers
    # 1x1 projections are created only when channel counts actually differ;
    # otherwise the attribute is simply absent.
    if self.InChan != self.G:
        self.InConv = ops.Conv2d(self.InChan, self.G, 1, padding=0, stride=1)
    if self.OutChan != self.G and self.OutChan != self.InChan:
        self.OutConv = ops.Conv2d(self.InChan, self.OutChan, 1, padding=0, stride=1)
    self.pool = ops.AvgPool2d(2, 2)
    self.shup = ops.PixelShuffle(2)
    # NOTE(review): `MoudleList` is the (misspelled) name exposed by the ops
    # package — keep as-is.
    self.Convs = ops.MoudleList()
    self.ShrinkConv = ops.MoudleList()
    same_pad = (kSize - 1) // 2
    last_stage = self.C - 1
    for stage in range(self.C):
        self.Convs.append(
            Sequential(
                ops.Conv2d(self.G, self.G, kSize, padding=same_pad, stride=1),
                ops.Relu()))
        if stage < last_stage:
            shrink = ops.Conv2d((2 + stage) * self.G, self.G, 1,
                                padding=0, stride=1)
        else:
            # Final stage shrinks from a quarter of the concatenated channels —
            # presumably because PixelShuffle(2) divides channels by 4 before
            # this conv runs; TODO confirm against the forward pass.
            shrink = ops.Conv2d(int((2 + stage) * self.G / 4), self.OutChan, 1,
                                padding=0, stride=1)
        self.ShrinkConv.append(shrink)
def _transsorm_op(init_layer): """Transform the torch op to Vega op.""" if isinstance(init_layer, nn.Conv2d): in_channels = init_layer.in_channels out_channels = init_layer.out_channels kernel_size = init_layer.kernel_size stride = init_layer.stride padding = init_layer.padding # bias = init_layer.bias new_layer = ops.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding) elif isinstance(init_layer, nn.BatchNorm2d): num_features = init_layer.num_features new_layer = ops.BatchNorm2d(num_features=num_features) elif isinstance(init_layer, nn.ReLU): new_layer = ops.Relu() elif isinstance(init_layer, nn.MaxPool2d): kernel_size = init_layer.kernel_size stride = init_layer.stride padding = init_layer.padding new_layer = ops.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) elif isinstance(init_layer, nn.AvgPool2d): kernel_size = init_layer.kernel_size stride = init_layer.stride padding = init_layer.padding new_layer = ops.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) elif isinstance(init_layer, nn.AdaptiveAvgPool2d): output_size = init_layer.output_size new_layer = ops.AdaptiveAvgPool2d(output_size=output_size) elif isinstance(init_layer, nn.Linear): in_features = init_layer.in_features out_features = init_layer.out_features # use_bias = init_layer.bias new_layer = ops.Linear(in_features=in_features, out_features=out_features) elif isinstance(init_layer, nn.Dropout): prob = init_layer.p inplace = init_layer.inplace new_layer = ops.Dropout(prob=prob, inplace=inplace) else: raise ValueError("The op {} is not supported.".format( type(init_layer))) return new_layer
def __init__(self, C, num_classes, input_size):
    """Init AuxiliaryHead.

    :param C: channel number of the incoming feature map
    :param num_classes: number of classification targets
    :param input_size: spatial size of the input feature map
    """
    super(AuxiliaryHead, self).__init__()
    # Stride is derived from the input size so the 5x5 average pool
    # collapses the map to a small fixed grid — presumably 2x2; confirm
    # against the forward pass.
    pool_stride = input_size - 5
    self.relu1 = ops.Relu(inplace=True)
    self.avgpool1 = ops.AvgPool2d(
        5, stride=pool_stride, padding=0, count_include_pad=False)
    # 1x1 bottleneck down to 128 channels, then a 2x2 conv up to 768.
    self.conv1 = ops.Conv2d(C, 128, 1, bias=False)
    self.batchnorm1 = ops.BatchNorm2d(128)
    self.relu2 = ops.Relu(inplace=True)
    self.conv2 = ops.Conv2d(128, 768, 2, bias=False)
    self.batchnorm2 = ops.BatchNorm2d(768)
    self.relu3 = ops.Relu(inplace=True)
    # Flatten before the final linear classifier.
    self.view = ops.View()
    self.classifier = ops.Linear(768, num_classes)
# This program is free software; you can redistribute it and/or modify # it under the terms of the MIT License. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # MIT License for more details. """Import all torch operators.""" from zeus.common import ClassType, ClassFactory from zeus.modules.operators import Seq, SeparatedConv, DilConv, GAPConv1x1, conv1X1, \ conv3x3, conv5x5, conv7x7, FactorizedReduce from zeus.modules.operators import ops OPS = { 'none': lambda C, stride, affine, repeats=1: ops.Zero(stride), 'avg_pool_3x3': lambda C, stride, affine, repeats=1: ops.AvgPool2d( 3, stride=stride, padding=1, count_include_pad=False), 'max_pool_3x3': lambda C, stride, affine, repeats=1: ops.MaxPool2d( 3, stride=stride, padding=1), 'global_average_pool': lambda C, stride, affine, repeats=1: Seq(GAPConv1x1(C, C)), 'skip_connect': lambda C, stride, affine, repeats=1: ops.Identity() if stride == 1 else FactorizedReduce( C, C, affine=affine), 'sep_conv_3x3': lambda C, stride, affine, repeats=1: SeparatedConv(C, C, 3, stride, 1, affine=affine), 'sep_conv_5x5': lambda C, stride, affine, repeats=1: SeparatedConv(C, C, 5, stride, 2, affine=affine), 'sep_conv_7x7': lambda C, stride, affine, repeats=1: SeparatedConv(C, C, 7, stride, 3, affine=affine), 'dil_conv_3x3': lambda C, stride, affine, repeats=1: DilConv(C, C, 3, stride, 2, 2, affine=affine), 'dil_conv_5x5': lambda C, stride, affine, repeats=1: DilConv(C, C, 5, stride, 4, 2, affine=affine), 'conv_7x1_1x7': lambda C, stride, affine, repeats=1: Seq( ops.Relu(inplace=False), ops.Conv2d(C, C, (1, 7), stride=(1, stride), padding=(0, 3), bias=False), ops.Conv2d(C, C, (7, 1), stride=(stride, 1), padding=(3, 0), bias=False), ops.BatchNorm2d(C, affine=affine)),