def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    """Build the NASNet cell used directly after a stem/reduction step.

    Args:
        in_channels_left (int): channels of the earlier (previous-previous) feature map.
        out_channels_left (int): channels produced by each factorized-reduction path.
        in_channels_right (int): channels of the immediately preceding feature map.
        out_channels_right (int): channels of the 1x1 entry conv and all separable branches.
    """
    super(FirstCell, self).__init__()
    # ReLU -> 1x1 conv -> BN: project the previous feature map to out_channels_right.
    self.conv_1x1 = nn.SequentialCell([
        nn.ReLU(),
        nn.Conv2d(in_channels=in_channels_right, out_channels=out_channels_right,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False),
        nn.BatchNorm2d(num_features=out_channels_right, eps=0.001, momentum=0.9, affine=True)
    ])
    self.relu = nn.ReLU()
    # Factorized-reduction path 1: stride-2 average pool followed by a 1x1 conv.
    self.path_1 = nn.SequentialCell([
        nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid'),
        nn.Conv2d(in_channels=in_channels_left, out_channels=out_channels_left,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False)
    ])
    # Factorized-reduction path 2: zero-pad one pixel on bottom/right (offsets the
    # sampling grid relative to path 1), then the same stride-2 pool + 1x1 conv.
    self.path_2 = nn.CellList([])
    self.path_2.append(
        nn.Pad(paddings=((0, 0), (0, 0), (0, 1), (0, 1)), mode="CONSTANT"))
    self.path_2.append(
        nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid'))
    self.path_2.append(
        nn.Conv2d(in_channels=in_channels_left, out_channels=out_channels_left,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False))
    # BN over the concatenation of both paths, hence out_channels_left * 2 features.
    self.final_path_bn = nn.BatchNorm2d(num_features=out_channels_left * 2, eps=0.001,
                                        momentum=0.9, affine=True)
    # Combination branches. Positional args look like (in, out, kernel, stride, padding)
    # -- TODO confirm against the BranchSeparables definition.
    self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
    self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    self.comb_iter_2_left = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')
    self.comb_iter_3_left = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')
    self.comb_iter_3_right = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')
    self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
def __init__(self, num_classes=1000):
    """Build the Xception backbone (entry, middle and exit flows) plus classifier head.

    Args:
        num_classes (int): number of output classes for the final dense layer.
    """
    super(Xception, self).__init__()
    self.num_classes = num_classes
    # Stem: two stride-reducing valid convs; weight init comes from the project config.
    self.conv1 = nn.Conv2d(3, 32, 3, 2, pad_mode='valid', weight_init=config.weight_init)
    self.bn1 = nn.BatchNorm2d(32, momentum=0.9)
    self.relu = nn.ReLU()
    self.conv2 = nn.Conv2d(32, 64, 3, pad_mode='valid', weight_init=config.weight_init)
    self.bn2 = nn.BatchNorm2d(64, momentum=0.9)
    # Entry flow
    self.block1 = Block(64, 128, 2, 2, start_with_relu=False, grow_first=True)
    self.block2 = Block(128, 256, 2, 2, start_with_relu=True, grow_first=True)
    self.block3 = Block(256, 728, 2, 2, start_with_relu=True, grow_first=True)
    # Middle flow: eight identical 728-channel blocks.
    self.block4 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    self.block5 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    self.block6 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    self.block7 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    self.block8 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    self.block9 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    self.block10 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    self.block11 = Block(728, 728, 3, 1, start_with_relu=True, grow_first=True)
    # Exit flow
    self.block12 = Block(728, 1024, 2, 2, start_with_relu=True, grow_first=False)
    self.conv3 = SeparableConv2d(1024, 1536, 3, 1, 1)
    self.bn3 = nn.BatchNorm2d(1536, momentum=0.9)
    self.conv4 = SeparableConv2d(1536, 2048, 3, 1, 1)
    self.bn4 = nn.BatchNorm2d(2048, momentum=0.9)
    # NOTE(review): a fixed 10x10 pooling window presumes a fixed input resolution
    # (10x10 final feature map) -- confirm against the expected image size.
    self.avg_pool = nn.AvgPool2d(10)
    # NOTE(review): nn.Dropout() with no argument uses MindSpore's default keep
    # probability -- confirm this is intentional rather than a tuned rate.
    self.dropout = nn.Dropout()
    self.fc = nn.Dense(2048, num_classes)
def __init__(self, stem_filters, num_filters):
    """Build the second NASNet stem cell (a stride-2 reduction cell).

    Args:
        stem_filters (int): channel count of the raw stem feature map.
        num_filters (int): channel count used by this cell's branches.
    """
    super(CellStem1, self).__init__()
    self.num_filters = num_filters
    self.stem_filters = stem_filters
    # ReLU -> 1x1 conv -> BN: project the previous cell's output (2*num_filters
    # channels in) down to num_filters.
    self.conv_1x1 = nn.SequentialCell([
        nn.ReLU(),
        nn.Conv2d(in_channels=2 * self.num_filters, out_channels=self.num_filters,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False),
        nn.BatchNorm2d(num_features=self.num_filters, eps=0.001, momentum=0.9, affine=True)
    ])
    self.relu = nn.ReLU()
    # Factorized-reduction path 1: stride-2 average pool + 1x1 conv to half the filters.
    self.path_1 = nn.SequentialCell([
        nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid'),
        nn.Conv2d(in_channels=self.stem_filters, out_channels=self.num_filters // 2,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False)
    ])
    # Factorized-reduction path 2: pad bottom/right one pixel to offset the grid,
    # then the same stride-2 pool + 1x1 conv as path 1.
    self.path_2 = nn.CellList([])
    self.path_2.append(
        nn.Pad(paddings=((0, 0), (0, 0), (0, 1), (0, 1)), mode="CONSTANT"))
    self.path_2.append(
        nn.AvgPool2d(kernel_size=1, stride=2, pad_mode='valid'))
    self.path_2.append(
        nn.Conv2d(in_channels=self.stem_filters, out_channels=self.num_filters // 2,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False))
    # BN over the concatenated paths (num_filters//2 + num_filters//2 = num_filters).
    self.final_path_bn = nn.BatchNorm2d(num_features=self.num_filters, eps=0.001,
                                        momentum=0.9, affine=True)
    # Stride-2 combination branches; BranchSeparables positional args look like
    # (in, out, kernel, stride, padding) -- TODO confirm against its definition.
    self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, bias=False)
    self.comb_iter_0_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, bias=False)
    self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, pad_mode='same')
    self.comb_iter_1_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, bias=False)
    self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, pad_mode='same')
    self.comb_iter_2_right = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, bias=False)
    self.comb_iter_3_right = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')
    self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, pad_mode='same')
    self.shape = P.Shape()
def __init__(self, model_settings, model_size_info):
    """Depthwise-separable CNN for keyword spotting.

    Args:
        model_settings (dict): must provide 'label_count', 'dct_coefficient_count',
            'spectrogram_length' and 'dropout1'.
        model_size_info (list[int]): flat layer spec -- element 0 is the layer
            count, followed by 5 values per layer:
            (feature_count, kernel_t, kernel_f, stride_t, stride_f).
    """
    super(DSCNN, self).__init__()
    # Input layout is N C H W: H = time frames, W = frequency bins.
    label_count = model_settings['label_count']
    t_dim = model_settings['spectrogram_length']
    f_dim = model_settings['dct_coefficient_count']

    # Unpack the flat spec by striding over it instead of a manual index counter.
    num_layers = model_size_info[0]
    conv_feat = model_size_info[1::5][:num_layers]
    conv_kt = model_size_info[2::5][:num_layers]
    conv_kf = model_size_info[3::5][:num_layers]
    conv_st = model_size_info[4::5][:num_layers]
    conv_sf = model_size_info[5::5][:num_layers]

    seq_cell = []
    in_channel = 1
    for layer_no, (feat, kt, kf, st, sf) in enumerate(
            zip(conv_feat, conv_kt, conv_kf, conv_st, conv_sf)):
        if layer_no == 0:
            # First layer: a plain convolution + batch norm (no activation here).
            seq_cell.append(
                nn.Conv2d(in_channels=in_channel, out_channels=feat,
                          kernel_size=(kt, kf), stride=(st, sf),
                          pad_mode="same", padding=0, has_bias=False))
            seq_cell.append(nn.BatchNorm2d(num_features=feat, momentum=0.98))
        else:
            # Depthwise conv -> BN -> ReLU, then pointwise 1x1 conv -> BN -> ReLU.
            seq_cell.append(
                DepthWiseConv(in_planes=in_channel, kernel_size=(kt, kf),
                              stride=(st, sf), pad_mode='same', pad=0))
            seq_cell.append(nn.BatchNorm2d(num_features=in_channel, momentum=0.98))
            seq_cell.append(nn.ReLU())
            seq_cell.append(
                nn.Conv2d(in_channels=in_channel, out_channels=feat,
                          kernel_size=(1, 1), pad_mode="same"))
            seq_cell.append(nn.BatchNorm2d(num_features=feat, momentum=0.98))
            seq_cell.append(nn.ReLU())
        in_channel = feat
        # Track the spatial extent remaining after this layer's strides.
        t_dim = math.ceil(t_dim / float(st))
        f_dim = math.ceil(f_dim / float(sf))

    # Pool away whatever time/frequency extent is left, then classify.
    seq_cell.append(nn.AvgPool2d(kernel_size=(t_dim, f_dim)))
    seq_cell.append(nn.Flatten())
    # NOTE(review): MindSpore nn.Dropout takes a keep probability -- confirm that
    # model_settings['dropout1'] is a keep_prob, not a drop rate.
    seq_cell.append(nn.Dropout(model_settings['dropout1']))
    seq_cell.append(nn.Dense(in_channel, label_count))
    self.model = nn.SequentialCell(seq_cell)
def __init__(self, weights_update=False):
    """
    VGG16 feature extraction

    Args:
        weights_update (bool): whether to update weights for the first two conv
            stages (conv1_x / conv2_x), default is False.
    """
    super(VGG16FeatureExtraction, self).__init__()
    self.relu = nn.ReLU()
    self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same")
    # NOTE(review): avg_pool is registered here but its use is not visible in this
    # chunk -- verify it is actually consumed by construct().
    self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
    # Stages 1-2: trainability gated by `weights_update`.
    self.conv1_1 = _conv(in_channels=3, out_channels=64, kernel_size=3,
                         padding=1, weights_update=weights_update)
    self.conv1_2 = _conv(in_channels=64, out_channels=64, kernel_size=3,
                         padding=1, weights_update=weights_update)
    self.conv2_1 = _conv(in_channels=64, out_channels=128, kernel_size=3,
                         padding=1, weights_update=weights_update)
    self.conv2_2 = _conv(in_channels=128, out_channels=128, kernel_size=3,
                         padding=1, weights_update=weights_update)
    # Stages 3-5: always use _conv's default trainability.
    self.conv3_1 = _conv(in_channels=128, out_channels=256, kernel_size=3, padding=1)
    self.conv3_2 = _conv(in_channels=256, out_channels=256, kernel_size=3, padding=1)
    self.conv3_3 = _conv(in_channels=256, out_channels=256, kernel_size=3, padding=1)
    self.conv4_1 = _conv(in_channels=256, out_channels=512, kernel_size=3, padding=1)
    self.conv4_2 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
    self.conv4_3 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
    self.conv5_1 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
    self.conv5_2 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
    self.conv5_3 = _conv(in_channels=512, out_channels=512, kernel_size=3, padding=1)
    self.cast = P.Cast()
def __init__(self):
    """Assemble a fixed pipeline: 3x3 average pool (stride 1) -> ReLU -> Flatten."""
    super().__init__()
    stages = [
        nn.AvgPool2d(3, 1),
        nn.ReLU(),
        nn.Flatten(),
    ]
    self.seq = nn.SequentialCell(stages)
def __init__(self, kernel_size, stride=None):
    """Wrap a single nn.AvgPool2d layer.

    Args:
        kernel_size: pooling window size, forwarded unchanged to nn.AvgPool2d.
        stride: pooling stride, forwarded unchanged. NOTE(review): defaulting to
            None relies on nn.AvgPool2d accepting/interpreting None -- confirm
            against the MindSpore version in use.
    """
    super(AvgNet, self).__init__()
    self.avgpool = nn.AvgPool2d(kernel_size, stride)
def __init__(self, input_size=224, n_class=1000, model_size='1.0x'):
    """Build ShuffleNetV2 at one of four width multipliers.

    Args:
        input_size (int): nominal input resolution. NOTE(review): not referenced
            anywhere in this constructor -- the fixed 7x7 global pool implies a
            224x224 input; confirm callers rely on that.
        n_class (int): number of classifier outputs.
        model_size (str): width variant, one of '0.5x', '1.0x', '1.5x', '2.0x'.

    Raises:
        NotImplementedError: if model_size is not one of the four variants.
    """
    super(ShuffleNetV2, self).__init__()
    print('model size is ', model_size)
    # Number of ShuffleV2Blocks in each of the three stages.
    self.stage_repeats = [4, 8, 4]
    self.model_size = model_size
    # Per-variant channel plan: [unused, first_conv, stage2, stage3, stage4, last_conv].
    if model_size == '0.5x':
        self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
    elif model_size == '1.0x':
        self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
    elif model_size == '1.5x':
        self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
    elif model_size == '2.0x':
        self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
    else:
        raise NotImplementedError
    # building first layer
    input_channel = self.stage_out_channels[1]
    self.first_conv = nn.SequentialCell([
        nn.Conv2d(in_channels=3, out_channels=input_channel, kernel_size=3, stride=2,
                  pad_mode='pad', padding=1, has_bias=False),
        nn.BatchNorm2d(num_features=input_channel, momentum=0.9),
        nn.ReLU(),
    ])
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
    # Stages: the first block of each stage downsamples (stride 2) and takes the
    # full input channels; later blocks take half (channel-split path) at stride 1.
    self.features = []
    for idxstage in range(len(self.stage_repeats)):
        numrepeat = self.stage_repeats[idxstage]
        output_channel = self.stage_out_channels[idxstage + 2]
        for i in range(numrepeat):
            if i == 0:
                self.features.append(
                    ShuffleV2Block(input_channel, output_channel,
                                   mid_channels=output_channel // 2, ksize=3, stride=2))
            else:
                self.features.append(
                    ShuffleV2Block(input_channel // 2, output_channel,
                                   mid_channels=output_channel // 2, ksize=3, stride=1))
            input_channel = output_channel
    self.features = nn.SequentialCell([*self.features])
    self.conv_last = nn.SequentialCell([
        nn.Conv2d(in_channels=input_channel, out_channels=self.stage_out_channels[-1],
                  kernel_size=1, stride=1, pad_mode='pad', padding=0, has_bias=False),
        nn.BatchNorm2d(num_features=self.stage_out_channels[-1], momentum=0.9),
        nn.ReLU()
    ])
    self.globalpool = nn.AvgPool2d(kernel_size=7, stride=7, pad_mode='valid')
    # Dropout is only registered for the widest variant.
    if self.model_size == '2.0x':
        self.dropout = nn.Dropout(keep_prob=0.8)
    self.classifier = nn.SequentialCell([
        nn.Dense(in_channels=self.stage_out_channels[-1], out_channels=n_class,
                 has_bias=False)
    ])
    ##TODO init weights
    self._initialize_weights()
def test_avgpool2d_error_input():
    """A non-integer stride must make nn.AvgPool2d raise TypeError."""
    with pytest.raises(TypeError):
        # kernel_size=5 is valid; the float stride 2.3 is the invalid input.
        nn.AvgPool2d(5, 2.3)
has_bias=False, bias_init=Tensor(np.ones([6]).astype(np.float32))), 'desc_inputs': [Tensor(np.ones(shape=[6, 1]).astype(np.float32))] }), ('MaxPool2d_1', { 'block': nn.MaxPool2d(5, pad_mode='same'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))] }), ('MaxPool2d_2', { 'block': nn.MaxPool2d(5, pad_mode='valid'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))] }), ('AvgPool2d_1', { 'block': nn.AvgPool2d(5, pad_mode='same'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))] }), ('AvgPool2d_2', { 'block': nn.AvgPool2d(5, pad_mode='valid'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))] }), ('Conv2D_1', { 'block': P.Conv2D(1, 6, pad_mode='same', pad=0), 'desc_inputs': [ Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32)), Tensor(np.ones(shape=[1, 5, 6, 6]).astype(np.float32)) ]
def __init__(self, num_classes, is_training=True, stem_filters=32, penultimate_filters=1056, filters_multiplier=2):
    """Build the NASNet-A Mobile network: stem, three cell groups separated by
    reduction cells, optional auxiliary head, and the final classifier.

    Args:
        num_classes (int): number of classifier outputs.
        is_training (bool): if True, also register the auxiliary-logits head.
        stem_filters (int): channels produced by the stem convolution.
        penultimate_filters (int): channels before the classifier; the base
            `filters` count is derived as penultimate_filters // 24.
        filters_multiplier (int): growth factor between cell groups.
    """
    super(NASNetAMobile, self).__init__()
    self.is_training = is_training
    self.stem_filters = stem_filters
    self.penultimate_filters = penultimate_filters
    self.filters_multiplier = filters_multiplier
    filters = self.penultimate_filters//24  # 24 is default value for the architecture
    # Stem: stride-2 conv + BN, no activation.
    self.conv0 = nn.SequentialCell([
        nn.Conv2d(in_channels=3, out_channels=self.stem_filters, kernel_size=3,
                  stride=2, pad_mode='pad', padding=0, has_bias=False),
        nn.BatchNorm2d(num_features=self.stem_filters, eps=0.001, momentum=0.9, affine=True)
    ])
    self.cell_stem_0 = CellStem0(
        self.stem_filters, num_filters=filters//(filters_multiplier**2)
    )
    self.cell_stem_1 = CellStem1(
        self.stem_filters, num_filters=filters//filters_multiplier
    )
    # First normal-cell group. Inline comments give channels as multiples of `filters`.
    self.cell_0 = FirstCell(
        in_channels_left=filters, out_channels_left=filters//2,  # 1, 0.5
        in_channels_right=2*filters, out_channels_right=filters
    )  # 2, 1
    self.cell_1 = NormalCell(
        in_channels_left=2*filters, out_channels_left=filters,  # 2, 1
        in_channels_right=6*filters, out_channels_right=filters
    )  # 6, 1
    self.cell_2 = NormalCell(
        in_channels_left=6*filters, out_channels_left=filters,  # 6, 1
        in_channels_right=6*filters, out_channels_right=filters
    )  # 6, 1
    self.cell_3 = NormalCell(
        in_channels_left=6*filters, out_channels_left=filters,  # 6, 1
        in_channels_right=6*filters, out_channels_right=filters
    )  # 6, 1
    self.reduction_cell_0 = ReductionCell0(
        in_channels_left=6*filters, out_channels_left=2*filters,  # 6, 2
        in_channels_right=6*filters, out_channels_right=2*filters
    )  # 6, 2
    # Second group; cell indices 4-5 are intentionally skipped in the naming scheme.
    self.cell_6 = FirstCell(
        in_channels_left=6*filters, out_channels_left=filters,  # 6, 1
        in_channels_right=8*filters, out_channels_right=2*filters
    )  # 8, 2
    self.cell_7 = NormalCell(
        in_channels_left=8*filters, out_channels_left=2*filters,  # 8, 2
        in_channels_right=12*filters, out_channels_right=2*filters
    )  # 12, 2
    self.cell_8 = NormalCell(
        in_channels_left=12*filters,
        out_channels_left=2*filters,  # 12, 2
        in_channels_right=12*filters, out_channels_right=2*filters
    )  # 12, 2
    self.cell_9 = NormalCell(
        in_channels_left=12*filters, out_channels_left=2*filters,  # 12, 2
        in_channels_right=12*filters, out_channels_right=2*filters
    )  # 12, 2
    # Auxiliary classifier head, attached only in training mode.
    if is_training:
        self.aux_logits = AuxLogits(in_channels=12*filters, out_channels=num_classes)
    self.reduction_cell_1 = ReductionCell1(
        in_channels_left=12*filters, out_channels_left=4*filters,  # 12, 4
        in_channels_right=12*filters, out_channels_right=4*filters
    )  # 12, 4
    # Third group; cell indices 10-11 are likewise skipped.
    self.cell_12 = FirstCell(
        in_channels_left=12*filters, out_channels_left=2*filters,  # 12, 2
        in_channels_right=16*filters, out_channels_right=4*filters
    )  # 16, 4
    self.cell_13 = NormalCell(
        in_channels_left=16*filters, out_channels_left=4*filters,  # 16, 4
        in_channels_right=24*filters, out_channels_right=4*filters
    )  # 24, 4
    self.cell_14 = NormalCell(
        in_channels_left=24*filters, out_channels_left=4*filters,  # 24, 4
        in_channels_right=24*filters, out_channels_right=4*filters
    )  # 24, 4
    self.cell_15 = NormalCell(
        in_channels_left=24*filters, out_channels_left=4*filters,  # 24, 4
        in_channels_right=24*filters, out_channels_right=4*filters
    )  # 24, 4
    self.relu = nn.ReLU()
    self.dropout = nn.Dropout(keep_prob=0.5)
    self.classifier = nn.Dense(in_channels=24*filters, out_channels=num_classes)
    self.shape = P.Shape()
    self.reshape = P.Reshape()
    # NOTE(review): a fixed 7x7 pooling window presumes a 7x7 final feature map
    # (i.e. a fixed input resolution) -- confirm against the expected input size.
    self.avg_pool = nn.AvgPool2d(kernel_size=7, stride=1)
    self._initialize_weights()
def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
    """Build the second NASNet reduction cell (stride-2 branches halve resolution).

    Args:
        in_channels_left (int): channels of the previous-previous feature map.
        out_channels_left (int): channels after projecting the left input.
        in_channels_right (int): channels of the previous feature map.
        out_channels_right (int): channels after projecting the right input and
            used by all combination branches.
    """
    super(ReductionCell1, self).__init__()
    # ReLU -> 1x1 conv -> BN projection of the previous-previous feature map.
    self.conv_prev_1x1 = nn.SequentialCell([
        nn.ReLU(),
        nn.Conv2d(in_channels=in_channels_left, out_channels=out_channels_left,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False),
        nn.BatchNorm2d(num_features=out_channels_left, eps=0.001, momentum=0.9, affine=True)])
    # ReLU -> 1x1 conv -> BN projection of the previous feature map.
    self.conv_1x1 = nn.SequentialCell([
        nn.ReLU(),
        nn.Conv2d(in_channels=in_channels_right, out_channels=out_channels_right,
                  kernel_size=1, stride=1, pad_mode='pad', has_bias=False),
        nn.BatchNorm2d(num_features=out_channels_right, eps=0.001, momentum=0.9, affine=True)])
    # Combination branches; BranchSeparables positional args look like
    # (in, out, kernel, stride, padding) -- TODO confirm against its definition.
    self.comb_iter_0_left = BranchSeparables(
        out_channels_right, out_channels_right, 5, 2, 2, bias=False
    )
    self.comb_iter_0_right = BranchSeparables(
        out_channels_right, out_channels_right, 7, 2, 3, bias=False
    )
    self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, pad_mode='same')
    self.comb_iter_1_right = BranchSeparables(
        out_channels_right, out_channels_right, 7, 2, 3, bias=False
    )
    self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, pad_mode='same')
    self.comb_iter_2_right = BranchSeparables(
        out_channels_right, out_channels_right, 5, 2, 2, bias=False
    )
    self.comb_iter_3_right = nn.AvgPool2d(kernel_size=3, stride=1, pad_mode='same')
    self.comb_iter_4_left = BranchSeparables(
        out_channels_right, out_channels_right, 3, 1, 1, bias=False
    )
    self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, pad_mode='same')