def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
             embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
             window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
             drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
             norm_layer=nn.LayerNorm, ape=False, patch_norm=True, **kwargs):
    super().__init__()
    self.num_classes = num_classes
    self.num_layers = len(depths)
    self.embed_dim = embed_dim
    self.ape = ape
    self.patch_norm = patch_norm
    self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
    self.mlp_ratio = mlp_ratio

    # split image into non-overlapping patches
    self.patch_embed = PatchEmbed(
        img_size=img_size, patch_size=patch_size, in_chans=in_chans,
        embed_dim=embed_dim,
        norm_layer=norm_layer if self.patch_norm else None)
    num_patches = self.patch_embed.num_patches
    patches_resolution = self.patch_embed.patches_resolution
    self.patches_resolution = patches_resolution

    # absolute position embedding
    if self.ape:
        self.absolute_pos_embed = self.create_parameter(
            shape=(1, num_patches, embed_dim),
            default_initializer=nn.initializer.Constant(value=0))
        self.add_parameter("absolute_pos_embed", self.absolute_pos_embed)

    self.pos_drop = nn.Dropout(p=drop_rate)

    # stochastic depth decay rule
    dpr = [x for x in paddle.linspace(0, drop_path_rate, sum(depths))]

    # build layers
    self.layers = nn.LayerList()
    for i_layer in range(self.num_layers):
        layer = BasicLayer(
            dim=int(embed_dim * 2 ** i_layer),
            input_resolution=(patches_resolution[0] // (2 ** i_layer),
                              patches_resolution[1] // (2 ** i_layer)),
            depth=depths[i_layer],
            num_heads=num_heads[i_layer],
            window_size=window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop=drop_rate,
            attn_drop=attn_drop_rate,
            drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
            norm_layer=norm_layer,
            downsample=PatchMerging if (i_layer < self.num_layers - 1) else None)
        self.layers.append(layer)

    self.norm = norm_layer(self.num_features)
    self.avgpool = nn.AdaptiveAvgPool1D(1)
    self.head = nn.Linear(self.num_features,
                          num_classes) if num_classes > 0 else Identity()

    self.apply(self._init_weights)
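# Usage sketch for the constructor above. The enclosing class is not shown in
# this snippet; the name `SwinTransformer` and a `forward` that returns
# classification logits are assumptions, not part of the code above.
import paddle

model = SwinTransformer(img_size=224, patch_size=4, num_classes=1000)  # hypothetical class name
x = paddle.randn([1, 3, 224, 224])  # NCHW input batch
logits = model(x)                   # expected shape: [1, 1000]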
def __init__(self, in_channels=3, out_classes=5, hid=64, num=64):
    super(FallNet, self).__init__()
    # convolutional feature extractor with shrinking kernel sizes
    self.cnn0 = Block(in_channels, hid, 7, 0)
    self.cnn1 = Block(hid, hid, 5, 0)
    self.cnn2 = Block(hid, hid, 3, 0)
    self.cnn3 = Block(hid, hid, 1, 0)
    # fix the temporal length to `num` regardless of input sequence length
    self.avg = nn.AdaptiveAvgPool1D(output_size=num)
    # self.rnn0 = nn.LSTM(input_size=145, hidden_size=num, dropout=.2, num_layers=3)
    # note: with num_layers=1 the dropout argument has no effect, since
    # recurrent dropout is only applied between stacked layers
    self.rnn0 = nn.GRU(input_size=145, hidden_size=num, num_layers=1, dropout=0.2)
    self.rnn1 = Block(hid, hid, 1, 0)
    self.rnn2 = Block(hid, 4, 3, 0)
    # classification head; Softmax turns the logits into class probabilities
    self.cls = nn.Sequential(
        nn.Linear(in_features=1016, out_features=128),
        nn.Dropout(p=.2),
        nn.Linear(in_features=128, out_features=out_classes),
        nn.Softmax(axis=1))
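# Quick shape check for the pooling stage above: AdaptiveAvgPool1D maps any
# input length to a fixed `output_size` (a sketch; FallNet's actual forward
# pass is defined elsewhere and the input shape here is illustrative).
import paddle
import paddle.nn as nn

pool = nn.AdaptiveAvgPool1D(output_size=64)
x = paddle.randn([8, 64, 145])  # [batch, channels, time]
y = pool(x)
print(y.shape)                  # [8, 64, 64] for any input time length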
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
             embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
             window_size=4, mlp_ratio=4., qkv_bias=True, qk_scale=None,
             drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
             norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
             use_checkpoint=False, **kwargs):
    super().__init__()
    self.num_classes = num_classes
    self.num_layers = len(depths)
    self.embed_dim = embed_dim
    self.ape = ape
    self.patch_norm = patch_norm
    self.num_features = int(embed_dim * 2**(self.num_layers - 1))
    self.mlp_ratio = mlp_ratio

    # split image into non-overlapping patches
    self.patch_embed = PatchEmbed(
        img_size=img_size, patch_size=patch_size, in_chans=in_chans,
        embed_dim=embed_dim,
        norm_layer=norm_layer if self.patch_norm else None)
    num_patches = self.patch_embed.num_patches
    patches_resolution = self.patch_embed.patches_resolution
    self.patches_resolution = patches_resolution

    # absolute position embedding, re-initialized with a truncated normal
    if self.ape:
        attr = ParamAttr(initializer=nn.initializer.Constant(0))
        self.absolute_pos_embed = self.create_parameter(
            shape=(1, num_patches, embed_dim), attr=attr)
        paddle.assign(trunc_norm_(self.absolute_pos_embed.shape, std=0.02),
                      self.absolute_pos_embed)

    self.pos_drop = nn.Dropout(p=drop_rate)

    # stochastic depth decay rule
    dpr = [x for x in np.linspace(0, drop_path_rate, sum(depths))]

    # build layers
    self.layers = nn.LayerList()
    for i_layer in range(self.num_layers):
        layer = BasicLayer(
            dim=int(embed_dim * 2**i_layer),
            input_resolution=(patches_resolution[0] // (2**i_layer),
                              patches_resolution[1] // (2**i_layer)),
            depth=depths[i_layer],
            num_heads=num_heads[i_layer],
            window_size=window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop=drop_rate,
            attn_drop=attn_drop_rate,
            drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
            norm_layer=norm_layer,
            downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
            use_checkpoint=use_checkpoint)
        self.layers.append(layer)

    self.norm = norm_layer(self.num_features)
    self.avgpool = nn.AdaptiveAvgPool1D(1)
    self.head = nn.Linear(self.num_features,
                          num_classes) if num_classes > 0 else Identity()

    # weight init: LayerNorm -> (weight=1, bias=0), Linear -> truncated normal
    for m in self.sublayers():
        if isinstance(m, nn.LayerNorm):
            paddle.assign(paddle.zeros(m.bias.shape), m.bias)
            paddle.assign(paddle.ones(m.weight.shape), m.weight)
        elif isinstance(m, nn.Linear):
            try:
                paddle.assign(trunc_norm_(m.weight.shape, std=0.02), m.weight)
            except Exception:
                # a bare `except:` would also swallow KeyboardInterrupt;
                # log the offending shape instead of failing silently
                print(m.weight.shape)
            if m.bias is not None:
                paddle.assign(paddle.zeros(m.bias.shape), m.bias)
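# `trunc_norm_` is called above but not defined in this snippet. A minimal
# stand-in, assuming it returns a tensor of the given shape drawn from a
# clipped normal distribution; the repo's actual implementation may differ.
import paddle

def trunc_norm_(shape, std=0.02, a=-2.0, b=2.0):
    # Sample N(0, std) and clip to [a*std, b*std]; a cheap approximation of a
    # true truncated-normal sampler.
    t = paddle.normal(mean=0.0, std=std, shape=list(shape))
    return paddle.clip(t, min=a * std, max=b * std)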
def __init__(
    self,
    img_size=224,
    patch_size=4,
    in_chans=3,
    embed_dim=96,
    depths=[2, 2, 6, 2],
    num_heads=[3, 6, 12, 24],
    window_size=7,
    mlp_ratio=4.0,
    qkv_bias=True,
    qk_scale=None,
    drop_rate=0.0,
    attn_drop_rate=0.0,
    drop_path_rate=0.1,
    norm_layer=nn.LayerNorm,
    ape=False,
    patch_norm=True,
    class_dim=1000,
    with_pool=True,
    **kwargs,
):
    super().__init__()
    self.class_dim = class_dim
    self.with_pool = with_pool
    self.num_layers = len(depths)
    self.embed_dim = embed_dim
    self.ape = ape
    self.patch_norm = patch_norm
    self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
    self.mlp_ratio = mlp_ratio

    # split image into non-overlapping patches
    self.patch_embed = PatchEmbed(
        img_size=img_size,
        patch_size=patch_size,
        in_chans=in_chans,
        embed_dim=embed_dim,
        norm_layer=norm_layer if self.patch_norm else None,
    )
    num_patches = self.patch_embed.num_patches
    patches_resolution = self.patch_embed.patches_resolution
    self.patches_resolution = patches_resolution

    # absolute position embedding
    if self.ape:
        self.absolute_pos_embed = add_parameter(
            self, paddle.zeros((1, num_patches, embed_dim))
        )
        trunc_normal_(self.absolute_pos_embed)

    self.pos_drop = nn.Dropout(p=drop_rate)

    # stochastic depth decay rule
    dpr = np.linspace(0, drop_path_rate, sum(depths))

    # build layers
    self.layers = nn.LayerList()
    for i_layer in range(self.num_layers):
        layer = BasicLayer(
            dim=int(embed_dim * 2 ** i_layer),
            input_resolution=(
                patches_resolution[0] // (2 ** i_layer),
                patches_resolution[1] // (2 ** i_layer),
            ),
            depth=depths[i_layer],
            num_heads=num_heads[i_layer],
            window_size=window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop=drop_rate,
            attn_drop=attn_drop_rate,
            drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
            norm_layer=norm_layer,
            downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
        )
        self.layers.append(layer)

    self.norm = norm_layer(self.num_features)

    if with_pool:
        self.avgpool = nn.AdaptiveAvgPool1D(1)
    if class_dim > 0:
        self.head = nn.Linear(self.num_features, class_dim)

    self.apply(self._init_weights)
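# Worked example of the stochastic depth decay rule shared by the three
# constructors above: per-block drop-path rates grow linearly from 0 to
# drop_path_rate across all blocks, and each stage takes its contiguous slice.
import numpy as np

depths = [2, 2, 6, 2]
dpr = np.linspace(0, 0.1, sum(depths))
for i in range(len(depths)):
    print(i, dpr[sum(depths[:i]):sum(depths[:i + 1])])
# stage 0 -> [0.0, 0.0091], ..., stage 3 -> [0.0909, 0.1] (rounded)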
def func_test_layer_str(self):
    module = nn.ELU(0.2)
    self.assertEqual(str(module), 'ELU(alpha=0.2)')

    module = nn.CELU(0.2)
    self.assertEqual(str(module), 'CELU(alpha=0.2)')

    module = nn.GELU(True)
    self.assertEqual(str(module), 'GELU(approximate=True)')

    module = nn.Hardshrink()
    self.assertEqual(str(module), 'Hardshrink(threshold=0.5)')

    module = nn.Hardswish(name="Hardswish")
    self.assertEqual(str(module), 'Hardswish(name=Hardswish)')

    module = nn.Tanh(name="Tanh")
    self.assertEqual(str(module), 'Tanh(name=Tanh)')

    module = nn.Hardtanh(name="Hardtanh")
    self.assertEqual(str(module),
                     'Hardtanh(min=-1.0, max=1.0, name=Hardtanh)')

    module = nn.PReLU(1, 0.25, name="PReLU", data_format="NCHW")
    self.assertEqual(
        str(module),
        'PReLU(num_parameters=1, data_format=NCHW, init=0.25, dtype=float32, name=PReLU)'
    )

    module = nn.ReLU()
    self.assertEqual(str(module), 'ReLU()')

    module = nn.ReLU6()
    self.assertEqual(str(module), 'ReLU6()')

    module = nn.SELU()
    self.assertEqual(
        str(module),
        'SELU(scale=1.0507009873554805, alpha=1.6732632423543772)')

    module = nn.LeakyReLU()
    self.assertEqual(str(module), 'LeakyReLU(negative_slope=0.01)')

    module = nn.Sigmoid()
    self.assertEqual(str(module), 'Sigmoid()')

    module = nn.Hardsigmoid()
    self.assertEqual(str(module), 'Hardsigmoid()')

    module = nn.Softplus()
    self.assertEqual(str(module), 'Softplus(beta=1, threshold=20)')

    module = nn.Softshrink()
    self.assertEqual(str(module), 'Softshrink(threshold=0.5)')

    module = nn.Softsign()
    self.assertEqual(str(module), 'Softsign()')

    module = nn.Swish()
    self.assertEqual(str(module), 'Swish()')

    module = nn.Tanhshrink()
    self.assertEqual(str(module), 'Tanhshrink()')

    module = nn.ThresholdedReLU()
    self.assertEqual(str(module), 'ThresholdedReLU(threshold=1.0)')

    module = nn.LogSigmoid()
    self.assertEqual(str(module), 'LogSigmoid()')

    module = nn.Softmax()
    self.assertEqual(str(module), 'Softmax(axis=-1)')

    module = nn.LogSoftmax()
    self.assertEqual(str(module), 'LogSoftmax(axis=-1)')

    module = nn.Maxout(groups=2)
    self.assertEqual(str(module), 'Maxout(groups=2, axis=1)')

    module = nn.Linear(2, 4, name='linear')
    self.assertEqual(
        str(module),
        'Linear(in_features=2, out_features=4, dtype=float32, name=linear)')

    module = nn.Upsample(size=[12, 12])
    self.assertEqual(
        str(module),
        'Upsample(size=[12, 12], mode=nearest, align_corners=False, align_mode=0, data_format=NCHW)'
    )

    module = nn.UpsamplingNearest2D(size=[12, 12])
    self.assertEqual(
        str(module), 'UpsamplingNearest2D(size=[12, 12], data_format=NCHW)')

    module = nn.UpsamplingBilinear2D(size=[12, 12])
    self.assertEqual(
        str(module), 'UpsamplingBilinear2D(size=[12, 12], data_format=NCHW)')

    module = nn.Bilinear(in1_features=5, in2_features=4, out_features=1000)
    self.assertEqual(
        str(module),
        'Bilinear(in1_features=5, in2_features=4, out_features=1000, dtype=float32)'
    )

    module = nn.Dropout(p=0.5)
    self.assertEqual(str(module),
                     'Dropout(p=0.5, axis=None, mode=upscale_in_train)')

    module = nn.Dropout2D(p=0.5)
    self.assertEqual(str(module), 'Dropout2D(p=0.5, data_format=NCHW)')

    module = nn.Dropout3D(p=0.5)
    self.assertEqual(str(module), 'Dropout3D(p=0.5, data_format=NCDHW)')

    module = nn.AlphaDropout(p=0.5)
    self.assertEqual(str(module), 'AlphaDropout(p=0.5)')

    module = nn.Pad1D(padding=[1, 2], mode='constant')
    self.assertEqual(
        str(module),
        'Pad1D(padding=[1, 2], mode=constant, value=0.0, data_format=NCL)')

    module = nn.Pad2D(padding=[1, 0, 1, 2], mode='constant')
    self.assertEqual(
        str(module),
        'Pad2D(padding=[1, 0, 1, 2], mode=constant, value=0.0, data_format=NCHW)'
    )

    module = nn.ZeroPad2D(padding=[1, 0, 1, 2])
    self.assertEqual(str(module),
                     'ZeroPad2D(padding=[1, 0, 1, 2], data_format=NCHW)')

    module = nn.Pad3D(padding=[1, 0, 1, 2, 0, 0], mode='constant')
    self.assertEqual(
        str(module),
        'Pad3D(padding=[1, 0, 1, 2, 0, 0], mode=constant, value=0.0, data_format=NCDHW)'
    )

    module = nn.CosineSimilarity(axis=0)
    self.assertEqual(str(module), 'CosineSimilarity(axis=0, eps=1e-08)')

    module = nn.Embedding(10, 3, sparse=True)
    self.assertEqual(str(module), 'Embedding(10, 3, sparse=True)')

    module = nn.Conv1D(3, 2, 3)
    self.assertEqual(str(module),
                     'Conv1D(3, 2, kernel_size=[3], data_format=NCL)')

    module = nn.Conv1DTranspose(2, 1, 2)
    self.assertEqual(
        str(module),
        'Conv1DTranspose(2, 1, kernel_size=[2], data_format=NCL)')

    module = nn.Conv2D(4, 6, (3, 3))
    self.assertEqual(str(module),
                     'Conv2D(4, 6, kernel_size=[3, 3], data_format=NCHW)')

    module = nn.Conv2DTranspose(4, 6, (3, 3))
    self.assertEqual(
        str(module),
        'Conv2DTranspose(4, 6, kernel_size=[3, 3], data_format=NCHW)')

    module = nn.Conv3D(4, 6, (3, 3, 3))
    self.assertEqual(
        str(module),
        'Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

    module = nn.Conv3DTranspose(4, 6, (3, 3, 3))
    self.assertEqual(
        str(module),
        'Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)')

    module = nn.PairwiseDistance()
    self.assertEqual(str(module), 'PairwiseDistance(p=2.0)')

    module = nn.InstanceNorm1D(2)
    self.assertEqual(str(module),
                     'InstanceNorm1D(num_features=2, epsilon=1e-05)')

    module = nn.InstanceNorm2D(2)
    self.assertEqual(str(module),
                     'InstanceNorm2D(num_features=2, epsilon=1e-05)')

    module = nn.InstanceNorm3D(2)
    self.assertEqual(str(module),
                     'InstanceNorm3D(num_features=2, epsilon=1e-05)')

    module = nn.GroupNorm(num_channels=6, num_groups=6)
    self.assertEqual(
        str(module),
        'GroupNorm(num_groups=6, num_channels=6, epsilon=1e-05)')

    module = nn.LayerNorm([2, 2, 3])
    self.assertEqual(
        str(module),
        'LayerNorm(normalized_shape=[2, 2, 3], epsilon=1e-05)')

    module = nn.BatchNorm1D(1)
    self.assertEqual(
        str(module),
        'BatchNorm1D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCL)'
    )

    module = nn.BatchNorm2D(1)
    self.assertEqual(
        str(module),
        'BatchNorm2D(num_features=1, momentum=0.9, epsilon=1e-05)')

    module = nn.BatchNorm3D(1)
    self.assertEqual(
        str(module),
        'BatchNorm3D(num_features=1, momentum=0.9, epsilon=1e-05, data_format=NCDHW)'
    )

    module = nn.SyncBatchNorm(2)
    self.assertEqual(
        str(module),
        'SyncBatchNorm(num_features=2, momentum=0.9, epsilon=1e-05)')

    module = nn.LocalResponseNorm(size=5)
    self.assertEqual(
        str(module),
        'LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1.0)')

    module = nn.AvgPool1D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'AvgPool1D(kernel_size=2, stride=2, padding=0)')

    module = nn.AvgPool2D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'AvgPool2D(kernel_size=2, stride=2, padding=0)')

    module = nn.AvgPool3D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'AvgPool3D(kernel_size=2, stride=2, padding=0)')

    module = nn.MaxPool1D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'MaxPool1D(kernel_size=2, stride=2, padding=0)')

    module = nn.MaxPool2D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'MaxPool2D(kernel_size=2, stride=2, padding=0)')

    module = nn.MaxPool3D(kernel_size=2, stride=2, padding=0)
    self.assertEqual(str(module),
                     'MaxPool3D(kernel_size=2, stride=2, padding=0)')

    module = nn.AdaptiveAvgPool1D(output_size=16)
    self.assertEqual(str(module), 'AdaptiveAvgPool1D(output_size=16)')

    module = nn.AdaptiveAvgPool2D(output_size=3)
    self.assertEqual(str(module), 'AdaptiveAvgPool2D(output_size=3)')

    module = nn.AdaptiveAvgPool3D(output_size=3)
    self.assertEqual(str(module), 'AdaptiveAvgPool3D(output_size=3)')

    module = nn.AdaptiveMaxPool1D(output_size=16, return_mask=True)
    self.assertEqual(
        str(module), 'AdaptiveMaxPool1D(output_size=16, return_mask=True)')

    module = nn.AdaptiveMaxPool2D(output_size=3, return_mask=True)
    self.assertEqual(str(module),
                     'AdaptiveMaxPool2D(output_size=3, return_mask=True)')

    module = nn.AdaptiveMaxPool3D(output_size=3, return_mask=True)
    self.assertEqual(str(module),
                     'AdaptiveMaxPool3D(output_size=3, return_mask=True)')

    module = nn.SimpleRNNCell(16, 32)
    self.assertEqual(str(module), 'SimpleRNNCell(16, 32)')

    module = nn.LSTMCell(16, 32)
    self.assertEqual(str(module), 'LSTMCell(16, 32)')

    module = nn.GRUCell(16, 32)
    self.assertEqual(str(module), 'GRUCell(16, 32)')

    module = nn.PixelShuffle(3)
    self.assertEqual(str(module), 'PixelShuffle(upscale_factor=3)')

    module = nn.SimpleRNN(16, 32, 2)
    self.assertEqual(
        str(module),
        'SimpleRNN(16, 32, num_layers=2\n  (0): RNN(\n    (cell): SimpleRNNCell(16, 32)\n  )\n  (1): RNN(\n    (cell): SimpleRNNCell(32, 32)\n  )\n)'
    )

    module = nn.LSTM(16, 32, 2)
    self.assertEqual(
        str(module),
        'LSTM(16, 32, num_layers=2\n  (0): RNN(\n    (cell): LSTMCell(16, 32)\n  )\n  (1): RNN(\n    (cell): LSTMCell(32, 32)\n  )\n)'
    )

    module = nn.GRU(16, 32, 2)
    self.assertEqual(
        str(module),
        'GRU(16, 32, num_layers=2\n  (0): RNN(\n    (cell): GRUCell(16, 32)\n  )\n  (1): RNN(\n    (cell): GRUCell(32, 32)\n  )\n)'
    )

    module1 = nn.Sequential(('conv1', nn.Conv2D(1, 20, 5)),
                            ('relu1', nn.ReLU()),
                            ('conv2', nn.Conv2D(20, 64, 5)),
                            ('relu2', nn.ReLU()))
    self.assertEqual(
        str(module1),
        'Sequential(\n  '
        '(conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n  '
        '(relu1): ReLU()\n  '
        '(conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n  '
        '(relu2): ReLU()\n)')

    module2 = nn.Sequential(
        nn.Conv3DTranspose(4, 6, (3, 3, 3)),
        nn.AvgPool3D(kernel_size=2, stride=2, padding=0),
        nn.Tanh(name="Tanh"), module1, nn.Conv3D(4, 6, (3, 3, 3)),
        nn.MaxPool3D(kernel_size=2, stride=2, padding=0), nn.GELU(True))
    self.assertEqual(
        str(module2),
        'Sequential(\n  '
        '(0): Conv3DTranspose(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '
        '(1): AvgPool3D(kernel_size=2, stride=2, padding=0)\n  '
        '(2): Tanh(name=Tanh)\n  '
        '(3): Sequential(\n    (conv1): Conv2D(1, 20, kernel_size=[5, 5], data_format=NCHW)\n    (relu1): ReLU()\n'
        '    (conv2): Conv2D(20, 64, kernel_size=[5, 5], data_format=NCHW)\n    (relu2): ReLU()\n  )\n  '
        '(4): Conv3D(4, 6, kernel_size=[3, 3, 3], data_format=NCDHW)\n  '
        '(5): MaxPool3D(kernel_size=2, stride=2, padding=0)\n  '
        '(6): GELU(approximate=True)\n)')
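# The assertions above exercise paddle.nn.Layer.__str__, which is assembled
# from each layer's extra_repr() plus the reprs of its sublayers. A
# user-defined Layer plugs into the same machinery by overriding extra_repr()
# (a sketch; this class is not part of the test above):
import paddle.nn as nn

class Scale(nn.Layer):
    def __init__(self, factor=2.0):
        super().__init__()
        self.factor = factor

    def forward(self, x):
        return x * self.factor

    def extra_repr(self):
        # the returned string becomes the content inside the parentheses
        return 'factor={}'.format(self.factor)

print(Scale())  # Scale(factor=2.0)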