def __init__(self):
        super(Policy, self).__init__()

        self.affine1 = nn.Linear(4, 128)
        self.action_head = nn.Linear(128, 2)
        self.value_head = nn.Linear(128, 1)

        self.saved_actions = []
        self.rewards = []
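A minimal sketch of the usual two-headed forward that pairs with this constructor (the standard PyTorch actor-critic pattern; assumes import torch.nn.functional as F and is illustrative, not part of the snippet):

def forward(self, x):
    x = F.relu(self.affine1(x))
    action_prob = F.softmax(self.action_head(x), dim=-1)  # actor: distribution over the 2 actions
    state_value = self.value_head(x)                      # critic: scalar state value
    return action_prob, state_value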
Example #2
 def __init__(self, input_dim, d_key=None, d_value=None, n_head=1, dropout_rate=0.):
     super(MultiHeadAttention, self).__init__()
     self.n_head = n_head
     self.d_key = input_dim // n_head if d_key is None else d_key
     self.d_value = input_dim // n_head if d_value is None else d_value
     self.dropout_rate = dropout_rate
     self.q_proj = nn.Linear(input_dim, self.d_key * self.n_head)
     self.k_proj = nn.Linear(input_dim, self.d_key * self.n_head)
     self.v_proj = nn.Linear(input_dim, self.d_value * self.n_head)
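A quick sanity check on the per-head defaults: d_key and d_value each fall back to input_dim // n_head, so the q/k/v projections preserve the model width (plain Python, illustrative values):

input_dim, n_head = 128, 8
d_key = input_dim // n_head         # 16 per head
assert d_key * n_head == input_dim  # each projection maps 128 -> 128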
Example #3
    def __init__(self):
        super(Policy, self).__init__()

        self.affine1 = nn.Linear(4, 128)
        self.affine2 = nn.Linear(128, 2)
        self.dropout_ratio = 0.6

        self.saved_log_probs = []
        self.rewards = []
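A minimal sketch of how such a REINFORCE-style policy is typically driven, assuming PyTorch and a forward() that returns action probabilities (the helper is illustrative, not part of the snippet):

import torch
from torch.distributions import Categorical

def select_action(policy, state):
    probs = policy(torch.as_tensor(state, dtype=torch.float32).unsqueeze(0))
    dist = Categorical(probs)
    action = dist.sample()
    policy.saved_log_probs.append(dist.log_prob(action))  # consumed later by the policy-gradient update
    return action.item()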
Example #4
    def __init__(self, t_dim, num_class):
        super(REModel, self).__init__()
        self.pc_fc1 = nn.Linear(t_dim, t_dim)
        self.pc_fc2 = nn.Linear(t_dim, num_class)

        self.multi_head_attn = MultiHeadAttention(t_dim, n_head=8)

        self.conv1d = Conv1d(t_dim * 4, t_dim, filter_size=3, padding=1)

        self.po_fc = nn.Linear(t_dim, 1)
        self.po1_fc = nn.Linear(t_dim, num_class)
        self.po2_fc = nn.Linear(t_dim, num_class)
Example #5
    def __init__(self):
        super(ModelLinear, self).__init__()
        models = []
        with supernet(expand_ratio=(1, 2, 4)) as ofa_super:
            models1 = []
            models1 += [nn.Embedding(size=(64, 64))]
            models1 += [nn.Linear(64, 128)]
            models1 += [nn.LayerNorm(128)]
            models1 += [nn.Linear(128, 256)]
            models1 = ofa_super.convert(models1)

        models += models1
        self.models = paddle.nn.Sequential(*models)
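A hedged usage sketch for the supernet above, assuming PaddleSlim's once-for-all API (paddleslim.nas.ofa); exact method names may differ between versions:

from paddleslim.nas.ofa import OFA

model = ModelLinear()
ofa_model = OFA(model)  # wraps the supernet so sub-networks can be sampled and trained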
Example #6
    def __init__(self,
                 img_size=256,
                 style_dim=64,
                 num_domains=2,
                 max_conv_dim=512):
        super().__init__()
        self.num_domains = num_domains
        dim_in = 2**14 // img_size
        blocks = []
        blocks += [nn.Conv2D(3, dim_in, 3, 1, 1)]

        repeat_num = int(np.log2(img_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            blocks += [ResBlk(dim_in, dim_out, downsample=True)]
            dim_in = dim_out

        blocks += [LeakyRelu(alpha=0.2)]
        blocks += [nn.Conv2D(dim_out, dim_out, 4, 1, 0)]
        blocks += [LeakyRelu(alpha=0.2)]
        self.shared = fluid.dygraph.Sequential(*blocks)

        self.unshared = fluid.dygraph.Sequential()
        for _ in range(num_domains):
            self.unshared.add_sublayer(f'lsub_{_}',
                                       nn.Linear(dim_out, style_dim))
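Plain-Python check of the channel and resolution bookkeeping for the default img_size=256: the stem starts at 2**14 // 256 = 64 channels, and six downsampling ResBlks double the width (capped at 512) while shrinking the map to 4x4, which the final 4x4 valid convolution reduces to 1x1:

import numpy as np
img_size, max_conv_dim = 256, 512
dim_in = 2**14 // img_size              # 64
size = img_size
for _ in range(int(np.log2(img_size)) - 2):
    dim_in = min(dim_in * 2, max_conv_dim)
    size //= 2                          # each ResBlk halves the resolution
assert (dim_in, size) == (512, 4)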
Example #7
    def __init__(self, t_dim, maxlen, char_num, word_num, word_dim, word_vec, num_class,
                 padding_idx=0, dropout_rate=0.25):
        super(KGModel, self).__init__()
        self.pe = nn.Embedding(size=[maxlen, t_dim],
                               param_attr=fluid.ParamAttr(name="position_embedding.w_0",
                                                          initializer=fluid.initializer.ConstantInitializer(
                                                              value=0.)))

        self.ce = nn.Embedding(size=[char_num, t_dim], padding_idx=padding_idx,
                               param_attr=fluid.ParamAttr(name="char_embedding.w_0"))

        self.we_p = nn.Embedding(size=[word_num, word_dim], padding_idx=padding_idx,
                                 param_attr=fluid.ParamAttr(name="word_embedding.w_0",
                                                            initializer=fluid.initializer.NumpyArrayInitializer(
                                                                word_vec),
                                                            trainable=False))

        self.we = nn.Linear(word_dim, t_dim, param_attr=fluid.ParamAttr(name="word_embedding.w_1"), bias_attr=False)

        self.er_model = ERModel(t_dim)

        self.re_model = REModel(t_dim, num_class)

        self.padding_idx = padding_idx

        self.dropout_rate = dropout_rate
Example #8
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 is_bias=True,
                 is_bn=True,
                 is_relu=True,
                 is_test=False):
        super(FCBNReluLayer, self).__init__()
        self.is_bn = is_bn
        self.is_relu = is_relu

        if is_bias:
            bias_init = fluid.ParamAttr(
                initializer=fluid.initializer.ConstantInitializer(0.))
        else:
            bias_init = False
        self.linear = nn.Linear(in_channels * in_size * in_size,
                                out_channels,
                                bias_attr=bias_init)
        self.bn = nn.BatchNorm(out_channels,
                               param_attr=norm_weight_init(),
                               bias_attr=norm_bias_init(),
                               act=None,
                               momentum=0.9,
                               use_global_stats=is_test)
Example #9
 def __init__(self, inplanes=-1, planes=400, loss_weight=0.5):
     super(AuxHead, self).__init__()
     self.convs = \
         ConvModule(inplanes, inplanes * 2, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1), bias=False)
     self.loss_weight = loss_weight
     self.dropout = nn.Dropout(p=0.5)
     self.fc = nn.Linear(inplanes * 2, planes)
Example #10
 def __init__(self):
     super(ClsLite, self).__init__()
     self.conv_cfg = dict(type='Conv')
     self.norm_cfg = dict(type='BN')
     self._make_stem_layer()
     self.avgpool = nn.Pool2D(pool_type='avg', global_pooling=True)
     self.fc = nn.Linear(128, 2, act='softmax')
Example #11
 def __init__(self):
     super(Model, self).__init__()
     with supernet(kernel_size=(3, 5, 7), expand_ratio=[1, 2,
                                                        4]) as ofa_super:
         models = []
         models += [nn.Conv2D(1, 6, 3)]
         models += [ReLU()]
         models += [nn.Pool2D(2, 'max', 2)]
         models += [nn.Conv2D(6, 16, 5, padding=0)]
         models += [ReLU()]
         models += [nn.Pool2D(2, 'max', 2)]
         models += [
             nn.Linear(784, 120),
             nn.Linear(120, 84),
             nn.Linear(84, 10)
         ]
         models = ofa_super.convert(models)
     self.models = paddle.nn.Sequential(*models)
Example #12
    def __init__(self):
        super(ModelLinear, self).__init__()
        models = []
        with supernet(expand_ratio=(1, 2, 4)) as ofa_super:
            models1 = []
            models1 += [nn.Linear(64, 128)]
            models1 += [nn.Linear(128, 256)]
            models1 = ofa_super.convert(models1)

        models += models1

        with supernet(channel=((64, 128, 256), (64, 128, 256))) as ofa_super:
            models1 = []
            models1 += [nn.Linear(256, 128)]
            models1 += [nn.Linear(128, 256)]
            models1 = ofa_super.convert(models1)

        models += models1

        self.models = paddle.nn.Sequential(*models)
Example #13
 def __init__(self, style_dim, num_features):
     super().__init__()
     self.norm = fluid.dygraph.InstanceNorm(
         num_features,
         epsilon=1e-05,
         param_attr=fluid.ParamAttr(
             initializer=fluid.initializer.Constant(1.0), trainable=False),
         bias_attr=fluid.ParamAttr(
             initializer=fluid.initializer.Constant(0.0), trainable=False),
          dtype='float32')  # equivalent to affine=False: both norm parameters above are frozen (trainable=False)
     self.fc = nn.Linear(style_dim, num_features * 2)
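A hedged sketch of the adaptive-instance-norm forward this constructor is usually paired with (as in StarGAN v2 ports); the reshape/split below is an assumption about the intended forward, not code from the snippet:

def forward(self, x, s):
    h = self.fc(s)  # (N, num_features * 2): per-channel gamma and beta from the style code
    h = fluid.layers.reshape(h, (h.shape[0], h.shape[1], 1, 1))
    gamma, beta = fluid.layers.split(h, num_or_sections=2, dim=1)
    return (1 + gamma) * self.norm(x) + beta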
Example #14
    def __init__(self, t_dim):
        super(ERModel, self).__init__()
        self.dilated_gated_conv1d_list = []
        for i in range(3):
            self.dilated_gated_conv1d_list.append(
                DilatedGatedConv1d(t_dim, t_dim, filter_size=3, padding=1, dilation=1))
            self.dilated_gated_conv1d_list.append(
                DilatedGatedConv1d(t_dim, t_dim, filter_size=3, padding=2, dilation=2))
            self.dilated_gated_conv1d_list.append(
                DilatedGatedConv1d(t_dim, t_dim, filter_size=3, padding=5, dilation=5))
        self.dilated_gated_conv1d_list.append(DilatedGatedConv1d(t_dim, t_dim, filter_size=3, padding=1, dilation=1))
        self.dilated_gated_conv1d_list.append(DilatedGatedConv1d(t_dim, t_dim, filter_size=3, padding=1, dilation=1))
        self.dilated_gated_conv1d_list.append(DilatedGatedConv1d(t_dim, t_dim, filter_size=3, padding=1, dilation=1))

        self.pn1_fc1 = nn.Linear(t_dim, t_dim)
        self.pn1_fc2 = nn.Linear(t_dim, 1)

        self.pn2_fc1 = nn.Linear(t_dim, t_dim)
        self.pn2_fc2 = nn.Linear(t_dim, 1)

        self.multi_head_attn = MultiHeadAttention(t_dim, n_head=8)

        self.conv1d = Conv1d(t_dim * 2, t_dim, filter_size=3, padding=1)

        self.ps1_fc = nn.Linear(t_dim, 1)
        self.ps2_fc = nn.Linear(t_dim, 1)
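Receptive-field arithmetic for the dilated stack above: at stride 1, a filter_size-3 convolution with dilation d widens the receptive field by 2*d, so the twelve layers cover 55 input positions (plain Python):

dilations = [1, 2, 5] * 3 + [1, 1, 1]
rf = 1 + sum(2 * d for d in dilations)
assert rf == 55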
Example #15
    def __init__(self, name_scope, num_classes=1):
        super(LeNet, self).__init__(name_scope)

        # Conv and pooling blocks: each conv layer uses a sigmoid activation and is followed by 2x2 max pooling
        self.conv1 = nn.Conv2D(num_channels=1,
                               num_filters=6,
                               filter_size=5,
                               act='sigmoid')
        self.pool1 = nn.Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = nn.Conv2D(num_channels=6,
                               num_filters=16,
                               filter_size=5,
                               act='sigmoid')
        self.pool2 = nn.Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        # The third conv layer
        self.conv3 = nn.Conv2D(num_channels=16,
                               num_filters=120,
                               filter_size=4,
                               act='sigmoid')
        # Fully connected layers: the first outputs 64 neurons, the second outputs one neuron per label class
        self.fc1 = nn.Linear(input_dim=120, output_dim=64, act='sigmoid')
        self.fc2 = nn.Linear(input_dim=64, output_dim=num_classes)
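Dimension check for fc1's input_dim=120, assuming a 1x28x28 input and 'valid' convolutions (plain Python):

size = 28
size = size - 5 + 1  # conv1 (5x5) -> 24
size //= 2           # pool1       -> 12
size = size - 5 + 1  # conv2 (5x5) -> 8
size //= 2           # pool2       -> 4
size = size - 4 + 1  # conv3 (4x4) -> 1
assert size == 1     # conv3's 120 filters feed fc1 with exactly 120 features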
Example #16
 def __init__(self,
              backbone,
              neck,
              head,
              train_cfg=None,
              test_cfg=None,
              pretrained=None):
     super(Classifier, self).__init__()
     self.dropout = backbone.pop('dropout')
     self.backbone = ResNet(**backbone)
     self.train_cfg = train_cfg
     self.test_cfg = test_cfg
     self.triple_loss = TripletLoss()
     self.avgpool = nn.Pool2D(pool_type='avg', global_pooling=True)
     self.fc = nn.Linear(512, 2, act='softmax')
     self.init_weights(pretrained=pretrained)
Example #17
    def __init__(self, latent_dim=16, style_dim=64, num_domains=2):
        super().__init__()
        layers = []
        layers += [nn.Linear(latent_dim, 512, act='relu')]
        for _ in range(3):
            layers += [nn.Linear(512, 512, act='relu')]
        self.shared = fluid.dygraph.Sequential(*layers)

        self.unshared = fluid.dygraph.Sequential()
        for _ in range(num_domains):
            sublayer = fluid.dygraph.Sequential(
                nn.Linear(512, 512, act='relu'), nn.Linear(512,
                                                           512,
                                                           act='relu'),
                nn.Linear(512, 512, act='relu'), nn.Linear(512, style_dim))
            self.unshared.add_sublayer(f'lsub_{_}', sublayer)
Example #18
    def __init__(self,
                 name,
                 Block,
                 layers,
                 num_classes=1000,
                 groups=1,
                 is_test=False):
        """

        :param name: str, namescope
        :param layers: int, the layer of defined network
        :param num_classes: int, the dimension of final output
        :param groups: int, default is 1
        """
        super(ResNet, self).__init__(name_scope=name)

        support_layers = [18, 34, 50, 101, 152]
        assert layers in support_layers, \
            "layers must be one of [18, 34, 50, 101, 152]"
        self.layers = layers

        if layers == 18:
            depths = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:  # ResNet-34 and ResNet-50 share [3, 4, 6, 3]
            depths = [3, 4, 6, 3]
        elif layers == 101:
            depths = [3, 4, 23, 3]
        elif layers == 152:
            depths = [3, 8, 36, 3]

        strides = [1, 2, 2, 2]
        num_filters = [64, 128, 256, 512]

        self.in_channels = 64
        self.dilation = 1
        self.groups = groups

        self.conv_bn_init = ConvBNLayer(3,
                                        out_channels=self.in_channels,
                                        filter_size=7,
                                        stride=2,
                                        is_test=is_test)

        block_collect = []
        downsample = None
        for i in range(len(depths)):
            # collect layers in each block
            _block = []

            stride = strides[i]
            out_channel = num_filters[i]

            if stride != 1 or self.in_channels != num_filters[
                    i] * Block.expansion:
                downsample = True
            bottleneck_block = self.add_sublayer(
                "block{}_0".format(i),
                Block(self.in_channels,
                      out_channel,
                      stride=stride,
                      is_downsample=downsample,
                      is_test=is_test))

            downsample = False

            _block.append(bottleneck_block)

            self.in_channels = num_filters[i] * Block.expansion

            for j in range(1, depths[i]):
                bottleneck_block = self.add_sublayer(
                    "block{}_{}".format(i, j),
                    Block(self.in_channels, out_channel, is_test=is_test))
                _block.append(bottleneck_block)

            # collect blocks
            block_collect.append(_block)

        self.block_collect = block_collect

        self.maxpool = nn.Pool2D(pool_size=3,
                                 pool_stride=2,
                                 pool_padding=1,
                                 pool_type="max")

        self.global_pool = nn.Pool2D(pool_type='avg', global_pooling=True)
        self.fc = nn.Linear(input_dim=512 * Block.expansion,
                            output_dim=num_classes)
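A hedged usage sketch; BottleneckBlock is a hypothetical Block class exposing the expansion attribute (4 for bottleneck blocks) that this constructor relies on:

import paddle.fluid as fluid
with fluid.dygraph.guard():
    model = ResNet("resnet50", BottleneckBlock, layers=50, num_classes=1000)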
Example #19
 def __init__(self):
     super(SiameseNetwork, self).__init__()
     self.model = MobileNet()
     self.linear1 = nn.Linear(input_dim=3 * 8 * 8, output_dim=3 * 8 * 4)
     self.linear2 = nn.Linear(input_dim=3 * 8 * 4, output_dim=3 * 8 * 2)
     self.linear3 = nn.Linear(input_dim=3 * 8 * 2, output_dim=24)
Example #20
    def __init__(self,
                 name,
                 input_dim=(128, 256),
                 pred_input_dim=(256, 256),
                 pred_inter_dim=(256, 256),
                 is_test=False):
        super(AtomIouNet, self).__init__(name)
        self.name = self.full_name()
        self.conv3_1r = ConvBNReluLayer(input_dim[0],
                                        128,
                                        filter_size=3,
                                        stride=1,
                                        is_test=is_test)
        self.conv3_1t = ConvBNReluLayer(input_dim[0],
                                        256,
                                        filter_size=3,
                                        stride=1,
                                        is_test=is_test)

        self.conv3_2t = ConvBNReluLayer(256,
                                        pred_input_dim[0],
                                        filter_size=3,
                                        stride=1,
                                        is_test=is_test)

        self.fc3_1r = ConvBNReluLayer(128,
                                      256,
                                      filter_size=3,
                                      stride=1,
                                      padding=0,
                                      is_test=is_test)

        self.conv4_1r = ConvBNReluLayer(input_dim[1],
                                        256,
                                        filter_size=3,
                                        stride=1,
                                        is_test=is_test)
        self.conv4_1t = ConvBNReluLayer(input_dim[1],
                                        256,
                                        filter_size=3,
                                        stride=1,
                                        is_test=is_test)

        self.conv4_2t = ConvBNReluLayer(256,
                                        pred_input_dim[1],
                                        filter_size=3,
                                        stride=1,
                                        is_test=is_test)

        self.fc34_3r = ConvBNReluLayer(512,
                                       pred_input_dim[0],
                                       filter_size=1,
                                       stride=1,
                                       padding=0,
                                       is_test=is_test)
        self.fc34_4r = ConvBNReluLayer(512,
                                       pred_input_dim[1],
                                       filter_size=1,
                                       stride=1,
                                       padding=0,
                                       is_test=is_test)

        self.fc3_rt = FCBNReluLayer(pred_input_dim[0],
                                    pred_inter_dim[0],
                                    in_size=5,
                                    is_test=is_test)
        self.fc4_rt = FCBNReluLayer(pred_input_dim[1],
                                    pred_inter_dim[1],
                                    in_size=3,
                                    is_test=is_test)

        bias_init = fluid.initializer.ConstantInitializer(0.)
        self.iou_predictor = nn.Linear(pred_inter_dim[0] + pred_inter_dim[1],
                                       1,
                                       bias_attr=bias_init)

        self.outs = {}
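Width check for the final head: iou_predictor is sized for the concatenation of the fc3_rt and fc4_rt outputs (an assumption based on the ATOM IoU-Net design):

pred_inter_dim = (256, 256)                          # the default above
assert pred_inter_dim[0] + pred_inter_dim[1] == 512  # iou_predictor input width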