Example #1
    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        self.enc_s = nn.Sequential(
            nn.Linear(state_dim, mid_dim),
            nn.ReLU(),
            nn.Linear(mid_dim, mid_dim),
        )
        self.enc_a = nn.Sequential(
            nn.Linear(action_dim, mid_dim),
            nn.ReLU(),
            nn.Linear(mid_dim, mid_dim),
        )

        self.net = DenseNet(mid_dim)
        net_out_dim = self.net.out_dim

        self.dec_a = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim),
            nn.Hardswish(),
            nn.Linear(mid_dim, action_dim),
            nn.Tanh(),
        )
        self.dec_q = nn.Sequential(
            nn.Linear(net_out_dim, mid_dim),
            nn.Hardswish(),
            nn.utils.spectral_norm(nn.Linear(mid_dim, 1)),
        )
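Only the constructor is shown; a plausible forward pass for this shared-body actor-critic (a sketch, not part of the original, assuming the state and action encodings are summed before the shared DenseNet; `state` and `action` are batch tensors):

    def forward(self, state, action):  # hypothetical wiring, not in the original snippet
        a = self.dec_a(self.net(self.enc_s(state)))  # actor head; Tanh keeps actions in [-1, 1]
        q = self.dec_q(self.net(self.enc_s(state) + self.enc_a(action)))  # spectral-norm critic head
        return a, q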
Example #2
 def __init__(self, dim, hidden_dim, dropout=0.):
     super().__init__()
     self.net = nn.Sequential(
         nn.Conv2d(dim, hidden_dim, 1), nn.Hardswish(),
         DepthWiseConv2d(hidden_dim, hidden_dim, 3, padding=1),
         nn.Hardswish(), nn.Dropout(dropout), nn.Conv2d(hidden_dim, dim, 1),
         nn.Dropout(dropout))
Example #3
    def __init__(self, mid_dim=128, img_shape=(32, 32, 3)):
        super().__init__()
        assert img_shape[0] == img_shape[1]
        global_size = int(((img_shape[0] - 2 - 2) / 2 - 2) / 2)  # e.g. 32x32 input: ((32 - 2 - 2) / 2 - 2) / 2 = 6
        inp_dim = img_shape[2]

        self.dropout = nn.Dropout(p=0.25)
        self.net = nn.Sequential(
            nn.Conv2d(inp_dim, 32, 3, 1, padding=0, bias=True),
            nn.ReLU(),
            nn_conv2d_avg2(32, 32, 3, 1, padding=0, bias=True),
            nn_conv2d_avg2(32, 48, 3, 1, padding=0, bias=True),

            # nn.BatchNorm2d(48),
            nn.Conv2d(48, mid_dim, global_size, 1, padding=0, bias=True),
            nn.Hardswish(),
            NnnReshape((-1, mid_dim)),
            # nn.BatchNorm1d(mid_dim),
            nn.Linear(mid_dim, mid_dim, bias=True),
            nn.Hardswish(),

            # nn.BatchNorm1d(mid_dim),
            self.dropout,
            nn.Linear(mid_dim, 10, bias=True),
        )
Example #4
 def __init__(self, lay_dim):
     super().__init__()
     self.dense1 = nn.Sequential(
         nn.Linear(lay_dim, lay_dim),
         nn.ReLU(),
         nn.Linear(lay_dim, lay_dim),
         nn.Hardswish(),
     )
     self.dense2 = nn.Sequential(
         nn.Linear(lay_dim, lay_dim),
         nn.ReLU(),
         nn.Linear(lay_dim, lay_dim),
         nn.Hardswish(),
     )
     self.dense3 = nn.Sequential(
         nn.Linear(lay_dim, lay_dim),
         nn.ReLU(),
         nn.Linear(lay_dim, lay_dim),
         nn.Hardswish(),
     )
     self.dense4 = nn.Sequential(
         nn.Linear(lay_dim, lay_dim),
         nn.ReLU(),
         nn.Linear(lay_dim, lay_dim),
         nn.Hardswish(),
     )
     self.inp_dim = lay_dim
     self.out_dim = lay_dim * 4
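The method body is not included, but out_dim = lay_dim * 4 suggests the four block outputs are concatenated; a minimal forward sketch consistent with those dimensions (an assumption, not the original code):

 def forward(self, x):  # hypothetical: chain the blocks, then concatenate their outputs
     x1 = self.dense1(x)
     x2 = self.dense2(x1)
     x3 = self.dense3(x2)
     x4 = self.dense4(x3)
     return torch.cat((x1, x2, x3, x4), dim=1)  # lay_dim * 4 features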
Example #5
    def __init__(self, mid_dim=128, img_shape=(32, 32, 3)):
        super().__init__()
        assert img_shape[0] == img_shape[1]
        global_size = int(((img_shape[0] - 2 - 2) / 2 - 2) / 2)
        inp_dim = img_shape[2]

        self.conv0 = nn.Sequential(
            nn.Conv2d(inp_dim, 32, 3, 1, padding=0, bias=True),
            nn.ReLU(),
        )
        self.conv1 = nn_conv2d_bn_avg2(32, 32, 3, 1, padding=0, bias=False)
        self.conv1_se = nn_se_2d(32)
        self.conv2 = nn_conv2d_bn_avg2(32, 48, 3, 1, padding=0, bias=False)
        self.conv2_se = nn_se_2d(48)

        self.conv3 = nn.Sequential(
            nn.BatchNorm2d(48),
            nn.Conv2d(48, mid_dim, global_size, 1, padding=0, bias=False),
            nn.Hardswish(),
        )

        self.dropout = nn.Dropout(p=0.25)
        self.dense0 = nn.Sequential(
            NnnReshape((-1, mid_dim)),
            nn.BatchNorm1d(mid_dim),
            nn.Linear(mid_dim, mid_dim, bias=True),
            nn.Hardswish(),
            nn.BatchNorm1d(mid_dim),
            self.dropout,
            nn.Linear(mid_dim, 10, bias=True),
            nn.LogSoftmax(dim=1),
        )
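The squeeze-and-excitation modules (nn_se_2d) are stored separately from the conv stages, which suggests they are applied as channel gates; a forward sketch under that assumption:

    def forward(self, x):  # hypothetical wiring; SE outputs assumed to be channel gates
        x = self.conv0(x)
        x = self.conv1(x)
        x = x * self.conv1_se(x)  # squeeze-and-excitation re-weighting
        x = self.conv2(x)
        x = x * self.conv2_se(x)
        x = self.conv3(x)
        return self.dense0(x)  # log-probabilities over the 10 classes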
Example #6
    def __init__(self, mid_dim, state_dim, action_dim, if_use_dn=False):
        super().__init__()
        if if_use_dn:  # use a DenseNet (it mixes shallow and deep linear layers)
            nn_dense_net = DenseNet(mid_dim)
            self.net_state = nn.Sequential(
                nn.Linear(state_dim, mid_dim),
                nn.ReLU(),
                nn_dense_net,
            )
            lay_dim = nn_dense_net.out_dim
        else:  # use a plain MLP; a deeper network does not necessarily help in RL
            lay_dim = mid_dim
            self.net_state = nn.Sequential(nn.Linear(state_dim, mid_dim),
                                           nn.ReLU(),
                                           nn.Linear(mid_dim, mid_dim),
                                           nn.Hardswish(),
                                           nn.Linear(mid_dim, lay_dim),
                                           nn.Hardswish())
        self.net_a_avg = nn.Linear(lay_dim,
                                   action_dim)  # the average of action
        self.net_a_std = nn.Linear(lay_dim,
                                   action_dim)  # the log_std of action

        self.sqrt_2pi_log = np.log(np.sqrt(2 * np.pi))
        layer_norm(self.net_a_avg,
                   std=0.01)  # output layer for the action mean; this init is optional
Example #7
    def __init__(self, mid_dim, state_dim, action_dim, if_use_dn=True):
        super().__init__()
        if if_use_dn:
            nn_dense = DenseNet(mid_dim // 2)
            inp_dim = nn_dense.inp_dim
            out_dim = nn_dense.out_dim
            self.net_state = nn.Sequential(
                nn.Linear(state_dim, inp_dim),
                nn.ReLU(),
                nn_dense,
            )
        else:
            out_dim = mid_dim
            self.net_state = nn.Sequential(
                nn.Linear(state_dim, mid_dim),
                nn.ReLU(),
                nn.Linear(mid_dim, mid_dim),
                nn.ReLU(),
                nn.Linear(mid_dim, mid_dim),
                nn.Hardswish(),
            )

        self.net_a_avg = nn.Sequential(
            nn.Linear(out_dim, mid_dim), nn.Hardswish(),
            nn.Linear(mid_dim, action_dim))  # the average of action
        self.net_a_std = nn.Sequential(
            nn.Linear(out_dim, mid_dim), nn.Hardswish(),
            nn.Linear(mid_dim, action_dim))  # the log_std of action
        self.num_logprob = -np.log(action_dim)  # SAC adjusts alpha so the policy's log-prob approaches num_logprob
        self.log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
        self.log_alpha = nn.Parameter(
            torch.zeros((1, action_dim)) - np.log(action_dim), requires_grad=True)
Example #8
 def __init__(self, act_type, auto_optimize=True, **kwargs):
     super(Activation, self).__init__()
     if act_type == 'relu':
         self.act = nn.ReLU(inplace=True) if auto_optimize else nn.ReLU(**kwargs)
     elif act_type == 'relu6':
         self.act = nn.ReLU6(inplace=True) if auto_optimize else nn.ReLU6(**kwargs)
     elif act_type == 'h_swish':
         self.act = nn.Hardswish(inplace=True) if auto_optimize else nn.Hardswish(**kwargs)
     elif act_type == 'h_sigmoid':
         self.act = nn.Hardsigmoid(inplace=True) if auto_optimize else nn.Hardsigmoid(**kwargs)
     elif act_type == 'swish':
         self.act = nn.SiLU(inplace=True) if auto_optimize else nn.SiLU(**kwargs)
     elif act_type == 'gelu':
         self.act = nn.GELU()
     elif act_type == 'elu':
         self.act = nn.ELU(inplace=True, **kwargs) if auto_optimize else nn.ELU(**kwargs)
     elif act_type == 'mish':
         self.act = Mish()
     elif act_type == 'sigmoid':
         self.act = nn.Sigmoid()
     elif act_type == 'lrelu':
         self.act = nn.LeakyReLU(inplace=True, **kwargs) if auto_optimize else nn.LeakyReLU(**kwargs)
     elif act_type == 'prelu':
         self.act = nn.PReLU(**kwargs)
     else:
         raise NotImplementedError('{} activation is not implemented.'.format(act_type))
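Usage is then a one-liner; for example (a hypothetical call, assuming `import torch` and the class above):

 act = Activation('h_swish')       # selects nn.Hardswish(inplace=True)
 out = act.act(torch.randn(4, 8))  # the chosen module is stored in .act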
Example #9
 def __init__(self, state_dim, mid_dim):
     super().__init__()
     self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                              nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                              nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                              nn.Linear(mid_dim, 1))
     layer_norm(self.net[-1], std=0.5)  # output layer for Q value
Example #10
    def __init__(self, state_dim, mid_dim):
        super().__init__()
        if isinstance(state_dim, int):
            self.net = nn.Sequential(
                nn.Linear(state_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                nn.Linear(mid_dim, 1)
            )
        else:
            def set_dim(i):
                return int(12 * 1.5 ** i)

            self.net = nn.Sequential(
                NnnReshape(*state_dim),  # -> [batch_size, 4, 96, 96]
                nn.Conv2d(state_dim[0], set_dim(0), 4, 2, bias=True), nn.LeakyReLU(),
                nn.Conv2d(set_dim(0), set_dim(1), 3, 2, bias=False), nn.ReLU(),
                nn.Conv2d(set_dim(1), set_dim(2), 3, 2, bias=False), nn.ReLU(),
                nn.Conv2d(set_dim(2), set_dim(3), 3, 2, bias=True), nn.ReLU(),
                nn.Conv2d(set_dim(3), set_dim(4), 3, 1, bias=True), nn.ReLU(),
                nn.Conv2d(set_dim(4), set_dim(5), 3, 1, bias=True), nn.ReLU(),
                NnnReshape(-1),
                nn.Linear(set_dim(5), mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, 1),
            )

        layer_norm(self.net[-1], std=0.5)  # output layer for q value
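As a sanity check on the conv branch, the output-size formula (size - kernel) // stride + 1 shrinks a 96x96 frame to 1x1, so NnnReshape(-1) feeds exactly set_dim(5) features into the first linear layer; a standalone verification:

size = 96
for k, s in [(4, 2), (3, 2), (3, 2), (3, 2), (3, 1), (3, 1)]:
    size = (size - k) // s + 1  # 47, 23, 11, 5, 3, 1
print(size)  # 1, so the flattened width is set_dim(5) = int(12 * 1.5 ** 5) = 91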
Example #11
    def __init__(self, mid_dim, state_dim, action_dim):
        super().__init__()
        if isinstance(state_dim, int):
            self.net = nn.Sequential(
                nn.Linear(state_dim, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                nn.Linear(mid_dim, action_dim), )
        else:
            def set_dim(i):
                return int(12 * 1.5 ** i)

            self.net = nn.Sequential(
                NnnReshape(*state_dim),  # -> [batch_size, 4, 96, 96]
                nn.Conv2d(state_dim[0], set_dim(0), 4, 2, bias=True), nn.LeakyReLU(),
                nn.Conv2d(set_dim(0), set_dim(1), 3, 2, bias=False), nn.ReLU(),
                nn.Conv2d(set_dim(1), set_dim(2), 3, 2, bias=False), nn.ReLU(),
                nn.Conv2d(set_dim(2), set_dim(3), 3, 2, bias=True), nn.ReLU(),
                nn.Conv2d(set_dim(3), set_dim(4), 3, 1, bias=True), nn.ReLU(),
                nn.Conv2d(set_dim(4), set_dim(5), 3, 1, bias=True), nn.ReLU(),
                NnnReshape(-1),
                nn.Linear(set_dim(5), mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, action_dim), )
        self.a_std_log = nn.Parameter(torch.zeros((1, action_dim)) - 0.5, requires_grad=True)  # trainable parameter
        self.sqrt_2pi_log = 0.9189385332046727  # =np.log(np.sqrt(2 * np.pi))

        layer_norm(self.net[-1], std=0.1)  # output layer for action
Example #12
    def __init__(self, mid_dim, state_dim, action_dim, if_use_dn=False):
        super().__init__()
        if if_use_dn:
            nn_dense = DenseNet(mid_dim // 2)
            inp_dim = nn_dense.inp_dim
            out_dim = nn_dense.out_dim

            self.net_state = nn.Sequential(
                nn.Linear(state_dim, inp_dim),
                nn.ReLU(),
                nn_dense,
            )
        else:
            self.net_state = nn.Sequential(nn.Linear(state_dim, mid_dim),
                                           nn.ReLU(),
                                           nn.Linear(mid_dim, mid_dim),
                                           nn.Hardswish(),
                                           nn.Linear(mid_dim, mid_dim),
                                           nn.Hardswish())
            out_dim = mid_dim

        self.net_a_avg = nn.Linear(out_dim,
                                   action_dim)  # the average of action
        self.net_a_std = nn.Linear(out_dim,
                                   action_dim)  # the log_std of action

        self.sqrt_2pi_log = np.log(np.sqrt(2 * np.pi))
        layer_norm(self.net_a_avg,
                   std=0.01)  # output layer for the action mean; this init is optional
Example #13
 def __init__(self, lay_dim):
     super().__init__()
     self.dense1 = nn.Sequential(nn.Linear(lay_dim * 1, lay_dim * 1),
                                 nn.Hardswish())
     self.dense2 = nn.Sequential(nn.Linear(lay_dim * 2, lay_dim * 2),
                                 nn.Hardswish())
     self.inp_dim = lay_dim
     self.out_dim = lay_dim * 4
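The dimensions (dense1: lay_dim -> lay_dim, dense2: 2 * lay_dim -> 2 * lay_dim, out_dim = 4 * lay_dim) only fit if each block's input and output are concatenated; a forward sketch consistent with them (an assumption, the method is not shown):

 def forward(self, x):  # hypothetical dense-style concatenation
     x1 = torch.cat((x, self.dense1(x)), dim=1)    # lay_dim -> 2 * lay_dim
     x2 = torch.cat((x1, self.dense2(x1)), dim=1)  # 2 * lay_dim -> 4 * lay_dim
     return x2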
Example #14
 def __init__(self, in_ch, out_ch):
     super(EncoderConv, self).__init__()
     self.conv1 = nn.Conv2d(in_ch, out_ch, 3, padding=1)
     self.conv2 = nn.Sequential(nn.Conv2d(out_ch, out_ch, 3, padding=1),
                                nn.BatchNorm2d(out_ch), nn.Hardswish(),
                                nn.Conv2d(out_ch, out_ch, 3, padding=1),
                                nn.BatchNorm2d(out_ch), nn.Hardswish())
     self.norm = nn.BatchNorm2d(out_ch)
Example #15
 def __init__(self, state_dim, action_dim, mid_dim):
     super().__init__()
     self.net_sa = nn.Sequential(nn.Linear(state_dim + action_dim, mid_dim), nn.ReLU(),
                                 nn.Linear(mid_dim, mid_dim), nn.ReLU(), )  # concat(state, action)
     self.net_q1 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                 nn.Linear(mid_dim, 1), )  # q1 value
     self.net_q2 = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                 nn.Linear(mid_dim, 1), )  # q2 value
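A twin-critic forward for this layout feeds the concatenated state-action pair through the shared trunk and reads both heads (a sketch, assuming the usual clipped double-Q pattern):

 def forward(self, state, action):  # hypothetical twin-Q forward
     tmp = self.net_sa(torch.cat((state, action), dim=1))
     return self.net_q1(tmp), self.net_q2(tmp)  # two Q estimates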
Example #16
 def __init__(self, mid_dim):
     super().__init__()
     self.dense1 = nn.Sequential(nn.Linear(mid_dim // 2, mid_dim // 2),
                                 nn.Hardswish())
     self.dense2 = nn.Sequential(nn.Linear(mid_dim * 1, mid_dim * 1),
                                 nn.Hardswish())
     self.inp_dim = mid_dim // 2
     self.out_dim = mid_dim * 2
Example #17
 def __init__(self, mid_dim, state_dim, action_dim):
     super().__init__()
     self.net__state = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, mid_dim), nn.ReLU())
     self.net__a_avg = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                     nn.Linear(mid_dim, action_dim))  # the average of action
     self.net__a_std = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                     nn.Linear(mid_dim, action_dim))  # the log_std of action
     self.sqrt_2pi_log = 0.9189385332046727  # =np.log(np.sqrt(2 * np.pi))
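The constant 0.9189... = log(sqrt(2 * pi)) is the normalizer of a unit Gaussian; it typically enters the policy's log-probability like this (a sketch with assumed tensors: t is the state feature, a the sampled action):

 # hedged sketch of a diagonal-Gaussian log-prob using sqrt_2pi_log:
 # log N(a | mu, sigma) = -(((a - mu) / sigma) ** 2 / 2 + log(sigma) + log(sqrt(2 * pi)))
 a_avg, a_std_log = self.net__a_avg(t), self.net__a_std(t)
 log_prob = -(((a - a_avg) / a_std_log.exp()) ** 2 / 2 + a_std_log + self.sqrt_2pi_log).sum(1)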
Example #18
    def __init__(self, state_dim, action_dim, mid_dim):
        super().__init__()
        self.net__s = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                    nn.Linear(mid_dim, mid_dim), nn.ReLU(), )  # network of state
        self.net__a = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                    nn.Linear(mid_dim, action_dim), )  # network of action_average
        self.net__d = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                    nn.Linear(mid_dim, action_dim), )  # network of action_log_std

        self.sqrt_2pi_log = np.log(np.sqrt(2 * np.pi))  # it is a constant
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Example #19
 def __init__(self, mid_dim, state_dim, action_dim):
     super().__init__()
     self.net_state = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                    nn.Linear(mid_dim, mid_dim), nn.ReLU())
     self.net_a_avg = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                    nn.Linear(mid_dim, action_dim))  # the average of action
     self.net_a_std = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
                                    nn.Linear(mid_dim, action_dim))  # the log_std of action
     self.num_logprob = -np.log(action_dim)
     self.log_sqrt_2pi = np.log(np.sqrt(2 * np.pi))
     self.log_alpha = nn.Parameter(torch.zeros((1, action_dim)) - np.log(action_dim), requires_grad=True)
Example #20
    def __init__(self):
        super(CNNBasedNet, self).__init__()

        self.conv = nn.Sequential(conv_block(6, 64, 5), conv_block(64, 128, 3),
                                  nn.AvgPool1d(2), conv_block(128, 256, 3),
                                  nn.AdaptiveAvgPool1d(1), nn.Flatten(1),
                                  nn.Linear(256, 512), nn.Dropout(0.2),
                                  nn.Hardswish(inplace=True),
                                  nn.Linear(512, 1024), nn.Dropout(0.2),
                                  nn.Hardswish(inplace=True),
                                  nn.Linear(1024, 3))
Example #21
    def __init__(self):
        super(MLPBasedNet, self).__init__()

        channels = 6
        sequence = 120
        Q = channels * sequence
        self.linear_block = nn.Sequential(nn.Linear(Q, 2 * Q), nn.Hardswish(),
                                          nn.Dropout(0.2),
                                          nn.Linear(2 * Q, 4 * Q),
                                          nn.Hardswish(), nn.Dropout(0.2),
                                          nn.Linear(4 * Q, 4 * Q),
                                          nn.Hardswish(), nn.Dropout(0.2),
                                          nn.Linear(4 * Q, 3))
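Here Q = 6 * 120 = 720, so the block expects each window flattened to a 720-vector; a hypothetical call:

net = MLPBasedNet()
x = torch.randn(32, 6, 120)              # batch of 6-channel, 120-step windows
logits = net.linear_block(x.flatten(1))  # [32, 720] -> [32, 3]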
Example #22
    def __init__(self, mid_dim):
        super().__init__()
        assert (mid_dim / (2 ** 3)) % 1 == 0

        def set_dim(i):
            return int((3 / 2) ** i * mid_dim)

        self.dense1 = nn.Sequential(nn.Linear(set_dim(0), set_dim(0) // 2), nn.Hardswish())
        self.dense2 = nn.Sequential(nn.Linear(set_dim(1), set_dim(1) // 2), nn.Hardswish())
        self.out_dim = set_dim(2)

        layer_norm(self.dense1[0], std=1.0)
        layer_norm(self.dense2[0], std=1.0)
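Since each Linear halves its input, concatenating a block's input with its output grows the width by a factor of 3/2: set_dim(0) -> set_dim(1) -> set_dim(2) = out_dim; a forward sketch under that assumption (not the original method):

    def forward(self, x):  # hypothetical: width grows by 3/2 per dense block
        x1 = torch.cat((x, self.dense1(x)), dim=1)    # set_dim(0) -> set_dim(1)
        x2 = torch.cat((x1, self.dense2(x1)), dim=1)  # set_dim(1) -> set_dim(2)
        return x2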
Example #23
    def __init__(self, in_c, num_classes):
        super(MobileNetV3, self).__init__()

        self.blc1 = nn.Sequential(
            nn.Conv2d(in_c, 16, kernel_size=3, stride=1, padding=1,
                      bias=False), nn.BatchNorm2d(16), nn.Hardswish(True))
        self.blc2 = nn.Sequential(SEInvBottleneck(16, 16, 16, act='relu'),
                                  SEInvBottleneck(16, 64, 24, act='relu'),
                                  SEInvBottleneck(24, 72, 24, act='relu'))
        self.blc3 = nn.Sequential(
            SEInvBottleneck(24, 72, 40, k=5, s=2, p=2, act='relu', se=True),
            SEInvBottleneck(40, 120, 40, k=5, s=1, p=2, act='relu', se=True),
            SEInvBottleneck(40, 120, 40, k=5, s=1, p=2, act='relu', se=True))
        self.blc4 = nn.Sequential(
            SEInvBottleneck(40, 240, 80, s=2, act='hswish'),
            SEInvBottleneck(80, 200, 80, act='hswish'),
            SEInvBottleneck(80, 184, 80, act='hswish'),
            SEInvBottleneck(80, 184, 80, act='hswish'),
            SEInvBottleneck(80, 480, 112, act='hswish', se=True),
            SEInvBottleneck(112, 672, 112, act='hswish', se=True))
        self.blc5 = nn.Sequential(
            SEInvBottleneck(112, 672, 160, k=5, s=2, p=2, act='hswish', se=True),
            SEInvBottleneck(160, 960, 160, k=5, s=1, p=2, act='hswish', se=True),
            SEInvBottleneck(160, 960, 160, k=5, s=1, p=2, act='hswish', se=True),
            nn.Conv2d(160, 960, kernel_size=1, bias=False),
            nn.BatchNorm2d(960), nn.Hardswish(True))
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Sequential(nn.Conv2d(960, 1280, kernel_size=1),
                                 nn.Hardswish(True))
        self.fc2 = nn.Conv2d(1280, num_classes, kernel_size=1)
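A forward sketch for this MobileNetV3-style stack (an assumption; the method is not included, but the head shapes only compose in this order):

    def forward(self, x):  # hypothetical forward with the standard MobileNetV3 head
        x = self.blc1(x)
        x = self.blc2(x)
        x = self.blc3(x)
        x = self.blc4(x)
        x = self.blc5(x)  # ends in a 1x1 conv to 960 channels
        x = self.gap(x)   # -> [N, 960, 1, 1]
        x = self.fc1(x)   # -> [N, 1280, 1, 1]
        x = self.fc2(x)   # -> [N, num_classes, 1, 1]
        return x.flatten(1)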
Example #24
 def __init__(
     self, c1, c2, k=1, s=1, p=None, g=1, act=True
 ):  # ch_in, ch_out, kernel, stride, padding, groups
     super(Conv, self).__init__()
     self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
     self.bn = nn.BatchNorm2d(c2)
     self.act = nn.Hardswish() if act else nn.Identity()
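The conventional forward for this kind of block applies conv, batch-norm, then activation (a sketch; the method is not part of the snippet):

 def forward(self, x):  # assumed order: conv -> BN -> activation
     return self.act(self.bn(self.conv(x)))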
Example #25
 def __init__(self, in_channels, out_channels, kernel_size = 3, stride = 1, expansion_ratio = 1, squeeze_ratio = 1, \
     activation = nn.Hardswish(True), normalization = nn.BatchNorm2d):
     super().__init__()
     self.same_shape = in_channels == out_channels
     self.mid_channels = expansion_ratio*in_channels
     self.block = nn.Sequential(
         md.PointWiseConv2d(in_channels, self.mid_channels),
         normalization(self.mid_channels),
         activation,
         md.DepthWiseConv2d(self.mid_channels, kernel_size=kernel_size, stride=stride),
         normalization(self.mid_channels),
         activation,
         #md.sSEModule(self.mid_channels),
         md.SCSEModule(self.mid_channels, reduction = squeeze_ratio),
         #md.SEModule(self.mid_channels, reduction = squeeze_ratio),
         md.PointWiseConv2d(self.mid_channels, out_channels),
         normalization(out_channels)
     )
     
     if not self.same_shape:
         # 1x1 convolution used to match the number of channels in the skip feature maps with that 
         # of the residual feature maps
         self.skip_conv = nn.Sequential(
             nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1),
             normalization(out_channels)
         )
Example #26
 def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, version="r4.0"):
     super().__init__()
     self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
     self.bn = nn.BatchNorm2d(c2)
     if version == "r4.0":
         self.act = nn.SiLU() if act else (
             act if isinstance(act, nn.Module) else nn.Identity())
     elif version == "r3.1":
         self.act = nn.Hardswish() if act else (
             act if isinstance(act, nn.Module) else nn.Identity())
     else:
         raise NotImplementedError(
             f"Currently doesn't support version {version}.")
Example #27
 def __init__(self):
     super(NNActivationModule, self).__init__()
     self.activations = nn.ModuleList([
         nn.ELU(),
         nn.Hardshrink(),
         nn.Hardsigmoid(),
         nn.Hardtanh(),
         nn.Hardswish(),
         nn.LeakyReLU(),
         nn.LogSigmoid(),
         # nn.MultiheadAttention(),
         nn.PReLU(),
         nn.ReLU(),
         nn.ReLU6(),
         nn.RReLU(),
         nn.SELU(),
         nn.CELU(),
         nn.GELU(),
         nn.Sigmoid(),
         nn.SiLU(),
         nn.Mish(),
         nn.Softplus(),
         nn.Softshrink(),
         nn.Softsign(),
         nn.Tanh(),
         nn.Tanhshrink(),
         # nn.Threshold(0.1, 20),
         nn.GLU(),
         nn.Softmin(),
         nn.Softmax(),
         nn.Softmax2d(),
         nn.LogSoftmax(),
         # nn.AdaptiveLogSoftmaxWithLoss(),
     ])
Example #28
    def __init__(self):
        super(classifier, self).__init__()

        self.pool = nn.MaxPool2d(3, 3)  # 3x3 max pooling with stride 3

        self.dropout1 = nn.Dropout2d(p=0.2)  # spatial dropout
        self.dropout2 = nn.Dropout2d(p=0.5)

        self.relu = nn.LeakyReLU()
        self.softmax = nn.Softmax()
        self.swish = nn.Hardswish()
        #self.adpool = nn.AdaptiveAvgPool2d(((1,1)))

        self.conv1 = nn.Conv2d(
            2, s, 3,
            padding=1)  # in_channels, out_channels, kernel_size (`s` is a width constant defined elsewhere in the source)
        self.batch1 = nn.BatchNorm2d(s)  # batch normalisation

        self.conv2 = nn.Conv2d(s, 2 * s, 3, padding=1)
        self.batch2 = nn.BatchNorm2d(2 * s)

        self.conv3 = nn.Conv2d(2 * s, 4 * s, 3, padding=1)
        self.batch3 = nn.BatchNorm2d(4 * s)

        self.conv4 = nn.Conv2d(4 * s, 8 * s, 3, padding=1)
        self.batch4 = nn.BatchNorm2d(8 * s)

        self.adpool = nn.AdaptiveAvgPool2d((1, 1))
        self.conv5 = nn.Conv2d(8 * s, 2, 3, padding=1)

        self.fc1 = nn.Linear(in_features=8 * s * 1 * 1,
                             out_features=64)  # pool
        self.fc2 = nn.Linear(in_features=64, out_features=16)
        self.out = nn.Linear(in_features=16, out_features=2)
Example #29
 def test_qconfig_dict_object_type_module(self):
     """
     Verifies that the 'object_type' option of qconfig_dict works
     on module types.
     """
     m = nn.Sequential(
         nn.Conv2d(1, 1, 1),
         nn.Hardswish(),
         nn.Conv2d(1, 1, 1),
     )
     qconfig_dict = {
         '': torch.quantization.default_qconfig,
         'object_type': [
             (nn.Conv2d, torch.quantization.default_qconfig),
             (nn.Hardswish, None),
         ],
     }
     example_args = (torch.randn(1, 1, 1, 1),)
     mp = _quantize_dbr.prepare(m, qconfig_dict, example_args)
     mp(*example_args)
     mq = _quantize_dbr.convert(mp)
     mq(*example_args)
     self.assertTrue(isinstance(mq[0], nnq.Conv2d))
     self.assertTrue(isinstance(mq[1], nn.Hardswish))
     self.assertTrue(isinstance(mq[2], nnq.Conv2d))
Example #30
    def __init__(self, state_dim, mid_dim):
        super().__init__()
        if isinstance(state_dim, int):
            self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, mid_dim), nn.ReLU(),
                                     nn.Linear(mid_dim, mid_dim),
                                     nn.Hardswish(), nn.Linear(mid_dim, 1))
        else:

            def set_dim(i):
                return int(12 * 1.5**i)

            self.net0 = nn.Sequential(  # NnnReshape(*state_dim),  # -> [batch_size, 4, 96, 96]
                nn.Conv2d(state_dim[0], set_dim(0), 4, 2, bias=True),
                nn.LeakyReLU(),
                nn.Conv2d(set_dim(0), set_dim(0), 3, 2, bias=False), nn.ReLU(),
                nn.Conv2d(set_dim(0), set_dim(1), 3, 2, bias=False), nn.ReLU(),
                nn.Flatten())

            x = torch.zeros(1, *(state_dim))
            x = self.net0(x)
            netLen = int(np.prod(x.shape))

            self.net1 = nn.Sequential(
                nn.Linear(netLen, mid_dim), nn.ReLU(),
                nn.Linear(mid_dim, int(mid_dim / 2)), nn.ReLU(),
                nn.Linear(int(mid_dim / 2), int(mid_dim / 2**2)), nn.ReLU(),
                nn.Linear(int(mid_dim / 2**2), int(mid_dim / 2**3)), nn.ReLU(),
                nn.Linear(int(mid_dim / 2**3), 1))

        # apply layer_norm to whichever output layer the branch above built
        output_layer = self.net[-1] if isinstance(state_dim, int) else self.net1[-1]
        layer_norm(output_layer, std=0.5)  # output layer for Q value