Example #1
 def __init__(self, act_type, auto_optimize=True, **kwargs):
     super(Activation, self).__init__()
     if act_type == 'relu':
         self.act = nn.ReLU(inplace=True) if auto_optimize else nn.ReLU(**kwargs)
     elif act_type == 'relu6':
         self.act = nn.ReLU6(inplace=True) if auto_optimize else nn.ReLU6(**kwargs)
     elif act_type == 'h_swish':
         self.act = nn.Hardswish(inplace=True) if auto_optimize else nn.Hardswish(**kwargs)
     elif act_type == 'h_sigmoid':
         self.act = nn.Hardsigmoid(inplace=True) if auto_optimize else nn.Hardsigmoid(**kwargs)
     elif act_type == 'swish':
         self.act = nn.SiLU(inplace=True) if auto_optimize else nn.SiLU(**kwargs)
     elif act_type == 'gelu':
         self.act = nn.GELU()
     elif act_type == 'elu':
         self.act = nn.ELU(inplace=True, **kwargs) if auto_optimize else nn.ELU(**kwargs)
     elif act_type == 'mish':
         self.act = Mish()
     elif act_type == 'sigmoid':
         self.act = nn.Sigmoid()
     elif act_type == 'lrelu':
         self.act = nn.LeakyReLU(inplace=True, **kwargs) if auto_optimize else nn.LeakyReLU(**kwargs)
     elif act_type == 'prelu':
         self.act = nn.PReLU(**kwargs)
     else:
         raise NotImplementedError('{} activation is not implemented.'.format(act_type))
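A minimal usage sketch (illustrative only; it assumes the surrounding Activation class is an nn.Module, and calls the stored submodule directly rather than assuming a particular forward()):

    import torch

    act = Activation('h_sigmoid')   # wraps nn.Hardsigmoid(inplace=True)
    x = torch.randn(2, 16)
    y = act.act(x)                  # outputs clamped to [0, 1]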
Example #2
	def __init__(self,input_shape,num_feature,num_class,transfered=None):
		super(VGG16,self).__init__()

		#load transfered encoder
		vgg16 = torchvision.models.vgg16_bn(pretrained=False)
		if transfered:
			vgg16.load_state_dict(torch.load(transfered))

		self.conv = vgg16.features

		self.fc1 = nn.Sequential( #512*(input_shape[1]/32)*(input_shape[2]/32)
			nn.Linear(512*(input_shape[1]//32)*(input_shape[2]//32),num_feature)	,
			nn.ReLU()								,
			nn.Dropout(0.5)
		) #num_feature

		self.fc2 = nn.Linear(num_feature,num_class)

		self.confidnet = nn.Sequential( #num_feature
			nn.Linear(num_feature,400)	,
			nn.ReLU()			,
			nn.Linear(400,400)		,
			nn.ReLU()			,
			nn.Linear(400,400)		,
			nn.ReLU()			,
			nn.Linear(400,400)		,
			nn.ReLU()			,
			nn.Linear(400,1)		,
			nn.Hardsigmoid()
		) #1
Example #3
File: model.py  Project: k5iogura/OLDY2020
    def __init__(self, in_channels_x, in_channels_f, num_classes):
        super(R_ASPP_module, self).__init__()

        assert not num_classes % 2

        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels_x, 128, kernel_size=1, stride=1),
            nn.BatchNorm2d(128),
        )

        self._act = nn.ReLU6()

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.layer2 = nn.Conv2d(in_channels_x, 128, kernel_size=1, stride=1)

        self.out_conv1 = nn.Conv2d(128,
                                   num_classes // 2,
                                   kernel_size=1,
                                   stride=1)
        self.out_conv2 = nn.Conv2d(in_channels_f,
                                   num_classes // 2,
                                   kernel_size=1,
                                   stride=1)

        self.hsigmoid = nn.Hardsigmoid()

        self._init_weight()
Example #4
File: nn_ops.py  Project: yuguo68/pytorch
 def __init__(self):
     super(NNActivationModule, self).__init__()
     self.activations = nn.ModuleList([
         nn.ELU(),
         nn.Hardshrink(),
         nn.Hardsigmoid(),
         nn.Hardtanh(),
         nn.Hardswish(),
         nn.LeakyReLU(),
         nn.LogSigmoid(),
         # nn.MultiheadAttention(),
         nn.PReLU(),
         nn.ReLU(),
         nn.ReLU6(),
         nn.RReLU(),
         nn.SELU(),
         nn.CELU(),
         nn.GELU(),
         nn.Sigmoid(),
         nn.SiLU(),
         nn.Mish(),
         nn.Softplus(),
         nn.Softshrink(),
         nn.Softsign(),
         nn.Tanh(),
         nn.Tanhshrink(),
         # nn.Threshold(0.1, 20),
         nn.GLU(),
         nn.Softmin(),
         nn.Softmax(),
         nn.Softmax2d(),
         nn.LogSoftmax(),
         # nn.AdaptiveLogSoftmaxWithLoss(),
     ])
Example #5
    def __init__(self,
                 window_size=N,
                 hidden=128,
                 embed=32,
                 learning_rate=5e-3):
        super(agent, self).__init__()

        self.window_size = window_size
        self.hidden = hidden
        self.embed = embed
        self.learning_rate = learning_rate

        self.encoder = nn.Sequential(
            nn.Linear((self.window_size * 2 + 1)**2 * 6, hidden),
            nn.ReLU(True), nn.Linear(hidden, hidden), nn.ReLU(True),
            nn.Linear(hidden, embed))

        self.decoder = nn.Sequential(
            nn.Linear(embed, hidden), nn.ReLU(True), nn.Linear(hidden, hidden),
            nn.ReLU(True), nn.Linear(hidden,
                                     (self.window_size * 2 + 1)**2 * 6),
            nn.Hardsigmoid())

        self.optimizer = torch.optim.Adam(self.parameters(),
                                          lr=self.learning_rate,
                                          weight_decay=1e-5)
Example #6
    def __init__(
            self,
            low_filters: int,
            high_filters: int,
            output_filters: Optional[int] = None,
            kernel_size: Union[int, Tuple[int, ...]] = 3,
            pool: bool = True,
            activation: Optional[nn.Module] = nn.Hardsigmoid(),
    ):
        super().__init__()
        output_filters = high_filters if output_filters is None else output_filters
        kernel_size = self.ToTuple(kernel_size)
        self.align_corners = False
        self._pool = pool

        self.low_level_path = nn.Sequential(
            DynamicSamePad(
                self.Conv(low_filters, output_filters, kernel_size,
                          bias=False)), )

        # gets weights for low_level
        self.high_level_path = nn.Sequential(
            self.AdaptiveAvgPool(1) if pool else nn.Identity(),
            self.Conv(output_filters, output_filters, 1),
            deepcopy(activation) if activation is not None else nn.Identity(),
        )

        # pointwise conv when high_filters != output_filters
        if high_filters != output_filters:
            self.high_level_conv = self.Conv(high_filters, output_filters, 1)
        else:
            self.high_level_conv = nn.Identity()
Example #7
 def __init__(self, k, num_channels, norm=None, group=1, use_hsig=True):
     super(AttentionWeights, self).__init__()
     # num_channels *= 2
     self.k = k
     self.avgpool = nn.AdaptiveAvgPool2d(1)
     self.attention = nn.Sequential(
         nn.Conv2d(num_channels, k, 1, bias=False),
         nn.BatchNorm2d(k) if norm == 'BN' else nn.GroupNorm(group, k),
         nn.Hardsigmoid() if use_hsig else nn.Sigmoid())
Example #8
 def __init__(self, in_c, reduction_ratio=0.25):
     super(SE, self).__init__()
     reduction_c = make_divisible(in_c * reduction_ratio, 4)
     self.block = nn.Sequential(
         nn.AdaptiveAvgPool2d(1),
         nn.Conv2d(in_c, reduction_c, kernel_size=1, bias=True),
         nn.ReLU(inplace=True),
         nn.Conv2d(reduction_c, in_c, kernel_size=1, bias=True),
         nn.Hardsigmoid())
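In a squeeze-and-excitation block the Hardsigmoid output serves as a per-channel gate; a sketch of the usual forward pass (the original forward is not shown here, so how self.block is applied is an assumption):

    def forward(self, x):
        # self.block(x) has shape (N, C, 1, 1) with values in [0, 1];
        # it rescales x channel-wise via broadcasting over H and W
        return x * self.block(x)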
Example #9
    def __init__(self, in_channel, ratio=0.25):
        super(SEBlock, self).__init__()

        self.hid_dim = max(1, int(in_channel * ratio))

        self._pool = nn.AdaptiveAvgPool2d(1)
        self._ln1  = nn.Linear(in_channel, self.hid_dim)
        self._ln2  = nn.Linear(self.hid_dim, in_channel)

        self._act1 = nn.ReLU()
        self._act2 = nn.Hardsigmoid()  # we prefer hard sigmoid over a plain sigmoid gate
Example #10
    def __init__(self, expand_size):
        super(SqueezeExciteModule, self).__init__()

        self.se_0_0 = nn.AdaptiveAvgPool2d(output_size=1)
        self.se_0_1 = nn.Flatten()

        self.se_1_0 = nn.Linear(in_features=expand_size,
                                out_features=expand_size)
        self.se_1_1 = nn.ReLU(inplace=True)

        self.se_2_0 = nn.Linear(in_features=expand_size,
                                out_features=expand_size)
        self.se_2_1 = nn.Hardsigmoid(inplace=True)
Example #11
 def __init__(self):
     super(Decoder, self).__init__()
     self.upconv_bn_relu_1 = nn.Sequential(
         nn.ConvTranspose2d(64, 32, 4), # 4x4
         nn.BatchNorm2d(32),
         nn.ReLU()
     )
     self.upconv_bn_relu_2 = nn.Sequential(
         nn.ConvTranspose2d(32, 4, 6, stride=2), # 12x12
          nn.BatchNorm2d(4),  # match the 4 output channels of the ConvTranspose2d above
         nn.ReLU()
     )
     self.upconv_3 = nn.ConvTranspose2d(4, 1, 6, stride=2) # 28x28
     self.hard_sigmoid = nn.Hardsigmoid()
Example #12
def build_act(name: Union[str, nn.Module, None]) -> Optional[nn.Module]:
    if name is None:
        return None
    elif isinstance(name, nn.Module):
        return name
    elif name == "relu":
        return nn.ReLU(inplace=True)
    elif name == "relu6":
        return nn.ReLU6(inplace=True)
    elif name == "h_swish":
        return nn.Hardswish(inplace=True)
    elif name == "h_sigmoid":
        return nn.Hardsigmoid(inplace=True)
    else:
        raise NotImplementedError
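A few illustrative calls (not from the original source, and assuming torch.nn is imported as nn as in the examples): build_act accepts a string key, an already-constructed module, or None:

    assert build_act(None) is None
    assert isinstance(build_act("h_sigmoid"), nn.Hardsigmoid)
    custom = nn.Hardsigmoid(inplace=False)
    assert build_act(custom) is custom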
Example #13
    def __init__(
        self,
        in_channels: int,
        squeeze_ratio: float,
        out_channels: Optional[int] = None,
        first_activation: nn.Module = nn.ReLU(),
        second_activation: nn.Module = nn.Hardsigmoid(),
        global_pool: bool = True,
        pool_type: Union[str, type] = "avg",
    ):
        super().__init__()
        self.in_channels = abs(int(in_channels))
        self.squeeze_ratio = abs(float(squeeze_ratio))
        self.out_channels = self.in_channels if out_channels is None else abs(
            int(out_channels))

        mid_channels = int(max(1, in_channels // squeeze_ratio))

        # get pooling type from str or provided module type
        if isinstance(pool_type, str):
            if pool_type == "avg":
                self.pool_type = self.AdaptiveAvgPool
            elif pool_type == "max":
                self.pool_type = self.AdaptiveMaxPool
            else:
                raise ValueError(f"Unknown pool type {pool_type}")
        elif isinstance(pool_type, type):
            self.pool_type = pool_type
        else:
            raise TypeError(
                f"Expected str or type for pool_type, found {type(pool_type)}")

        # for global attention, squeeze uses global pooling and linear layers
        if global_pool:
            self.pool = self._get_global_pool()
        else:
            self.pool = None

        self.squeeze = nn.Sequential(
            self.Conv(self.in_channels, mid_channels, 1),
            deepcopy(first_activation),
        )
        self.excite = nn.Sequential(
            self.Conv(mid_channels, self.out_channels, 1),
            deepcopy(second_activation),
        )
Example #14
def make_act(act='ReLU', **kwargs):
    inplace = kwargs.pop("inplace", True)

    if len(act) == 0:
        return None
    act = {
        "ReLU": nn.ReLU(inplace=inplace),
        "ReLU6": nn.ReLU6(inplace=inplace),
        "PReLU": nn.PReLU(),
        "LeakyReLU": nn.LeakyReLU(inplace=inplace),
        "H_Sigmoid": nn.Hardsigmoid(),
        "Sigmoid": nn.Sigmoid(),
        "H_Swish": nn.Hardswish(),
        "Swish": Swish(),
        "Mish": Mish(),
    }[act]

    return act
Example #15
 def activations(self):
     return {
         "celu": nn.CELU(),
         "gelu": nn.GELU(),
         "lrelu": nn.LeakyReLU(0.2),
         "prelu": nn.PReLU(),
         "relu": nn.ReLU(),
         "relu6": nn.ReLU6(),
         "selu": nn.SELU(),
         "sigmoid": nn.Sigmoid(),
         "softplus": nn.Softplus(),
         "softshrink": nn.Softshrink(),
         "softsign": nn.Softsign(),
         "hardsigmoid": nn.Hardsigmoid(),
         "hardtanh": nn.Hardtanh(),
         "tanh": nn.Tanh(),
         "tanhshrink": nn.Tanhshrink()
     }
Example #16
    def __init__(self, input_shape, num_feature, num_class, transfered=None):
        super(ConvNetMNIST, self).__init__()

        self.conv = nn.Sequential(  #input_shape[0]*input_shape[1]*input_shape[2]
            Conv2dSame(input_shape[0], 32, 3),
            nn.ReLU(), Conv2dSame(32, 64, 3), nn.ReLU(), nn.MaxPool2d(2),
            nn.Dropout(0.25))  #64*(input_shape[1]/2)*(input_shape[2]/2)

        self.fc1 = nn.Sequential(  #64*(input_shape[1]/2)*(input_shape[2]/2)
            nn.Linear(64 * (input_shape[1] // 2) * (input_shape[2] // 2),
                      num_feature), nn.ReLU(), nn.Dropout(0.5))  #num_feature

        self.fc2 = nn.Linear(num_feature, num_class)

        self.confidnet = nn.Sequential(  #num_feature
            nn.Linear(num_feature, 400), nn.ReLU(), nn.Linear(400, 400),
            nn.ReLU(), nn.Linear(400, 400), nn.ReLU(), nn.Linear(400, 400),
            nn.ReLU(), nn.Linear(400, 1), nn.Hardsigmoid())  #1
Example #17
    def __init__(self,
                 shallow_in_c: int,
                 deep_in_c: int,
                 out_c: int,
                 hidden_c: int = 128):
        super().__init__()

        self.deep_conv_1 = ConvBN(deep_in_c,
                                  hidden_c,
                                  1,
                                  activation=nn.ReLU6())
        self.squeeze_excite = nn.Sequential(
            nn.AdaptiveAvgPool2d((2, 2)),
            nn.Conv2d(deep_in_c, hidden_c, 1),
            nn.Hardsigmoid(),
        )
        self.deep_conv_2 = nn.Conv2d(hidden_c, out_c, 1)
        self.shallow_conv = nn.Conv2d(shallow_in_c, out_c, 1)
Example #18
File: blocks.py  Project: oqian/Project_449
    def __init__(self, in_dim, out_dim, ks, st, padding=0,
                 norm='bn', activation='relu', pad_type='zero',
                 use_bias=True, activation_first=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # initialize padding
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)

        # initialize normalization
        norm_dim = out_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)

        # initialize activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == "hardsigmoid":
            self.activation = nn.Hardsigmoid()
        elif activation == "sigmoid":
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

        self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
Example #19
def make_act(act='ReLU', **kwargs):
    inplace = kwargs.pop("inplace", True)

    if len(act) == 0:
        return None
    act = {
        "ReLU": nn.ReLU(inplace=inplace),
        "ReLU6": nn.ReLU6(inplace=inplace),
        "PReLU": nn.PReLU(),
        "LeakyReLU": nn.LeakyReLU(inplace=inplace),
        "H_Sigmoid": nn.Hardsigmoid(),
        "Sigmoid": nn.Sigmoid(),
        "TanH": nn.Tanh(),
        "H_Swish": nn.Hardswish(),
        "Swish": ops.Swish(),   # torch >= 1.7.0, nn.SiLU()
        "Mish": ops.Mish(),
    }[act]

    return act
Example #20
def _get_function(name: str, inplace: bool) -> nn.Module:
    """ Use this to instantiate activations and gating functions by name. """
    if name == "relu":
        return nn.ReLU(inplace=inplace)
    elif name == "relu6":
        return nn.ReLU6(inplace=inplace)
    elif name == "swish":
        return Swish(inplace=inplace)
    elif name == "hard_swish":
        if _HAS_HARDSWISH:
            return nn.Hardswish()
        else:
            return HardSwish(inplace=inplace)
    elif name == "sigmoid":
        return Sigmoid(inplace=inplace)
    elif name == "hard_sigmoid":
        if _HAS_HARDSWISH:
            return nn.Hardsigmoid()
        else:
            return HardSigmoid(inplace=inplace)
Example #21
	def __init__(self,input_shape,num_feature,num_class,transfered=None):
		super(ConvNetSVHN2,self).__init__()

		self.conv = nn.Sequential( #input_shape[0]*input_shape[1]*input_shape[2]
			Conv2dSame_BN_ReLU(input_shape[0],32,3)	,
			Conv2dSame_BN_ReLU(32,32,3)		,
			nn.MaxPool2d(2)				,
			nn.Dropout(0.3)				,
			Conv2dSame_BN_ReLU(32,64,3)		,
			Conv2dSame_BN_ReLU(64,64,3)		,
			nn.MaxPool2d(2)				,
			nn.Dropout(0.3)				,
			Conv2dSame_BN_ReLU(64,128,3)		,
			Conv2dSame_BN_ReLU(128,128,3)		,
			nn.MaxPool2d(2)				,
			nn.Dropout(0.3)
		) #128*(input_shape[1]/8)*(input_shape[2]/8)

		self.fc1 = nn.Sequential( #128*(input_shape[1]/8)*(input_shape[2]/8)
			nn.Linear(128*(input_shape[1]//8)*(input_shape[2]//8),num_feature)	,
			nn.ReLU()								,
			nn.Dropout(0.3)
		) #num_feature

		self.fc2 = nn.Linear(num_feature,num_class)

		self.confidnet = nn.Sequential( #num_feature
			nn.Linear(num_feature,400)	,
			nn.ReLU()			,
			nn.Linear(400,400)		,
			nn.ReLU()			,
			nn.Linear(400,400)		,
			nn.ReLU()			,
			nn.Linear(400,400)		,
			nn.ReLU()			,
			nn.Linear(400,1)		,
			nn.Hardsigmoid()
		) #1
Example #22
 def __init__(self):
     # place the hard sigmoid output in [-1, 1]
     super(CreateEdges, self).__init__()
     self.hardsig = nn.Hardsigmoid()
Example #23
def hsigmoid(inplace=False) -> nn.Module:
    return nn.Hardsigmoid(inplace=inplace)
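For reference, PyTorch defines Hardsigmoid(x) as 0 for x <= -3, 1 for x >= 3, and x/6 + 1/2 in between, which equals relu6(x + 3) / 6; a standalone check (not part of the example above):

    import torch
    import torch.nn.functional as F

    x = torch.linspace(-4.0, 4.0, steps=9)
    assert torch.allclose(F.hardsigmoid(x), F.relu6(x + 3.0) / 6.0)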
Example #24
    def __init__(self):
        super(Model, self).__init__()

        self.act_0 = nn.Hardsigmoid()
Example #25
 def __init__(self, add_stub=False):
     super().__init__()
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.add_stub = add_stub
     self.hsigmoid = nn.Hardsigmoid()
Example #26
    def __init__(
        self,
        d_feat=6,
        output_dim=1,
        freq_dim=10,
        hidden_size=64,
        dropout_W=0.0,
        dropout_U=0.0,
        device="cpu",
    ):
        super().__init__()

        self.input_dim = d_feat
        self.output_dim = output_dim
        self.freq_dim = freq_dim
        self.hidden_dim = hidden_size
        self.device = device

        self.W_i = nn.Parameter(
            init.xavier_uniform_(torch.empty(
                (self.input_dim, self.hidden_dim))))
        self.U_i = nn.Parameter(
            init.orthogonal_(torch.empty(self.hidden_dim, self.hidden_dim)))
        self.b_i = nn.Parameter(torch.zeros(self.hidden_dim))

        self.W_ste = nn.Parameter(
            init.xavier_uniform_(torch.empty(self.input_dim, self.hidden_dim)))
        self.U_ste = nn.Parameter(
            init.orthogonal_(torch.empty(self.hidden_dim, self.hidden_dim)))
        self.b_ste = nn.Parameter(torch.ones(self.hidden_dim))

        self.W_fre = nn.Parameter(
            init.xavier_uniform_(torch.empty(self.input_dim, self.freq_dim)))
        self.U_fre = nn.Parameter(
            init.orthogonal_(torch.empty(self.hidden_dim, self.freq_dim)))
        self.b_fre = nn.Parameter(torch.ones(self.freq_dim))

        self.W_c = nn.Parameter(
            init.xavier_uniform_(torch.empty(self.input_dim, self.hidden_dim)))
        self.U_c = nn.Parameter(
            init.orthogonal_(torch.empty(self.hidden_dim, self.hidden_dim)))
        self.b_c = nn.Parameter(torch.zeros(self.hidden_dim))

        self.W_o = nn.Parameter(
            init.xavier_uniform_(torch.empty(self.input_dim, self.hidden_dim)))
        self.U_o = nn.Parameter(
            init.orthogonal_(torch.empty(self.hidden_dim, self.hidden_dim)))
        self.b_o = nn.Parameter(torch.zeros(self.hidden_dim))

        self.U_a = nn.Parameter(init.orthogonal_(torch.empty(self.freq_dim,
                                                             1)))
        self.b_a = nn.Parameter(torch.zeros(self.hidden_dim))

        self.W_p = nn.Parameter(
            init.xavier_uniform_(torch.empty(self.hidden_dim,
                                             self.output_dim)))
        self.b_p = nn.Parameter(torch.zeros(self.output_dim))

        self.activation = nn.Tanh()
        self.inner_activation = nn.Hardsigmoid()
        self.dropout_W, self.dropout_U = (dropout_W, dropout_U)
        self.fc_out = nn.Linear(self.output_dim, 1)

        self.states = []
Example #27
 def __init__(self, channels: int, factor: Union[int, float] = 1 / 4):
     super().__init__()
     self.squeeze_excite = nn.Sequential(
         nn.Linear(channels, math.floor(channels * factor)), nn.ReLU(),
         nn.Linear(math.floor(channels * factor), channels),
         nn.Hardsigmoid())
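As in the convolutional variants above, the gate produced by Hardsigmoid is typically applied multiplicatively; a sketch of how this linear squeeze-excite might be used (the original forward is not shown, so this is an assumption):

    def forward(self, x):
        # x: (N, channels); the gate lies in [0, 1] per channel
        return x * self.squeeze_excite(x)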