Example #1
 def __init__(self,
              num_factors,
              num_users,
              num_items,
              L=5,
              d=16,
              d_prime=4,
              drop_ratio=0.05,
              **kwargs):
     super(Caser, self).__init__(**kwargs)
     self.P = nn.Embedding(num_users, num_factors)
     self.Q = nn.Embedding(num_items, num_factors)
     self.d_prime, self.d = d_prime, d
     # Vertical convolution layer
     self.conv_v = nn.Conv2D(d_prime, (L, 1), in_channels=1)
     # Horizontal convolution layer
     h = [i + 1 for i in range(L)]
     self.conv_h, self.max_pool = nn.Sequential(), nn.Sequential()
     for i in h:
         self.conv_h.add(nn.Conv2D(d, (i, num_factors), in_channels=1))
         self.max_pool.add(nn.MaxPool1D(L - i + 1))
     # Fully-connected layer
     self.fc1_dim_v, self.fc1_dim_h = d_prime * num_factors, d * len(h)
     self.fc = nn.Dense(in_units=d_prime * num_factors + d * L,
                        activation='relu',
                        units=num_factors)
     self.Q_prime = nn.Embedding(num_items, num_factors * 2)
     self.b = nn.Embedding(num_items, 1)
     self.dropout = nn.Dropout(drop_ratio)
Example #2
    def __init__(self, channels, reduction=16):
        super(SELayer, self).__init__()

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Squeeze to channels // reduction, then expand back to channels so the
        # sigmoid gates match the input's channel count (the original repeated
        # the squeeze, which breaks the channel-wise rescaling)
        self.fc = nn.Sequential(
            nn.Conv2d(channels,
                      channels // reduction,
                      kernel_size=1,
                      padding=0), nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction,
                      channels,
                      kernel_size=1,
                      padding=0), nn.Sigmoid())
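How such an SE layer is typically applied in forward() (a minimal sketch, not part of the original snippet; an (N, C, H, W) input is assumed):

    def forward(self, x):
        w = self.avg_pool(x)   # squeeze: (N, C, H, W) -> (N, C, 1, 1)
        w = self.fc(w)         # excite: per-channel gates in (0, 1)
        return x * w           # rescale each channel of the input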
Example #3
def convolution_block(in_channels, filters, size, strides=(1, 1), padding='same', activation=True):
    # Conv -> BatchNorm (-> ReLU6). PyTorch needs the input channel count and
    # per-layer feature sizes that the original Keras-style calls left out,
    # and the `activation` flag was previously ignored.
    layers = [
        nn.Conv2d(in_channels, filters, size, stride=strides, padding=padding),
        nn.BatchNorm2d(filters),
    ]
    if activation:
        layers.append(nn.ReLU6())
    return nn.Sequential(*layers)
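Example use (assuming torch and torch.nn are imported; note that PyTorch only supports padding='same' with stride 1):

block = convolution_block(3, 16, 3)       # 3 -> 16 channels, 3x3 kernel
y = block(torch.randn(1, 3, 32, 32))      # -> (1, 16, 32, 32) with 'same' padding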
Example #4
	def __init__(self):
		super(Generator, self).__init__()

		# first convolution, without a residual block
		self.conv1 = nn.Sequential(nn.Conv2d(3, 64, 9, 1, 4),
								   nn.PReLU())

		# a series of residual blocks (8)
		resBlocks = []
		for i in range(8):
			resBlocks.append(ResBlock(64))
		self.resBlocks = nn.Sequential(*resBlocks)

		# second convolution after the residual blocks
		self.conv2 = nn.Sequential(nn.Conv2d(64, 64, 3, 1, 1), nn.BatchNorm2d(64, 0.8))

		# upsampling with PixelShuffle, as stated in the paper
		self.upsampling = nn.Sequential(nn.Conv2d(64, 256, 3, 1, 1),
										nn.BatchNorm2d(256),
										nn.PixelShuffle(2),  # 256 -> 64 channels, 2x spatial size
										nn.PReLU(),
										nn.Conv2d(64, 256, 3, 1, 1),
										nn.BatchNorm2d(256),
										nn.PixelShuffle(2),
										nn.PReLU())
		self.conv3 = nn.Sequential(nn.Conv2d(64, 3, 9, 1, 4), nn.Tanh())
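A typical forward() for this SRGAN-style generator (a sketch; the long skip connection around the residual blocks follows the paper and is assumed, since the original snippet shows only __init__):

	def forward(self, x):
		out1 = self.conv1(x)
		out = self.resBlocks(out1)
		out = self.conv2(out) + out1  # long skip around the residual blocks
		out = self.upsampling(out)
		return self.conv3(out)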
Example #5
    def __init__(self,
                 state_size,
                 action_size,
                 learning_rate,
                 name='DQNetwork'):
        self.state_size = state_size
        self.action_size = action_size
        self.learning_rate = learning_rate
        linear = nn.Linear  # the class is nn.Linear; nn.linear does not exist

        # nn.Conv2d has no `filters` argument (out_channels plays that role),
        # and in_channels must chain from one layer to the next
        self.feature = nn.Sequential(
            nn.Conv2d(in_channels=4,   # 4 stacked input frames
                      out_channels=32,
                      kernel_size=8,
                      stride=4), nn.ELU(), nn.BatchNorm2d(32),
            nn.Conv2d(in_channels=32,  # must match the previous out_channels
                      out_channels=64,
                      kernel_size=4,
                      stride=2), nn.ELU(), nn.BatchNorm2d(64),
            nn.Conv2d(in_channels=64,
                      out_channels=128,
                      kernel_size=4), nn.BatchNorm2d(128), nn.ELU(), nn.Flatten(),
            # 7 * 7 * 64 is kept from the original snippet; it must equal the
            # flattened conv output size for the actual input resolution
            linear(7 * 7 * 64, 256), nn.ELU(), linear(256, 448), nn.ELU())
Example #6
def hg(**kwargs):
    model = HourglassNet(
        Bottleneck2D,
        head=kwargs.get("head", lambda c_in, c_out: nn.Conv2d(c_in, c_out, 1)),
        depth=kwargs["depth"],
        num_stacks=kwargs["num_stacks"],
        num_blocks=kwargs["num_blocks"],
        num_classes=kwargs["num_classes"],
    )
    return model
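An illustrative call (the argument values are assumed, not taken from the original project):

model = hg(depth=4, num_stacks=2, num_blocks=1, num_classes=16)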
Example #7
def LateralConnect2D(upsample2D,
                     downsample2D,
                     filters):
    # 1x1 lateral convolution on the bottom-up map, then element-wise addition
    # with the top-down map (upsample2D is assumed to already have `filters`
    # channels); the original flattened the tensor and passed it where a
    # channel count belongs
    channels = downsample2D.size(1)
    result = torch.add(upsample2D, nn.Conv2d(
        channels, filters, 1, (1, 1))(downsample2D))

    result = nn.Conv2d(filters, filters, 1, (1, 1))(result)

    return result
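Note that constructing nn.Conv2d inside the function creates fresh, untrained weights on every call. A module that owns its convolutions is the usual design; a minimal sketch with assumed names:

class Lateral2D(nn.Module):
    def __init__(self, in_channels, filters):
        super().__init__()
        self.lateral = nn.Conv2d(in_channels, filters, 1)  # 1x1 lateral conv
        self.smooth = nn.Conv2d(filters, filters, 1)       # post-merge conv

    def forward(self, upsample2D, downsample2D):
        return self.smooth(upsample2D + self.lateral(downsample2D))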
Example #8
	def __init__(self):
		super().__init__()
		# encoder: 28*28 pixels -> 3-dimensional latent code; the original
		# first layer was garbled, so nn.Linear(28 * 28, 64) is assumed here
		# to mirror the decoder below
		self.encoder = nn.Sequential(
			nn.Linear(28 * 28, 64),
			nn.ReLU(),
			nn.Linear(64, 3))
		# decoder: latent code back to 28*28 pixels
		self.decoder = nn.Sequential(
			nn.Linear(3, 64),
			nn.ReLU(),
			nn.Linear(64, 28 * 28))
Example #9
    def __init__(self, start_neurons, in_channels=1, dropout_ratio=0.5):
        super().__init__()

        # PyTorch convolutions need an explicit input channel count; the
        # `in_channels` parameter (default 1) is an assumption added here,
        # since the original Keras-style calls omitted it, and each stage's
        # input must match the previous stage's output
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, start_neurons * 1, (3, 3), padding=0),
            self.residual_block(start_neurons * 1),
            self.residual_block(start_neurons * 1),
            nn.ReLU6(),
            nn.MaxPool2d((2, 2)),
            nn.Dropout2d(dropout_ratio / 2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(start_neurons * 1, start_neurons * 2, (3, 3), padding=0),
            self.residual_block(start_neurons * 2),
            self.residual_block(start_neurons * 2),
            nn.ReLU6(),
            nn.MaxPool2d((2, 2)),
            nn.Dropout2d(dropout_ratio)
        )

        self.conv3 = nn.Sequential(
            nn.Conv2d(start_neurons * 2, start_neurons * 4, (3, 3), padding=0),
            self.residual_block(start_neurons * 4),
            self.residual_block(start_neurons * 4),
            nn.ReLU6(),
            nn.MaxPool2d((2, 2)),
            nn.Dropout2d(dropout_ratio)
        )

        self.conv4 = nn.Sequential(
            nn.Conv2d(start_neurons * 4, start_neurons * 8, (3, 3), padding=0),
            self.residual_block(start_neurons * 8),
            self.residual_block(start_neurons * 8),
            nn.ReLU6(),
            nn.MaxPool2d((2, 2)),
            nn.Dropout2d(dropout_ratio),
        )
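residual_block is referenced above but not defined in the snippet. One channel-preserving possibility (a sketch; the layer order and the skip connection are assumptions):

class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.body = nn.Sequential(
            nn.BatchNorm2d(channels),
            nn.ReLU6(),
            nn.Conv2d(channels, channels, (3, 3), padding=1))

    def forward(self, x):
        return x + self.body(x)  # the skip connection makes the block residual

With this helper, residual_block(self, channels) can simply return ResidualBlock(channels).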
Example #10
    def __init__(self):
        super(simpleModel, self).__init__()

        self.conv = nn.Conv2d(in_channels=3,
                              out_channels=16,
                              kernel_size=3,
                              padding=1,
                              stride=1,
                              bias=False)

        self.batchnorm = nn.BatchNorm2d(16, affine=True)
        self.relu = nn.ReLU()
        self.avg_pool = nn.AvgPool2d(8)
        self.flatten = nn.Flatten()
        self.fc = nn.Linear(256,
                            10)  # torch has no softmax output layer here; CrossEntropyLoss is used instead
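A forward() consistent with these layers (a sketch; a 32x32 input is assumed so that 16 channels x 4 x 4 = 256 matches the fully connected layer):

    def forward(self, x):
        x = self.relu(self.batchnorm(self.conv(x)))  # (N, 16, 32, 32)
        x = self.avg_pool(x)                         # (N, 16, 4, 4)
        x = self.flatten(x)                          # (N, 256)
        return self.fc(x)                            # (N, 10) class logits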
Example #11
    def __init__(self, state_size):
        super().__init__()  # nn.Module.__init__ does not take a `name` argument

        prev_channels = state_size[-1]
        self.activation = nn.ReLU()
        self.conv2A = nn.Conv2d(in_channels=prev_channels,
                                out_channels=32,
                                kernel_size=8,
                                stride=4)
        self.conv2B = nn.Conv2d(in_channels=32,
                                out_channels=64,
                                kernel_size=4,
                                stride=2)
        self.conv2C = nn.Conv2d(in_channels=64,
                                out_channels=64,
                                kernel_size=3,
                                stride=1)
        self.conv2d = nn.Conv2d(in_channels=64,
                                out_channels=64,
                                kernel_size=3,
                                stride=1)
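A plain chained forward() for this stack (a sketch, not shown in the original snippet):

    def forward(self, x):
        x = self.activation(self.conv2A(x))
        x = self.activation(self.conv2B(x))
        x = self.activation(self.conv2C(x))
        return self.activation(self.conv2d(x))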
Example #12
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 width_factor=1,
                 base_width=64):
        super(ResBlock, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_channels,
                               out_channels,
                               3,
                               stride=stride,  # must downsample like the shortcut below
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels,
                               out_channels,
                               3,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

        self.res_con = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False, stride=stride),
            nn.BatchNorm2d(out_channels))
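The matching forward() is typically (a sketch, not part of the original snippet):

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + self.res_con(x))  # residual addition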
Example #13
 def __init__(self):
     super(BaselineAMT,self).__init__()
     self.conv1 = nn.Conv2d(1, 64, kernel_size=3)
     self.conv2 = nn.Conv2d(64, 64, kernel_size=3)
     self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
Example #14
 def _conv_layer(self, in_channels, out_channels, kernel_size, stride,
                 padding):
     return nn.Sequential(
         nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
         nn.ReLU(inplace=True)  # nn.ReLU takes an `inplace` flag, not -1
     )
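Example use (the channel and kernel values are illustrative):

 # e.g. a 3x3 convolution from 3 to 16 channels, stride 1, padding 1
 layer = self._conv_layer(3, 16, 3, 1, 1)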
Example #15
    def __init__(self,
                 D_shape_in,
                 A_dim,
                 conv_hiddens,
                 kernel_sizes=5,
                 strides=2,
                 linear_hiddens=[],
                 D_out=1,
                 activation=F.relu,
                 use_batch_norm=True,
                 transform_hiddens=[],
                 use_action_gating=False,
                 use_residual=False):
        """
        D_shape_in: tuple of ints, the shape of the input images (channels first)
        D_out: an int, or a list of ints with one entry per degree of freedom
        conv_hiddens, kernel_sizes, strides: either an int or a list of ints of the same length
        """
        super(DDPGCNNCritic, self).__init__()
        if isinstance(conv_hiddens, int): conv_hiddens = [conv_hiddens]
        if isinstance(linear_hiddens, int): linear_hiddens = [linear_hiddens]
        if isinstance(kernel_sizes, int): kernel_sizes = [kernel_sizes]
        if isinstance(strides, int): strides = [strides]
        self.n_layer = max(len(conv_hiddens), len(kernel_sizes), len(strides))
        self.hiddens = conv_hiddens
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.use_residual = use_residual
        if use_residual:
            assert use_batch_norm, 'residual network requires batch norm!'
        if len(self.hiddens) == 1: self.hiddens = self.hiddens * self.n_layer
        if len(self.kernel_sizes) == 1:
            self.kernel_sizes = self.kernel_sizes * self.n_layer
        if len(self.strides) == 1: self.strides = self.strides * self.n_layer

        assert (
            (len(self.hiddens) == len(self.kernel_sizes))
            and (len(self.strides) == len(self.hiddens))
        ), '[DDPGCNNCritic] conv_hiddens, kernel_sizes, strides must share the same length'

        assert (isinstance(D_out, int))

        self.out_dim = D_out
        self.func = activation

        # Convolution Layers for Image Features
        self.conv_layers = []
        self.bc_layers = []
        prev_hidden = D_shape_in[0]
        for i, dat in enumerate(
                zip(self.hiddens, self.kernel_sizes, self.strides)):
            h, k, s = dat
            if self.use_residual and (i > 0):
                shortcut = None if (s == 1) and (
                    h == prev_hidden) else nn.Conv2d(
                        prev_hidden, h, kernel_size=1, stride=s)
                if shortcut is not None:
                    setattr(self, 'resconv_block%d_shortcut' % i, shortcut)
                    utils.initialize_weights(shortcut)
                # padding of k // 2 keeps the spatial size consistent with the
                # 1x1 shortcut so the residual addition lines up (assumes odd k)
                conv1 = nn.Conv2d(prev_hidden,
                                  prev_hidden,
                                  kernel_size=k,
                                  stride=1,
                                  padding=k // 2)
                conv2 = nn.Conv2d(prev_hidden, h, kernel_size=k, stride=s,
                                  padding=k // 2)
                setattr(self, 'resconv_block%d_conv1' % i, conv1)
                setattr(self, 'resconv_block%d_conv2' % i, conv2)
                utils.initialize_weights(conv1)
                utils.initialize_weights(conv2)
                bc1 = nn.BatchNorm2d(prev_hidden)
                bc2 = nn.BatchNorm2d(h)
                setattr(self, 'resconv_block%d_bc1' % i, bc1)
                setattr(self, 'resconv_block%d_bc2' % i, bc2)
                utils.initialize_weights(bc1)
                utils.initialize_weights(bc2)
                self.bc_layers.append((bc1, bc2))
                self.conv_layers.append((conv1, conv2, shortcut))
            else:
                self.conv_layers.append(
                    nn.Conv2d(prev_hidden, h, kernel_size=k, stride=s))
                setattr(self, 'conv_layer%d' % i, self.conv_layers[-1])
                utils.initialize_weights(self.conv_layers[-1])
                if use_batch_norm:
                    self.bc_layers.append(nn.BatchNorm2d(h))
                    setattr(self, 'bc_layer%d' % i, self.bc_layers[-1])
                    utils.initialize_weights(self.bc_layers[-1])
                else:
                    self.bc_layers.append(None)
            prev_hidden = h
        self.feat_size = self._get_feature_dim(D_shape_in)
        print('Feature Size = %d' % self.feat_size)
        # Transformation Layers to change the action dimension
        self.use_action_gating = use_action_gating
        self.trans_layers = []
        if use_action_gating:
            if transform_hiddens[-1] != self.feat_size:
                transform_hiddens.append(self.feat_size)
        for i, d in enumerate(transform_hiddens):
            self.trans_layers.append(nn.Linear(A_dim, d))
            setattr(self, 'trans_layer%d' % i, self.trans_layers[-1])
            utils.initialize_weights(self.trans_layers[-1])
            A_dim = d
        # Final layers that produce the Q value
        self.linear_layers = []
        if not use_action_gating:
            # concatenate feature and action
            prev_dim = self.feat_size + A_dim
        else:
            # use gating
            prev_dim = self.feat_size
        linear_hiddens.append(self.out_dim)
        for i, d in enumerate(linear_hiddens):
            self.linear_layers.append(nn.Linear(prev_dim, d))
            setattr(self, 'linear_layer%d' % i, self.linear_layers[-1])
            utils.initialize_weights(self.linear_layers[-1],
                                     small_init=(i == len(linear_hiddens) - 1))
            prev_dim = d
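An illustrative instantiation (all values assumed; helpers referenced above, such as utils.initialize_weights and self._get_feature_dim, must exist as in the source project):

critic = DDPGCNNCritic(D_shape_in=(3, 84, 84), A_dim=4,
                       conv_hiddens=[32, 64, 64],
                       kernel_sizes=[8, 4, 3], strides=[4, 2, 1],
                       linear_hiddens=[256])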