Example #1
    def forward(self, x):
        x = F.relu(self.batch_norm_conv1(self.conv1(x)))
        x = F.relu(self.batch_norm_conv2(self.conv2(x)))
        x = F.relu(self.batch_norm_conv3(self.conv3(x)))
        x = self.pool(x)
        x = F.relu(self.batch_norm_conv4(self.conv4(x)))
        x = F.relu(self.batch_norm_conv5(self.conv5(x)))
        x = F.relu(self.batch_norm_conv6(self.conv6(x)))
        x = self.pool(x)
        x = F.relu(self.batch_norm_conv7(self.conv7(x)))
        x = F.relu(self.batch_norm_conv8(self.conv8(x)))
        x = F.relu(self.batch_norm_conv9(self.conv9(x)))
        x = self.pool(x)

        # Note: with a max-pooling layer followed by a ReLU activation,
        #       maxpool(relu(conv(x))) == relu(maxpool(conv(x))),
        #       because ReLU is element-wise and monotonically increasing.
        #       (A quick numerical check follows this example.)

        # flatten image input
        x = x.view(-1, 512 * 4 * 4)

        # dropout layer before fully connected Linear layers
        x = self.dropout(x)

        # 1st hidden layer with relu activation function
        x = F.relu(self.batch_norm_fc1(self.fc1(x)))

        # dropout
        x = self.dropout(x)

        # output layer: fc2 returns raw logits, so no activation is applied here;
        # softmax/log-softmax is left to the loss (e.g. nn.CrossEntropyLoss)
        x = self.fc2(x)

        return x
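Note: the commuting identity in the comment above is easy to verify numerically, since ReLU is element-wise and monotonically increasing. A minimal standalone sketch:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)     # arbitrary feature map
a = F.max_pool2d(F.relu(x), 2)  # relu first, then max-pool
b = F.relu(F.max_pool2d(x, 2))  # max-pool first, then relu
assert torch.equal(a, b)        # exactly equal

In practice the second ordering is marginally cheaper, because ReLU then runs on the smaller, pooled tensor.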
Example #2
def european_payoff(input, strike=1.0, call=True, keepdim=False):
    """Returns the payoff of a European option

    Args:
        input (Tensor): The price of the underlying asset.
        strike (float): The strike price.
        call (bool): Specifies call or put.
        keepdim (bool): If ``True``, returns a tensor shaped like ``input`` that is
            zero everywhere except at the final time step, where the payoff is stored.

    Shape:

        - input : :math:`(*, T)`
        - output : :math:`(*)`, or :math:`(*, T)` if ``keepdim=True``

    Returns:
        Tensor: Payoff

    Examples:

        >>> # TODO
    """
    if call:
        payoff = fn.relu(input[..., -1] - strike)
    else:
        payoff = fn.relu(strike - input[..., -1])

    if keepdim:
        out = torch.zeros_like(input)
        out[..., -1] = payoff
        return out
    else:
        return payoff
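A short usage sketch for the function above (assuming torch is imported and fn aliases torch.nn.functional, as the body suggests):

import torch
import torch.nn.functional as fn

paths = torch.tensor([[1.0, 1.2, 1.5],
                      [1.0, 0.9, 0.8]])  # two price paths, three time steps

print(european_payoff(paths, strike=1.0))
# tensor([0.5000, 0.0000])  -- call payoff relu(S_T - K) per path

print(european_payoff(paths, strike=1.0, keepdim=True))
# tensor([[0.0000, 0.0000, 0.5000],
#         [0.0000, 0.0000, 0.0000]])  -- payoff written at the final time step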
Example #3
 def forward(self, x):
     out = F.relu(self.bn1(self.conv1(x)))
     out = F.relu(self.bn2(self.conv2(out)))
     out = self.bn3(self.conv3(out))
     out += self.downsample(x)  # residual connection: add the shape-matched skip path
     out = F.relu(out)
     return out
Example #4
 def forward(self, state):
     v = self.fc1(state)
     v = F.relu(v)
     v = self.fc2(v)
     v = F.relu(v)
     v = self.fc3(v)  # output layer is left linear: a state-value estimate is unbounded, so no tanh here
     return v
Example #5
 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     feature = F.dropout(x, training=self.training)
     return self.fc2(feature), feature
Example #6
 def forward(self, state):
     x = F.relu(self.fc1(state))
     x = F.relu(self.fc2(x))
     pi = self.pi(x)
     v = self.v(x)
     return pi, v  # return both heads from one shared network: policy logits and state value
Example #7
 def forward(self, x):
     x = self.pool(F.relu(self.conv1(x)))
     x = self.pool(F.relu(self.conv2(x)))
     x = x.view(-1, 16 * 5 * 5)
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #8
 def forward(self, x):
     x = F.relu(self.bn1(self.conv1(x)))
     x = self.dropout(self.pool1(x))
     x = F.relu(self.bn2(self.conv2(x)))
     x = F.relu(self.bn3(self.conv3(x)))
     x = F.relu(self.bn4(self.conv4(x)))
     x = self.pool2(x)
     return x 
Example #9
 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #10
 def forward(self, x):
     # x = self.gen(x)
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # x goes through conv1, then the ReLU activation, then 2x2 max pooling
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # same again with conv2
     x = x.view(-1, self.num_flat_features(x))        # flatten x into a batch of vectors (total feature count unchanged), ready for the fully connected layers
     x = F.relu(self.fc1(x))                          # fc1 followed by ReLU
     x = F.relu(self.fc2(x))                          # fc2 followed by ReLU
     x = self.fc3(x)                                  # fc3, no activation
     return x
Example #11
 def forward(self, x):
     x = self.pool(F.relu(self.conv1(x)))
     x = self.pool(F.relu(self.conv2(x)))
     x = x.view(-1, 16 * 5 * 5)
     # -1 lets view infer the number of rows; the number of columns is fixed
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
Example #12
 def forward(self, x):
     # Max pooling over a (2, 2) window
     x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
     # If the size is a square you can only specify a single number
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(-1, self.num_flat_features(x))
     x = F.relu(self.fc1(x))
     x = F.relu(self.fc2(x))
     x = self.fc3(x)
     return x
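The comment above about square windows can be checked directly: an int kernel size is shorthand for a square tuple. A one-assertion sketch:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 6, 6)
assert torch.equal(F.max_pool2d(x, 2), F.max_pool2d(x, (2, 2)))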
Example #13
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 2)

        # print(x.shape)  # uncomment to inspect the size before flattening
        x = x.view(-1, 4*4*50)  # flatten; -1 infers the batch size
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #14
    def forward(self, x):
        # transform the input
        x = self.stn(x)

        # Perform the usual forward pass
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #15
    def forward(self, x):
        """Given an image x, returns a transformed image."""
        # define feedforward behavior, applying activations as necessary

        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)

        out = self.res_blocks(out)

        out = F.relu(self.deconv1(out))
        out = F.relu(self.deconv2(out))
        out = torch.tanh(self.deconv3(out))  # tanh activation in last layer

        return out
Example #16
    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x, inplace=True)  # inplace=True modifies the original tensor directly
                                     # instead of allocating a copy, which saves memory
        x = F.max_pool2d(x, (2, 2))
        x = self.conv2(x)
        x = F.relu(x, inplace=True)
        x = F.max_pool2d(x, (2, 2))
        x = x.view(x.shape[0], -1)
        x = self.fc1(x)
        x = F.relu(x, inplace=True)
        x = self.fc2(x)
        out = F.relu(x, inplace=True)

        return out
Example #17
def _pairwise_distances(embeddings, squared=True):
    ''' Returns pairwise distances for a batch of embeddings
        Computational reference:
        https://www.robots.ox.ac.uk/~albanie/notes/Euclidean_distance_trick.pdf
    Args:
        embeddings: tensor of shape (batch_size, embedding_dim)
        squared: if True, the squared Euclidean distance matrix is returned
    Returns:
        pairwise distances between all the embeddings of shape (batch_size, batch_size)
    '''

    gram_matrix = torch.matmul(embeddings, torch.transpose(embeddings, 0, 1))

    # the squared norms ||x||^2 sit on the diagonal of the Gram matrix
    diag = torch.diag(gram_matrix)

    # D(x, y) = ||x||^2 - 2 <x, y> + ||y||^2, broadcast over all pairs
    dists = diag.unsqueeze(1) - 2 * gram_matrix + diag.unsqueeze(0)

    # clamp small negative values that arise from limited floating point precision
    dists = F.relu(dists)

    if not squared:
        # the gradient of sqrt is infinite at zero, so add a small epsilon
        dists = torch.sqrt(dists + 1e-16)

    return dists
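The Euclidean distance trick above can be sanity-checked against torch.cdist as a reference. A small sketch (shapes chosen arbitrarily):

import torch
import torch.nn.functional as F

emb = torch.randn(5, 8)
gram = emb @ emb.T
diag = torch.diag(gram)
dists = F.relu(diag.unsqueeze(1) - 2 * gram + diag.unsqueeze(0))

ref = torch.cdist(emb, emb) ** 2  # reference squared distances
assert torch.allclose(dists, ref, atol=1e-4)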
Example #18
 def forward(self, x, edge_index, cache_name):
     for i, conv_layer in enumerate(self.conv_layers):
         x = conv_layer(x, edge_index, cache_name)
         if i < len(self.conv_layers) - 1:
             x = F.relu(x)
             x = self.dropout_layers[i](x)
     return x
Example #19
 def forward(self, x):
     x = self.conv1(x)
     x = F.relu(self.bn1(x))
     x = self.pool1(x)
     x = self.conv2(x)
     x = F.relu(self.bn2(x))
     x = self.pool2(x)
     x = self.conv3(x)
     x = F.relu(self.bn3(x))
     x = self.pool3(x)
     x = self.conv4(x)
     x = F.relu(self.bn4(x))
     x = self.pool4(x)
     x = F.avg_pool1d(x, x.shape[-1])
     x = x.permute(0, 2, 1)
     x = self.fc1(x)
     return F.log_softmax(x, dim=2)
Example #20
    def forward(self, x):
        output = x.transpose(1, 2)
        output = self.w2(F.relu(self.w1(output)))
        output = self.dropout(output.transpose(1, 2))

        # add residual and norm layer
        output = self.layer_norm(x + output)
        return output
Example #21
 def forward(self, x):
     out = self.conv1(x)
     out = self.trans1(self.dense1(out))
     out = self.trans2(self.dense2(out))
     out = self.dense3(out)
     out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
     out = F.log_softmax(self.fc(out), dim=1)
     return out
Example #22
	def forward(self, x):
		output = x.transpose(1, 2)
		output = self.w2(F.relu(self.w1(output)))
		output = self.dropout(output.transpose(1, 2))

		# Add Residual Layer and Norm Layer
		output = self.layer_norm(x + output)
		return output
Example #23
 def forward(self, x):
     x = self.avgpooling(x)
     x = self.conv(x)
     x = torch.flatten(x, start_dim=1)
     x = F.dropout(x, 0.5, training=self.training)
     x = F.relu(self.fc1(x), inplace=True)
     x = F.dropout(x, 0.5, training=self.training)
     x = self.fc2(x)
     return x
Example #24
 def forward(self, x):
     x = F.relu(self.conv1(x))
     x = self.layer1(x)
     x = self.layer2(x)
     x = self.layer3(x)
     x = self.layer4(x)
     x = F.avg_pool2d(x,4)
     x = x.view(x.size(0),-1)
     x = self.fc1(x)
     return x
Example #25
 def forward(self, x):
     if self.linear_or_not:
         # If linear model
         return self.linear(x)
     else:
         # If MLP
         h = x
         for layer in range(self.num_layers - 1):
             h = F.relu(self.batch_norms[layer](self.linears[layer](h)))
         return self.linears[self.num_layers - 1](h)
Example #26
 def forward(self, x):
     num_points = x.size(2)
     batchsize = x.size()[0]
     trans = self.stn(x)
     x = x.transpose(2, 1)
     # x = torch.bmm(x, trans)
     x = torch.matmul(x, trans)
     x = x.transpose(2, 1)
     x = F.relu(self.conv1(x))
     pointfeat = x
     x = F.relu(self.conv2(x))
     x = self.conv3(x)
     # x = self.mp1(x)
     x, _ = torch.max(x, dim=2, keepdim=True)
     x = x.view(-1, 1024)
     if self.global_feat:
         return x, trans
     else:
         x = x.view(-1, 1024, 1).repeat(1, 1, num_points)
         return torch.cat([x, pointfeat], 1), trans
Example #27
 def forward(self, input_features):
     features_output1 = self.classifier1(input_features)
     if self.act_func == "gelu":
         features_output1 = F.gelu(features_output1)
     elif self.act_func == "relu":
         features_output1 = F.relu(features_output1)
     elif self.act_func == "tanh":
         features_output1 = torch.tanh(features_output1)
     else:
         raise ValueError(f"unsupported act_func: {self.act_func}")
     features_output1 = self.dropout(features_output1)
     features_output2 = self.classifier2(features_output1)
     return features_output2
Example #28
    def forward(self, x):
        num_points = x.size(2)
        batchsize = x.size()[0]
        # print(x.size())
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # x = self.mp1(x)
        x, _ = torch.max(x, dim=2, keepdim=True)
        x = x.view(-1, 1024)

        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)

        # start from the identity transform: the network predicts an offset to a flattened 3x3 identity
        iden = torch.from_numpy(
            np.array([1, 0, 0, 0, 1, 0, 0, 0,
                      1]).astype(np.float32)).view(1, 9).repeat(batchsize, 1)
        if x.is_cuda:
            iden = iden.cuda()
        x = x + iden
        x = x.view(-1, 3, 3)
        return x
Example #29
 def forward(self, x):
     if self.upsample:
         new_features = []
         for layer in self.layers:
             out = layer(x)
             x = torch.cat([x, out], 1)
             new_features.append(out)
         out = torch.cat(new_features, 1)
         fm_size = out.size()[2]
         scale_weight = F.avg_pool2d(out, fm_size)
         scale_weight = F.relu(self.SE_upsample1(scale_weight))
         scale_weight = torch.sigmoid(self.SE_upsample2(scale_weight))
         out = out * scale_weight.expand_as(out)
         return out
     else:
         for layer in self.layers:
             out = layer(x)
             x = torch.cat([x, out], 1)  # 1 = channel axis
         fm_size = x.size()[2]
         scale_weight = F.avg_pool2d(x, fm_size)
         scale_weight = F.relu(self.SE1(scale_weight))
         scale_weight = torch.sigmoid(self.SE2(scale_weight))
         x = x * scale_weight.expand_as(x)
         return x
Example #30
 def dec_act(self, inputs):
     if self.args.dec_act == 'tanh':
         return torch.tanh(inputs)
     elif self.args.dec_act == 'elu':
         return F.elu(inputs)
     elif self.args.dec_act == 'relu':
         return F.relu(inputs)
     elif self.args.dec_act == 'selu':
         return F.selu(inputs)
     elif self.args.dec_act == 'sigmoid':
         return torch.sigmoid(inputs)
     elif self.args.dec_act == 'linear':
         return inputs
     elif self.args.dec_act == 'linear':
         return inputs
     else:
         return F.elu(inputs)