Example #1
def problem(request) -> Tuple[Tensor, Module]:
    """Problem setting.

    Args:
        request: pytest request, contains parameters

    Yields:
        inputs and model

    Raises:
        NotImplementedError: if problem string is unknown
    """
    batch_size, in_dim, out_dim = 2, 3, 4
    inputs = rand(batch_size, in_dim)
    if request.param == PROBLEM_STRING[0]:
        model = Sequential(Linear(in_dim, out_dim), ReLU(), Linear(out_dim, out_dim))
    elif request.param == PROBLEM_STRING[1]:
        model = Sequential(Linear(in_dim, out_dim), Flatten(), Linear(out_dim, out_dim))
    elif request.param == PROBLEM_STRING[2]:
        inputs = rand(batch_size, in_dim, in_dim)
        model = Sequential(
            Linear(in_dim, out_dim), Flatten(), Linear(in_dim * out_dim, out_dim)
        )
    else:
        raise NotImplementedError(f"unknown request.param={request.param}")
    yield inputs, model
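For context, a fixture like this is registered through pytest's parametrization machinery, which is what populates request.param. A minimal sketch of the wiring (the concrete PROBLEM_STRING values are hypothetical, not taken from the snippet):

from pytest import fixture

PROBLEM_STRING = ["act", "flatten_no_op", "flatten_with_op"]  # hypothetical ids

problem = fixture(params=PROBLEM_STRING)(problem)  # each test runs once per entry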
Example #2
    def __init__(self, args, n_dyn_fea, MAX_CAT_ID, MAX_DEPT_ID):
        super(dilated_CNN, self).__init__()
        # params
        seq_len = args.use_days
        self.n_dyn_fea = n_dyn_fea
        self.n_dilated_layers = 3
        kernel_size = 2
        n_filters = 3
        max_cat_id = [MAX_DEPT_ID, MAX_CAT_ID]
        n_outputs = 28
        dropout_rate = 0.1

        # layers for categorical input
        self.lambda0 = LambdaLayer(lambda x: x[:, 0])
        self.lambda1 = LambdaLayer(lambda x: x[:, 1])
        self.embedding0 = Embedding(max_cat_id[0] + 1,
                                    ceil(log(max_cat_id[0] + 1)))
        self.embedding1 = Embedding(max_cat_id[1] + 1,
                                    ceil(log(max_cat_id[1] + 1)))
        self.flatten0 = Flatten()
        self.flatten1 = Flatten()

        # Dilated convolutional layers
        self.conv1d = CausalConv1d(in_channels=n_dyn_fea,
                                   out_channels=n_filters,
                                   kernel_size=kernel_size,
                                   dilation=1)
        self.conv1d_dilated0 = CausalConv1d(in_channels=n_filters,
                                            out_channels=n_filters,
                                            kernel_size=kernel_size,
                                            dilation=2)
        self.conv1d_dilated1 = CausalConv1d(in_channels=n_filters,
                                            out_channels=n_filters,
                                            kernel_size=kernel_size,
                                            dilation=2**2)
        self.conv1d_dilated2 = CausalConv1d(in_channels=n_filters,
                                            out_channels=n_filters,
                                            kernel_size=kernel_size,
                                            dilation=2**3)

        # conv output layers
        self.conv1d_out = CausalConv1d(in_channels=n_filters * 2,
                                       out_channels=8,
                                       kernel_size=1)
        self.dropout_out = Dropout(dropout_rate)
        self.flatten_out = Flatten()

        # layers for concatenating with cat and num features
        self.dense_concat0 = Linear(in_features=1669, out_features=56)
        self.dense_concat1 = Linear(in_features=56, out_features=n_outputs)
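The matching forward pass is not shown. The in_channels=n_filters * 2 on conv1d_out strongly suggests a WaveNet-style skip connection: the first conv's output concatenated with the last dilated output. A sketch under that assumption only (the ReLU between the dense layers is also assumed; relu here is torch.nn.functional.relu):

    def forward(self, seq_in, cat_in):
        # seq_in: (N, n_dyn_fea, seq_len) floats; cat_in: (N, 2) integer ids
        c1 = self.conv1d(seq_in)
        c2 = self.conv1d_dilated0(c1)
        c2 = self.conv1d_dilated1(c2)
        c2 = self.conv1d_dilated2(c2)
        out = torch.cat([c1, c2], dim=1)  # matches in_channels=n_filters * 2
        out = self.flatten_out(self.dropout_out(self.conv1d_out(out)))
        e0 = self.flatten0(self.embedding0(self.lambda0(cat_in)))
        e1 = self.flatten1(self.embedding1(self.lambda1(cat_in)))
        x = torch.cat([out, e0, e1], dim=1)  # 1669 features expected by dense_concat0
        return self.dense_concat1(relu(self.dense_concat0(x)))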
Example #3
def TictactoeNet(board_size):
    in_features = 3 * board_size * board_size
    hidden_features_fir = 243
    hidden_features_sec = 243
    out_features = 1

    # chain the three stages in one Sequential; the original built the first
    # two stages as separate Sequentials that the returned model never used
    model = Sequential(
        Flatten(),
        Linear(in_features, hidden_features_fir),
        Linear(hidden_features_fir, hidden_features_sec),
        Linear(hidden_features_sec, out_features),
    )
    return model
Example #4
 def __init__(self, triplets_dataset: TripletsDataset, dimension: int):
     super().__init__()
     self.high_invariance_net = alexnet(pretrained=True)
     self.low_invariance_net_1 = Sequential(
         Conv2d(3, 3, 1, stride=4, padding=2),
         Conv2d(3, 96, 8, stride=4, padding=4),
         MaxPool2d(3, stride=4, padding=0),
         Flatten(),
     )
     self.low_invariance_net_2 = Sequential(
         Conv2d(3, 3, 1, stride=8, padding=2),
         Conv2d(3, 96, 8, stride=4, padding=4),
         MaxPool2d(7, stride=2, padding=3),
         Flatten(),
     )
Example #5
    def __init__(self, n_actions=4, n_channels=4):
        super().__init__()

        self.phi = Sequential(
            # f_32 k_3 s_2 p_1
            Conv2d(n_channels, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Conv2d(32, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Conv2d(32, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Conv2d(32, 32, 3, stride=2, padding=1, bias=True),
            ELU(),
            Identity(),  # tap
            Flatten(1, -1),
        )

        self.gee = torch.nn.Sequential(
            Linear(2 * 32 * 3 * 3, 256, bias=True),
            ReLU(),
            Linear(256, n_actions, bias=True),
        )

        self.eff = torch.nn.Sequential(
            Linear(32 * 3 * 3 + n_actions, 256, bias=True),
            ReLU(),
            Linear(256, 32 * 3 * 3, bias=True),
        )

        self.n_actions, self.n_emb_dim = n_actions, 32 * 3 * 3
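The layer sizes read like an intrinsic-curiosity pairing: gee consumes 2 * 32 * 3 * 3 features (an inverse model over two concatenated embeddings) and eff consumes 32 * 3 * 3 + n_actions (a forward model over an embedding plus an action). A usage sketch under that reading only (obs, next_obs, action and the flow are assumptions; one_hot is torch.nn.functional.one_hot):

    def forward(self, obs, next_obs, action):
        z, z_next = self.phi(obs), self.phi(next_obs)             # (N, 288) each
        action_logits = self.gee(torch.cat([z, z_next], dim=1))   # inverse model
        a = one_hot(action, self.n_actions).float()
        z_pred = self.eff(torch.cat([z, a], dim=1))               # forward model
        return action_logits, z_pred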
Example #6
 def __init__(self):
     super(ANN, self).__init__()
     self.flatten = Flatten()
     self.sequential = Sequential(
         Linear(in_features=input_shape, out_features=512), ReLU(),
         Linear(in_features=512, out_features=512), ReLU(),
         Linear(in_features=512, out_features=10), ReLU())
Example #7
    def __init__(self, num_layers, drop_ratio, mode='ir_se'):
        super().__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50, 100 or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'

        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE

        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))

        self.output_layer = Sequential(BatchNorm2d(512),
                                       Dropout(drop_ratio),
                                       Flatten(),
                                       Linear(512 * 7 * 7, 512),
                                       BatchNorm1d(512))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(
                    unit_module(bottleneck.in_channel,
                                bottleneck.depth,
                                bottleneck.stride))

        self.body = Sequential(*modules)
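Linear(512 * 7 * 7, 512) in output_layer pins the expected spatial size to 7x7, which with the stride-16 IR body corresponds to the 112x112 crops conventional for this ArcFace-style backbone. A smoke-test sketch (the class name Backbone and the forward wiring are assumptions):

net = Backbone(num_layers=50, drop_ratio=0.6, mode='ir_se')
emb = net(torch.randn(2, 3, 112, 112))  # expected embedding shape: (2, 512)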
Example #8
def Warzone_NN(board_size):
    m = Sequential(
        Flatten(),
        Linear(in_features=6 * board_size * board_size,
               out_features=1,
               bias=True))
    return m
Example #9
 def __init__(self):
     super(CNN, self).__init__()
     self.feature_extraction = Sequential(
         OrderedDict([
             ('conv_1',
              Conv2d(in_channels=1,
                     out_channels=4,
                     stride=1,
                     kernel_size=5,
                     padding=0)),  # 4x28x28
             ('bn_1', BatchNorm2d(num_features=4)),
             ('relu_1', ReLU(inplace=False)),
             ('pool_1', MaxPool2d(stride=2, kernel_size=2,
                                  padding=0)),  # 4x14x14
             ('drop_1', Dropout(p=0.1)),
             ('conv_2',
              Conv2d(in_channels=4,
                     out_channels=8,
                     stride=1,
                     kernel_size=5,
                     padding=0)),  # 8x10x10
             ('bn_2', BatchNorm2d(num_features=8)),
             ('relu_2', ReLU(inplace=False)),
             ('pool_2', MaxPool2d(stride=2, kernel_size=2,
                                  padding=0)),  # 8x5x5
             ('drop_2', Dropout(p=0.1)),
             ('flatten', Flatten(start_dim=1))
         ]))
     self.classifier = Sequential(
         OrderedDict([
             ('fc', Linear(in_features=(8 * 5 * 5), out_features=10)),  # 10
             ('softmax', Softmax(dim=1))
         ]))
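No forward method appears in the snippet; given the two named stages and the shape comments (1x32x32 in, 8x5x5 before the classifier), a minimal sketch would be:

 def forward(self, x):
     # x: (N, 1, 32, 32) -> features (N, 8 * 5 * 5) -> probabilities (N, 10)
     return self.classifier(self.feature_extraction(x))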
Example #10
    def __init__(self) -> None:
        super().__init__()
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = MaxPool2d(2)
        self.flatten = Flatten()
        self.linear1 = Linear(1024, 64)
        self.linear2 = Linear(64, 10)

        self.model1 = Sequential(Conv2d(3, 32, 5, padding=2), MaxPool2d(2),
                                 Conv2d(32, 32, 5, padding=2), MaxPool2d(2),
                                 Conv2d(32, 64, 5, padding=2), MaxPool2d(2),
                                 Flatten(), Linear(1024, 64), Linear(64, 10))
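The hard-coded Linear(1024, 64) only fits 32x32 inputs: each 5x5 conv with padding=2 preserves the spatial size, so the three 2x poolings give 32 -> 16 -> 8 -> 4 and Flatten yields 64 * 4 * 4 = 1024. A quick standalone check of that arithmetic (rand from torch):

check = Sequential(Conv2d(3, 32, 5, padding=2), MaxPool2d(2),
                   Conv2d(32, 32, 5, padding=2), MaxPool2d(2),
                   Conv2d(32, 64, 5, padding=2), MaxPool2d(2),
                   Flatten(), Linear(1024, 64), Linear(64, 10))
assert check(rand(2, 3, 32, 32)).shape == (2, 10)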
Example #11
 def __init__(self):
     super().__init__()
     self.flatten = Flatten()
     self.layer = Linear(28 * 28, 50)
     self.layer1 = Linear(50, 20)
     self.layer2 = Linear(20, 10)
     self.softmax = LogSoftmax(dim=1)  # explicit dim avoids the deprecation warning
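A forward method is not included; a minimal sketch consistent with the layers (the snippet shows no activation between the linears, so none is assumed here):

 def forward(self, x):
     x = self.flatten(x)     # (N, 784) for 28x28 inputs
     x = self.layer(x)
     x = self.layer1(x)
     x = self.layer2(x)
     return self.softmax(x)  # log-probabilities over 10 classes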
Example #12
    def __init__(self):
        super(Will, self).__init__()
        self.name = "Will"
        self.__version__ = "1.1"
        # self.pool = MaxPool2d(2)  # 2*2 max pooling

        self.cnn_relu_stack = Sequential(
            Conv2d(3, 16, 3),  # 3 input channels, 16 output channels, 3x3 kernel
            ReLU(),
            # MaxPool2d(2),
            Conv2d(16, 24, 4),
            ReLU(),
            MaxPool2d(2),
            Conv2d(24, 56, 5),
            ReLU(),
            MaxPool2d(2),
            Conv2d(56, 112, 3),
            ReLU(),
            Flatten(),
            Linear(448, 240),
            ReLU(),
            Linear(240, 120),
            ReLU(),
            Linear(120, 10),  # 10 classes, final output
        )
Example #13
    def __new__(cls, n_outputs, *, batch_norm=True):
        layers = [
            ('conv_0', Conv2d(4, 32, 8, 4, bias=not batch_norm)),
            ('bnorm0', BatchNorm2d(32, affine=True)) if batch_norm else None,
            ('relu_0', ReLU()),
            ('tap_0', Identity()),  # nop tap for viewer

            # ('drop_1', Dropout(p=0.5)),
            ('conv_1', Conv2d(32, 64, 4, 2, bias=not batch_norm)),
            ('bnorm1', BatchNorm2d(64, affine=True)) if batch_norm else None,
            ('relu_1', ReLU()),
            ('tap_1', Identity()),  # nop tap for viewer

            # ('drop_2', Dropout(p=0.2)),
            ('conv_2', Conv2d(64, 64, 3, 1, bias=not batch_norm)),
            ('bnorm2', BatchNorm2d(64, affine=True)) if batch_norm else None,
            ('relu_2', ReLU()),
            ('tap_2', Identity()),  # nop tap for viewer
            ('flat_3', Flatten(1, -1)),
            ('drop_3', Dropout(p=0.5)),
            ('dense3', Linear(64 * 7 * 7, 512, bias=True)),
            ('relu_3', ReLU()),
            ('drop_4', Dropout(p=0.2)),
            ('dense4', Linear(512, n_outputs, bias=True)),
        ]

        # filter out `None`s and build a sequential network
        return Sequential(OrderedDict(list(filter(None, layers))))
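Because __new__ returns the Sequential directly, the class behaves as a factory: calling it yields a plain Sequential, never an instance of the class itself. A hypothetical usage (the class name DQNNet is invented for illustration; 84x84 frames make the conv stack end at 7x7, matching Linear(64 * 7 * 7, 512)):

net = DQNNet(6)                       # hypothetical name
assert isinstance(net, Sequential)    # not an instance of DQNNet
out = net(torch.zeros(1, 4, 84, 84))  # (84-8)//4+1=20 -> (20-4)//2+1=9 -> 9-3+1=7
assert out.shape == (1, 6)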
Example #14
def main():
    """Create and execute an experiment."""
    model = AnalogSequential(
        Flatten(),
        AnalogLinear(INPUT_SIZE,
                     HIDDEN_SIZES[0],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[0],
                     HIDDEN_SIZES[1],
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        Sigmoid(),
        AnalogLinear(HIDDEN_SIZES[1],
                     OUTPUT_SIZE,
                     True,
                     rpu_config=SingleRPUConfig(device=ConstantStepDevice())),
        LogSoftmax(dim=1))

    # Create the training Experiment.
    experiment = BasicTrainingWithScheduler(dataset=FashionMNIST,
                                            model=model,
                                            epochs=EPOCHS,
                                            batch_size=BATCH_SIZE)

    # Create the runner and execute the experiment.
    runner = LocalRunner(device=DEVICE)
    results = runner.run(experiment, dataset_root=PATH_DATASET)
    print(results)
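The upper-case names are module-level constants defined elsewhere in the script; plausible values for a FashionMNIST run (the hidden sizes and training hyperparameters here are assumptions):

INPUT_SIZE = 784           # 28 * 28 flattened pixels
HIDDEN_SIZES = [256, 128]
OUTPUT_SIZE = 10
EPOCHS = 30
BATCH_SIZE = 64
DEVICE = torch.device("cpu")  # assumes `import torch`
PATH_DATASET = "data"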
Example #15
    def __init__(self, labels_dim=0, D_lr=2e-4):
        super(Discriminator, self).__init__()

        self.conv = Sequential(
            Conv2d(3, 16, kernel_size=3, stride=2, padding=1),
            LeakyReLU(0.2, inplace=True),
            Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
            BatchNorm2d(32),
            LeakyReLU(0.2, inplace=True),
            Conv2d(32, 64, kernel_size=3, stride=2, padding=1),
            BatchNorm2d(64),
            LeakyReLU(0.2, inplace=True),
            Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            BatchNorm2d(128),
            LeakyReLU(0.2, inplace=True),
            Conv2d(128, 256, kernel_size=4, stride=2, padding=0),
            BatchNorm2d(256),
            LeakyReLU(0.2, inplace=True),
        )
        self.flatten = Flatten()
        self.linear = Sequential(
            Linear(256, 1),
            Tanh(),
        )

        self.set_optimizer(optimizer, lr=D_lr)
Example #16
    def __init__(self):
        super(Net_256mp, self).__init__()

        self.conv_model1 = nn.Sequential(
            nn.Conv2d(in_channels=channels,
                      out_channels=conv_out_channels,
                      kernel_size=kernel_size,
                      padding=conv_pad,
                      stride=conv_stride),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(conv_out_channels),
            nn.Dropout(p=p)).to('cuda:0')

        self.conv_model2 = nn.Sequential(
            nn.Conv2d(in_channels=conv_out_channels,
                      out_channels=conv_out_channels,
                      kernel_size=kernel_size,
                      padding=conv_pad,
                      stride=conv_stride),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(conv_out_channels),
            nn.Dropout(p=p),
            nn.Conv2d(in_channels=conv_out_channels,
                      out_channels=conv2_out_channels,
                      kernel_size=kernel_size,
                      padding=conv_pad,
                      stride=conv_stride),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(conv2_out_channels),
            nn.Dropout(p=p),
            nn.Conv2d(in_channels=conv2_out_channels,
                      out_channels=conv2_out_channels,
                      kernel_size=kernel_size,
                      padding=conv_pad,
                      stride=conv_stride),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(conv2_out_channels),
            nn.Dropout(p=p),
            nn.Conv2d(in_channels=conv2_out_channels,
                      out_channels=conv3_out_channels,
                      kernel_size=kernel_size,
                      padding=conv_pad,
                      stride=conv_stride),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(conv3_out_channels),
            nn.Dropout(p=p),
            nn.Conv2d(in_channels=conv3_out_channels,
                      out_channels=conv3_out_channels,
                      kernel_size=kernel_size,
                      padding=conv_pad,
                      stride=conv_stride),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(conv3_out_channels),
            nn.Dropout(p=p)).to('cuda:1')

        self.lin_model = nn.Sequential(
            Flatten(),
            Linear(in_features=conv3_out_channels * (256 // 64) * (256 // 64),
                   out_features=100),
            nn.ReLU(inplace=True),
            nn.Dropout(p=p),
            Linear(in_features=100, out_features=1)).to('cuda:1')
Example #17
  def __init__(self, config, dropout = 0.2, temperature = None):
    super(TDConway, self).__init__()
    self.temperature = temperature
    
    self.stack_1 = ConvStack(3, 64, num_layers = 2, initial_depth = config.num_channels)

    self.pool  = MaxPool2d(3, 3, (0,1)) # Downsample (15,19) -> (5, 7)

    # See PyTorch docs for torch.nn.MaxPool2d
    pooled_height = (config.rows + 2) // 3
    pooled_width  = (config.cols + 4) // 3
    
    self.stack_2 = ConvStack(3, 128, num_layers = 2, initial_depth = 64)
    
    
    self.fc = Sequential(
      Flatten(), 
      Linear(128 * pooled_height * pooled_width, 256),
      SELU(),
      Dropout(dropout),
      Linear(256, 2048),
      SELU(),
      Dropout(dropout),
      Linear(2048, 1),
      Sigmoid()
    )
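The pooled_height and pooled_width formulas can be checked against the MaxPool2d output rule floor((n + 2p - k) / s) + 1 with kernel = stride = 3 and padding (0, 1), for the (15, 19) -> (5, 7) case the comment mentions:

H, W = 15, 19
assert (H + 2 * 0 - 3) // 3 + 1 == 5 == (H + 2) // 3   # height, pad 0
assert (W + 2 * 1 - 3) // 3 + 1 == 7 == (W + 4) // 3   # width, pad 1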
Example #18
def ChessNet1(selection):

    state = game_file.initial_state(selection)

    module = Sequential(
        Flatten(), Linear(3 * len(state.board) * len(state.board[0]), 1, True))
    return module
Example #19
  def __init__(self, config, dropout = 0.2):
    super(TDConway, self).__init__()
    
    self.stack_1 = ResidualConvStack(3, 64, layer_structure = [1,2,2,2], initial_depth = config.num_channels)

    self.pooler  = MaxPool2d(3, 3, (0,1)) # Downsample (15,19) -> (5, 7)

    # See PyTorch docs for torch.nn.MaxPool2d
    pooled_height = (config.rows + 2) // 3
    pooled_width  = (config.cols + 4) // 3
    
    self.stack_2 = ResidualConvStack(3, 128, layer_structure = [1,2,2,2], initial_depth = 64)
    
    
    self.fc = Sequential(
      Flatten(), 
      Linear(128 * pooled_height * pooled_width, 512),
      SELU(),
      Dropout(0), # Remove in eval
      Linear(512, 2048),
      SELU(),
      Dropout(0), # Remove in eval
      Linear(2048, 1),
      # Sigmoid() # Remove in evaluation version
    )
Example #20
    def _init_layers(self):
        layers = []
        for idx, dims in enumerate(self.layers_dims):
            if idx == 0:
                current_dims = 3
                next_dims = dims
            else:
                current_dims = self.layers_dims[idx - 1]
                next_dims = dims

            layers.extend([
                Conv2d(in_channels=current_dims,
                       out_channels=next_dims,
                       kernel_size=3,
                       stride=2,
                       padding=1),
                BatchNorm2d(next_dims),
                self.activation(),
            ])

        self.flat_size = self.layers_dims[-1] * 5 * 5
        layers.extend([
            Flatten(),
            #Dropout(p=0.25),
            Linear(self.flat_size, int(self.flat_size / 9)),
            ReLU(),
            #Dropout(p=0.4),
        ])
        if self.non_variational:
            layers.append(Linear(int(self.flat_size / 9), self.latent_size))
        else:
            self.init_gaussian_params(int(self.flat_size / 9))

        return layers
Example #21
  def __init__(self, batch_size, inputs, outputs):
    # initialize the superclass and store the parameters
    super(CnnRegressor, self).__init__()
    self.batch_size = batch_size
    self.inputs = inputs
    self.outputs = outputs

    # input convolutional layer: (in_channels, out_channels, kernel_size)
    self.input_layer = Conv1d(inputs, batch_size, 1)

    # max-pooling layer with kernel size 1
    self.max_pooling_layer = MaxPool1d(1)

    # second convolutional layer
    self.conv_layer = Conv1d(batch_size, 128, 1)

    # flatten layer to vectorize the feature maps
    self.flatten_layer = Flatten()

    # linear layer mapping the 128 features down to 64
    self.linear_layer = Linear(128, 64)

    # output layer
    self.output_layer = Linear(64, outputs)
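The corresponding forward pass is not part of the snippet; a sketch that chains the layers in declaration order (the reshape and the functional ReLUs are assumptions; relu is torch.nn.functional.relu):

  def forward(self, x):
    x = x.view(self.batch_size, self.inputs, -1)  # Conv1d wants (N, C, L)
    x = relu(self.input_layer(x))
    x = self.max_pooling_layer(x)
    x = relu(self.conv_layer(x))
    x = self.flatten_layer(x)
    x = relu(self.linear_layer(x))
    return self.output_layer(x)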
Example #22
  def __init__(self, batch_size, inputs, outputs):
    super(CnnRegressor, self).__init__()
    self.batch_size = batch_size
    self.inputs = inputs
    self.outputs = outputs

    self.input_bn = nn.BatchNorm1d(8) #batch normalization
    
    self.input_layer = Conv1d(inputs, batch_size, 1) #input layer

    self.max_pooling_layer = MaxPool1d(1) #max-pooling layer

    self.conv_layer1 = Conv1d(batch_size, 128, 1)    #first conv layer

    self.conv_bn1 = nn.BatchNorm1d(128)  # batch normalization after first conv

    self.conv_layer2 = Conv1d(128, 256, 1) #second conv layer 

    self.conv_bn2 = nn.BatchNorm1d(256)  # batch normalization after second conv

    self.flatten_layer = Flatten() #flatten layer to vectorize the data

    self.linear_layer = Linear(256, 64) #linear regression

    self.output_layer = Linear(64, outputs) #output layer
Example #23
 def __init__(self):
     super(CIFAR, self).__init__()
     self.model = Sequential(Conv2d(3, 32, 5, padding=2), MaxPool2d(2),
                             Conv2d(32, 32, 5, padding=2), MaxPool2d(2),
                             Conv2d(32, 64, 5, padding=2), MaxPool2d(2),
                             Flatten(), Linear(64 * 4 * 4, 64),
                             Linear(64, 10))
Example #24
def setup(device):
    """Load MNIST batch, create extended CNN and loss function. Load to device.

    Args:
        device (torch.device): Device that all objects are transferred to.

    Returns:
        inputs, labels, model, loss function
    """
    X, y = load_one_batch_mnist(batch_size=64)
    X, y = X.to(device), y.to(device)

    model = extend(
        Sequential(
            Conv2d(1, 128, 3, padding=1),
            ReLU(),
            MaxPool2d(3, stride=2),
            Conv2d(128, 256, 3, padding=1),
            ReLU(),
            MaxPool2d(3, padding=1, stride=2),
            Conv2d(256, 64, 3, padding=1),
            ReLU(),
            MaxPool2d(3, stride=2),
            Conv2d(64, 32, 3, padding=1),
            ReLU(),
            MaxPool2d(3, stride=2),
            Flatten(),
            Linear(32, 10),
        ).to(device)
    )

    lossfunc = extend(CrossEntropyLoss().to(device))

    return X, y, model, lossfunc
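extend here is BackPACK's; the extended model and loss function are then used inside a backpack(...) context so that extra quantities are computed during the backward pass. A usage sketch (choosing the BatchGrad extension is an assumption):

from backpack import backpack
from backpack.extensions import BatchGrad

X, y, model, lossfunc = setup(device)
loss = lossfunc(model(X), y)
with backpack(BatchGrad()):
    loss.backward()
# each parameter now carries per-sample gradients in p.grad_batch
print(next(model.parameters()).grad_batch.shape)  # first dim is 64, the batch size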
Example #25
    def _make_cnn(
        conv_cls: Type[Module], output_dim: int, conv_params: Tuple
    ) -> Sequential:
        linear = Linear(output_dim, 5)
        set_requires_grad(linear, False)

        return Sequential(conv_cls(*conv_params), ReLU(), Flatten(), linear)
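set_requires_grad is not defined in the snippet; a plausible helper matching the call would be:

def set_requires_grad(module: Module, value: bool) -> None:
    # freeze (or unfreeze) every parameter of the module
    for param in module.parameters():
        param.requires_grad = value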
Example #26
    def __init__(self,
                 input_size,
                 channel_last=True,
                 hidden_size=50,
                 **kwargs):

        # Defaults
        self.input_length = 20
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_classes = 2
        self.label_embedding_size = self.num_classes
        self.prob_classes = torch.ones(self.num_classes)
        self.dropout = 0.5
        self.label_type = 'required'
        self.channel_last = channel_last

        # Set kwargs (might override the attributes above)
        for key, value in kwargs.items():
            setattr(self, key, value)

        super(CNNCGANDiscriminator, self).__init__(self.input_size,
                                                   self.label_type)

        # Label embedding layer
        self.label_embeddings = nn.Embedding(self.num_classes,
                                             self.label_embedding_size)

        Conv1d_ = lambda k: Conv1d(hidden_size, hidden_size, k)
        layers_input_size = self.input_size + self.label_embedding_size
        layers_output_size = 5 * hidden_size
        # Build the CNN layers
        self.layers = nn.Sequential(
            Conv1d(layers_input_size, hidden_size, 1),
            LeakyReLU(0.2),
            # output size: (-1, 50, 20)
            Conv1d_(3),
            ReplicationPad1d(1),
            LeakyReLU(0.2),
            Conv1d_(3),
            ReplicationPad1d(1),
            LeakyReLU(0.2),
            AvgPool1d(2, 2),
            # output size: (-1, 50, 10)
            Conv1d_(3),
            ReplicationPad1d(1),
            LeakyReLU(0.2),
            Conv1d_(3),
            ReplicationPad1d(1),
            LeakyReLU(0.2),
            AvgPool1d(2, 2),
            # output size: (-1, 50, 5)
            Flatten(),
            Linear(layers_output_size, 1)
            # output size: (-1, 1)
        )

        # Initialize all weights.
        self._weight_initializer()
Example #27
 def __init__(self):
     super(ConvNet, self).__init__()
     self.input_layer = Conv1d(3, 10, 1)
     self.max_pooling_layer = MaxPool1d(1)
     self.conv_layer = Conv1d(10, 50, 1)
     self.flatten_layer = Flatten()
     self.linear_layer = Linear(50, 50)
     self.output_layer = Linear(50, 1)
Example #28
    def __init__(self, dim_z=latent_dim):
        super(ConditionalEncoder, self).__init__()
        self.dim_z = dim_z
        kernel_size = 3
        stride = 2
        padding = self.same_padding(kernel_size)

        self.conv0 = Sequential(
            Conv2d(colors_dim, 16, kernel_size=1, stride=1),
            LeakyReLU(negative_slope=negative_slope),
        )
        self.conv1 = Sequential(
            Conv2d(16,
                   32,
                   kernel_size=kernel_size,
                   stride=stride,
                   padding=padding),
            BatchNorm2d(32, momentum=momentum),
            LeakyReLU(negative_slope=negative_slope),
        )
        self.conv2 = Sequential(
            Conv2d(32,
                   64,
                   kernel_size=kernel_size,
                   stride=stride,
                   padding=padding),
            BatchNorm2d(64, momentum=momentum),
            LeakyReLU(negative_slope=negative_slope),
        )
        self.conv3 = Sequential(
            Conv2d(64,
                   128,
                   kernel_size=kernel_size,
                   stride=stride,
                   padding=padding),
            BatchNorm2d(128, momentum=momentum),
            LeakyReLU(negative_slope=negative_slope),
            Flatten(),  # next layer takes flat input with labels appended
        )
        self.dense1 = Sequential(Linear(8192, 2048),
                                 BatchNorm1d(2048, momentum=momentum),
                                 LeakyReLU(negative_slope=negative_slope))
        self.dense2 = Sequential(Linear(2048, self.dim_z),
                                 BatchNorm1d(self.dim_z, momentum=momentum),
                                 LeakyReLU(negative_slope=negative_slope))
        self.embedding = Sequential(
            Linear(labels_dim, self.dim_z),
            BatchNorm1d(self.dim_z, momentum=momentum),
            LeakyReLU(negative_slope=negative_slope),
        )

        ## the following take the same input from dense1
        self.dense_z_mu = Linear(128 * 2, self.dim_z)
        self.dense_z_std = Sequential(
            Linear(self.dim_z * 2, self.dim_z),
            Softplus(),
        )
        self.set_optimizer(optimizer, lr=learning_rate, betas=betas)
Example #29
 def __init__(self, in_channel, reduction_ratio):
     super(ChannelFilter, self).__init__()
     self.in_channel = in_channel
     self.mlp = nn.Sequential(
         Flatten(),
         nn.Linear(in_channel, in_channel // reduction_ratio),
         nn.ReLU(),
         nn.Linear(in_channel // reduction_ratio, in_channel)
     )
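No forward method is shown, but the name and the squeeze-style MLP (reduce by reduction_ratio, then expand back) suggest a CBAM-like channel-attention use: pool spatially, flatten, run the MLP, and gate the input channels. A sketch under that assumption only (adaptive_avg_pool2d is torch.nn.functional's):

 def forward(self, x):
     pooled = adaptive_avg_pool2d(x, 1)            # (N, C, 1, 1)
     weights = torch.sigmoid(self.mlp(pooled))     # (N, C) after Flatten + MLP
     return x * weights.view(x.size(0), -1, 1, 1)  # reweight channels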
Example #30
	def __init__(self, in_dims=None, num_classes=None, num_samples=0, **kwargs):

		super().__init__()
		self.save_hyperparameters()

		actfunc = torch.nn.LeakyReLU
		bias = True
		prior = [1., 'laplace'][0]
		if self.hparams.model == 'bnn':
			in_features = prod(in_dims)
			self.bnn = Sequential(	Flatten(1, -1),
						MC_ExpansionLayer(num_MC=self.hparams.num_MC, input_dim=2),
						BayesLinear(in_features,self.hparams.num_hidden, prior=prior, bias=bias),
						actfunc(),
						BayesLinear(self.hparams.num_hidden,self.hparams.num_hidden,prior=prior, bias=bias),
						actfunc(),
						BayesLinear(self.hparams.num_hidden,self.hparams.num_hidden, prior=prior, bias=bias),
						actfunc(),
						# BayesLinear(self.hparams.num_hidden,self.hparams.num_hidden, prior=prior, bias=bias),
						# actfunc(),
						# BayesLinear(self.hparams.num_hidden,self.hparams.num_hidden, prior=prior, bias=bias),
						# actfunc(),
						BayesLinear(self.hparams.num_hidden,num_classes, prior=prior, bias=bias)
						# BayesLinear(self.hparams.num_hidden + 1,num_classes, prior=prior)
						)

		if self.hparams.model == 'cbnn':
			debug = 1
			layer_args = {'kernel_size': 5, 'padding': 2, 'stride': 1, 'num_MC': self.hparams.num_MC}
			self.bnn = Sequential(	MC_ExpansionLayer(num_MC=self.hparams.num_MC, input_dim=4),
						# PrintModule(),
						BayesConv2d(in_channels=in_dims[0], out_channels=int(96/debug), **layer_args),
						MC_BatchNorm2D(int(96/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesConv2d(in_channels=int(96/debug), out_channels=int(128/debug), **layer_args),
						MC_BatchNorm2D(int(128/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesConv2d(in_channels=int(128/debug), out_channels=int(256/debug), **layer_args),
						MC_BatchNorm2D(int(256/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesConv2d(in_channels=int(256/debug), out_channels=int(128/debug), **layer_args),
						MC_BatchNorm2D(int(128/debug)),
						actfunc(),
						MC_MaxPool2d(kernel_size=2, stride=2),
						BayesAdaptiveInit_FlattenAndLinear(self.hparams.num_hidden),
						actfunc(),
						BayesLinear(self.hparams.num_hidden, num_classes)
			)
			self.bnn(torch.randn(1, *in_dims, dtype=torch.float32))

		self.criterion = MC_CrossEntropyLoss(num_samples=self.hparams.num_samples)

		self.summary = ModelSummary(model=self)
		self.num_params = ModelSummary(model=self).param_nums[0]