Example #1
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        hidden_sizes,
        activation="relu",
        activation_out="logsoftmax",
        n_out=2,
    ):
        super(FCNClassifierModel, self).__init__()

        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.hidden_sizes = hidden_sizes
        self.fc_activations = [activation for i in range(len(hidden_sizes))
                               ] + [activation_out]
        self.n_out = n_out

        # get flattened input size
        x = torch.randn(self.input_channels, self.input_width,
                        self.input_height).view(-1, self.input_channels,
                                                self.input_width,
                                                self.input_height)
        self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]

        # define fully connected layers
        fc_args = dict(input_size=self._to_linear,
                       hidden_sizes=self.hidden_sizes,
                       output_size=self.n_out,
                       activations=self.fc_activations)
        fc_parameters = u.get_fc_layers(**fc_args)
        self.fc_layers = u.build_layers(fc_parameters)
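
The constructor infers the flattened input size from a dummy tensor before building the fully connected stack. A minimal, self-contained sketch of that computation (the 1×28×28 input shape is an assumed example, not taken from the source):

import torch

# assumed example shape; for a purely fully connected model the flattened size
# is just channels * width * height
input_channels, input_width, input_height = 1, 28, 28
x = torch.randn(input_channels, input_width, input_height).view(
    -1, input_channels, input_width, input_height)
_to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]
assert _to_linear == input_channels * input_width * input_height  # 784
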
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        hidden_sizes,
        activation="relu",
        activation_out="relu",
    ):
        super(FCAutoencoderModel, self).__init__()

        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.hidden_sizes = hidden_sizes
        self.fc_encoder_activations = [
            activation for i in range(len(hidden_sizes))
        ]
        self.fc_decoder_activations = [
            activation for i in range(len(hidden_sizes) - 1)
        ] + [activation_out]

        # get flattened input size
        x = torch.randn(self.input_channels, self.input_width,
                        self.input_height).view(-1, self.input_channels,
                                                self.input_width,
                                                self.input_height)
        self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]

        # define fully connected layers
        fc_encoder_args = dict(input_size=self._to_linear,
                               hidden_sizes=self.hidden_sizes[:-1],
                               output_size=self.hidden_sizes[-1],
                               activations=self.fc_encoder_activations)
        fc_encoder_parameters = u.get_fc_layers(**fc_encoder_args)
        self.fc_encoder_layers = u.build_layers(fc_encoder_parameters)

        decoder_sizes = self.hidden_sizes[:-1]
        decoder_sizes.reverse()

        fc_decoder_args = dict(input_size=self.hidden_sizes[-1],
                               hidden_sizes=decoder_sizes,
                               output_size=self._to_linear,
                               activations=self.fc_decoder_activations)

        fc_decoder_parameters = u.get_fc_layers(**fc_decoder_args)
        self.fc_decoder_layers = u.build_layers(fc_decoder_parameters)
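
The encoder and decoder layer sizes mirror each other around the bottleneck `hidden_sizes[-1]`. A small sketch of the size bookkeeping, assuming `hidden_sizes = [256, 128, 64]` and a flattened input of 784 (both example values):

hidden_sizes = [256, 128, 64]
_to_linear = 784  # assumed flattened input size

encoder_path = [_to_linear] + hidden_sizes            # 784 -> 256 -> 128 -> 64 (bottleneck)
decoder_sizes = hidden_sizes[:-1]                     # slicing copies, so reverse() is safe here
decoder_sizes.reverse()                               # [128, 256]
decoder_path = [hidden_sizes[-1]] + decoder_sizes + [_to_linear]  # 64 -> 128 -> 256 -> 784

print(encoder_path)  # [784, 256, 128, 64]
print(decoder_path)  # [64, 128, 256, 784]
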
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        hidden_sizes,
        activation="lif",
        activation_out="lif",
        steps=100,
        threshold=1,
        decay=0.99,
        n_out=2,
        device="cuda",
    ):
        super(SFCNModel, self).__init__()

        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.hidden_sizes = hidden_sizes
        self.fc_activations = [activation for i in range(len(hidden_sizes))] + [activation_out]
        self.n_out = n_out
        self.device = device
        self.steps = steps

        # get flattened input size
        x = torch.randn(self.input_channels, self.input_width, self.input_height).view(
            -1, self.input_channels, self.input_width, self.input_height
        )
        self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]

        # define fully connected layers
        fc_args = dict(
            input_size=self._to_linear,
            hidden_sizes=self.hidden_sizes,
            output_size=self.n_out,
            activations=self.fc_activations,
            thresholds=[threshold for i in range(len(self.fc_activations))],
            decays=[decay for i in range(len(self.fc_activations))],
        )
        fc_parameters = u.get_sfc_layers(**fc_args)
        self.fc_layers = u.build_layers(fc_parameters)
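
The `lif` activations together with `threshold`, `decay`, and `steps` describe leaky integrate-and-fire dynamics simulated over discrete timesteps. The sketch below shows a generic LIF update for illustration only; the project's actual `u.get_sfc_layers` implementation is not shown in this snippet and may differ:

import torch

def lif_step(membrane, input_current, threshold=1.0, decay=0.99):
    # leak the membrane potential, then integrate the incoming current
    membrane = membrane * decay + input_current
    # emit a spike wherever the threshold is crossed
    spikes = (membrane >= threshold).float()
    # reset the membrane of the units that fired
    membrane = membrane * (1.0 - spikes)
    return membrane, spikes

membrane = torch.zeros(8)
for _ in range(100):  # corresponds to the `steps` argument
    membrane, spikes = lif_step(membrane, torch.rand(8) * 0.1)
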
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        conv2d_channels,
        hidden_sizes,
        kernel_sizes,
        strides,
        paddings,
        #pooling_kernels,
        #pooling_strides,
        #activation="lif",
        #activation_out="lif",
        pooling="avg",
        steps=100,
        threshold=1,
        decay=0.99,
        adapt_threshold=True,
        threshold_width=0.1,
        delta_threshold=0.0001,
        rho=0.0001,
        epsilon=0.05,
        inactivity_threshold=0,
        delta_w=0.01,
        #pool_threshold=0.75,
        encoder_params={"encoder": "first"},
        decoder_params={"decoder": "max"},
        device="cuda",
        reset=True,
    ):
        super(SCNNAutoencoderModelNew, self).__init__()

        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.conv2d_channels = conv2d_channels
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.paddings = paddings
        #self.pooling_kernels = pooling_kernels
        #self.pooling_strides = pooling_strides
        self.hidden_sizes = hidden_sizes
        self.device = device
        self.steps = steps
        self.reset = reset

        # define convolutional encoder layers
        sconv_encoder_args = dict(
            input_channels=self.input_channels,
            channels=self.conv2d_channels,
            kernel_sizes=self.kernel_sizes,
            strides=self.strides,
            paddings=self.paddings,
            #activations=[activation for i in range(len(self.conv2d_channels))],
            #pooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
            #pooling_kernels=self.pooling_kernels,
            #pooling_strides=self.pooling_strides,
            thresholds=[threshold for i in range(len(self.conv2d_channels))],
            decays=[decay for i in range(len(self.conv2d_channels))],
            adapt_thresh=adapt_threshold,
            threshold_widths=[threshold_width for i in range(len(self.conv2d_channels))],
            delta_thresholds=[delta_threshold for i in range(len(self.conv2d_channels))],
            rhos=[rho for i in range(len(self.conv2d_channels))],
            epsilons=[epsilon for i in range(len(self.conv2d_channels))],
            inactivity_thresholds=[inactivity_threshold for i in range(len(self.conv2d_channels))],
            delta_ws=[delta_w for i in range(len(self.conv2d_channels))],
            device=self.device,
            reset=self.reset,
            #pool_thresholds=[pool_threshold for i in range(len(self.conv2d_channels))],
        )
        sconv_encoder_parameters = u.get_sconv2dlif_layers(**sconv_encoder_args)
        self.conv_encoder_layers = u.build_layers(sconv_encoder_parameters).to(self.device)

        # get flattened input size
        x = torch.randn(self.input_channels, self.input_width, self.input_height).view(
            -1, self.input_channels, self.input_width, self.input_height
        ).to(self.device)

        x_ = self.conv_encode(x)
        self._to_linear = x_[0].shape[0] * x_[0].shape[1] * x_[0].shape[2]
        self._from_linear = (x_[0].shape[0], x_[0].shape[1], x_[0].shape[2])

        # define fully connected encoder layers
        #self.fc_encoder_activations = [activation for i in range(len(hidden_sizes))]

        sfc_encoder_args = dict(
            input_size=self._to_linear,
            hidden_sizes=self.hidden_sizes[:-1],
            output_size=self.hidden_sizes[-1],
            #activations=self.fc_encoder_activations,
            thresholds=[threshold for i in range(len(self.hidden_sizes))],
            decays=[decay for i in range(len(self.hidden_sizes))],
            adapt_thresh=adapt_threshold,
            threshold_widths=[threshold_width for i in range(len(self.hidden_sizes))],
            delta_thresholds=[delta_threshold for i in range(len(self.hidden_sizes))],
            rhos=[rho for i in range(len(self.hidden_sizes))],
            inactivity_thresholds=[inactivity_threshold for i in range(len(self.hidden_sizes))],
            delta_ws=[delta_w for i in range(len(self.hidden_sizes))],
            epsilons=[epsilon for i in range(len(self.hidden_sizes))],
            device=self.device,
            reset=self.reset,
        )

        sfc_encoder_parameters = u.get_sfclif_layers(**sfc_encoder_args)
        self.fc_encoder_layers = u.build_layers(sfc_encoder_parameters)

        # define fully connected decoder layers
        #self.fc_decoder_activations = [activation for i in range(len(hidden_sizes))]
        decoder_sizes = self.hidden_sizes[:-1]
        decoder_sizes.reverse()

        sfc_decoder_args = dict(
            input_size=self.hidden_sizes[-1],
            hidden_sizes=decoder_sizes,
            output_size=self._to_linear,
            #activations=self.fc_decoder_activations,
            thresholds=[threshold for i in range(len(self.hidden_sizes))],
            decays=[decay for i in range(len(self.hidden_sizes))],
            adapt_thresh=adapt_threshold,
            threshold_widths=[threshold_width for i in range(len(self.hidden_sizes))],
            delta_thresholds=[delta_threshold for i in range(len(self.hidden_sizes))],
            rhos=[rho for i in range(len(self.hidden_sizes))],
            epsilons=[epsilon for i in range(len(self.hidden_sizes))],
            inactivity_thresholds=[inactivity_threshold for i in range(len(self.hidden_sizes))],
            delta_ws=[delta_w for i in range(len(self.hidden_sizes))],
            device=self.device,
            reset=self.reset,
        )

        sfc_decoder_parameters = u.get_sfclif_layers(**sfc_decoder_args)
        self.fc_decoder_layers = u.build_layers(sfc_decoder_parameters)

        # define convolutional decoder layers
        # copy before reversing so the encoder's stored lists are not mutated in place
        decoder_kernel_sizes = list(reversed(self.kernel_sizes))
        decoder_convtranspose2d_channels = [self.input_channels] + self.conv2d_channels[:-1]
        decoder_convtranspose2d_channels.reverse()
        decoder_strides = list(reversed(self.strides))
        decoder_paddings = list(reversed(self.paddings))
        #unpooling_kernels = self.pooling_kernels
        #unpooling_kernels.reverse()
        #unpooling_strides = self.pooling_strides
        #unpooling_strides.reverse()

        sconv_decoder_args = dict(
            input_channels=x_[0].shape[0],
            channels=decoder_convtranspose2d_channels,
            kernel_sizes=decoder_kernel_sizes,
            strides=decoder_strides,
            paddings=decoder_paddings,
            #activations=[activation for i in range(len(self.conv2d_channels) - 1)] + [activation_out],
            #unpooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
            #unpooling_kernels=unpooling_kernels,
            #unpooling_strides=unpooling_strides,
            thresholds=[threshold for i in range(len(self.conv2d_channels))],
            decays=[decay for i in range(len(self.conv2d_channels))],
            adapt_thresh=adapt_threshold,
            threshold_widths=[threshold_width for i in range(len(self.conv2d_channels))],
            delta_thresholds=[delta_threshold for i in range(len(self.conv2d_channels))],
            rhos=[rho for i in range(len(self.conv2d_channels))],
            epsilons=[epsilon for i in range(len(self.conv2d_channels))],
            inactivity_thresholds=[inactivity_threshold for i in range(len(self.conv2d_channels))],
            delta_ws=[delta_w for i in range(len(self.conv2d_channels))],
            device=self.device,
            reset=self.reset,
            #pool_thresholds=[pool_threshold for i in range(len(self.conv2d_channels))],
        )
        sconv_decoder_parameters = u.get_sconvtranspose2dlif_layers(**sconv_decoder_args)
        self.conv_decoder_layers = u.build_layers(sconv_decoder_parameters)

        # initialize input encoder
        self.input_encoder = get_input_encoder(**encoder_params)

        # initialize output decoder
        self.output_decoder = get_output_decoder(**decoder_params)
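
Note that the decoder's kernel, stride, and padding lists above are built from copies of the stored encoder attributes; a plain assignment would only alias the list, and an in-place `reverse()` would silently flip the encoder's configuration as well. A small illustration:

kernel_sizes = [5, 3, 3]

aliased = kernel_sizes
aliased.reverse()
print(kernel_sizes)           # [3, 3, 5] -- the original list was mutated

kernel_sizes = [5, 3, 3]
copied = list(reversed(kernel_sizes))
print(kernel_sizes, copied)   # [5, 3, 3] [3, 3, 5] -- original left intact
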
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        conv2d_channels,
        hidden_sizes,
        kernel_sizes,
        strides,
        paddings,
        pooling_kernels,
        pooling_strides,
        encoder_params={"encoder": "noisy"},
        activation="relu",
        activation_out="logsoftmax",
        pooling="avg",
    ):
        super(CNNAutoencoderModel, self).__init__()

        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.conv2d_channels = conv2d_channels
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.paddings = paddings
        self.pooling_kernels = pooling_kernels
        self.pooling_strides = pooling_strides
        self.hidden_sizes = hidden_sizes
        # self.fc_activations = [activation for i in range(len(hidden_sizes))] + [activation_out]

        # define convolutional encoder layers
        conv_encoder_args = dict(
            input_channels=self.input_channels,
            channels=self.conv2d_channels,
            kernel_sizes=self.kernel_sizes,
            strides=self.strides,
            paddings=self.paddings,
            activations=[activation for i in range(len(self.conv2d_channels))],
            pooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
            pooling_kernels=self.pooling_kernels,
            pooling_strides=self.pooling_strides,
        )
        conv_encoder_parameters = u.get_conv2d_layers(**conv_encoder_args)
        self.conv_encoder_layers = u.build_layers(conv_encoder_parameters)

        # get flattened input size and reverse transformation
        x = torch.randn(self.input_channels, self.input_width, self.input_height).view(
            -1, self.input_channels, self.input_width, self.input_height
        )
        x_ = self.conv_encode(x)
        self._to_linear = x_[0].shape[0] * x_[0].shape[1] * x_[0].shape[2]
        self._from_linear = (x_[0].shape[0], x_[0].shape[1], x_[0].shape[2])

        # define fully connected encoder layers
        self.fc_encoder_activations = [activation for i in range(len(hidden_sizes))]

        fc_encoder_args = dict(
            input_size=self._to_linear,
            hidden_sizes=self.hidden_sizes[:-1],
            output_size=self.hidden_sizes[-1],
            activations=self.fc_encoder_activations,
        )
        fc_encoder_parameters = u.get_fc_layers(**fc_encoder_args)
        self.fc_encoder_layers = u.build_layers(fc_encoder_parameters)

        # define fully connected decoder layers
        self.fc_decoder_activations = [activation for i in range(len(hidden_sizes))]
        decoder_sizes = self.hidden_sizes[:-1]
        decoder_sizes.reverse()

        fc_decoder_args = dict(
            input_size=self.hidden_sizes[-1],
            hidden_sizes=decoder_sizes,
            output_size=self._to_linear,
            activations=self.fc_decoder_activations,
        )

        fc_decoder_parameters = u.get_fc_layers(**fc_decoder_args)
        self.fc_decoder_layers = u.build_layers(fc_decoder_parameters)

        # define convolutional decoder layers
        # copy before reversing so the encoder's stored lists are not mutated in place
        decoder_kernel_sizes = list(reversed(self.kernel_sizes))
        decoder_convtranspose2d_channels = [self.input_channels] + self.conv2d_channels[:-1]
        decoder_convtranspose2d_channels.reverse()
        decoder_strides = list(reversed(self.strides))
        decoder_paddings = list(reversed(self.paddings))
        unpooling_kernels = list(reversed(self.pooling_kernels))
        unpooling_strides = list(reversed(self.pooling_strides))

        conv_decoder_args = dict(
            input_channels=x_[0].shape[0],
            channels=decoder_convtranspose2d_channels,
            kernel_sizes=decoder_kernel_sizes,
            strides=decoder_strides,
            paddings=decoder_paddings,
            activations=[activation for i in range(len(self.conv2d_channels) - 1)] + [activation_out],
            unpooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
            unpooling_kernels=unpooling_kernels,
            unpooling_strides=unpooling_strides,
        )
        conv_decoder_parameters = u.get_convtranspose2d_layers(**conv_decoder_args)
        self.conv_decoder_layers = u.build_layers(conv_decoder_parameters)

        # initialize input encoder
        self.input_encoder = get_input_encoder(**encoder_params)
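
`_to_linear` and `_from_linear` record how to flatten the convolutional feature maps for the fully connected encoder and how to restore their shape for the convolutional decoder. A minimal sketch of that round trip, with an assumed 8×7×7 feature size (not taken from the source):

import torch

_from_linear = (8, 7, 7)                                            # assumed example feature shape
_to_linear = _from_linear[0] * _from_linear[1] * _from_linear[2]    # 392

features = torch.randn(4, *_from_linear)               # a batch of conv feature maps
flat = features.view(features.size(0), -1)             # (4, 392) for the fc encoder
restored = flat.view(features.size(0), *_from_linear)  # back to (4, 8, 7, 7) for the conv decoder
assert flat.shape == (4, _to_linear) and restored.shape == features.shape
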
Example #6
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        conv2d_channels,
        hidden_sizes,
        kernel_sizes,
        strides,
        paddings,
        pooling_kernels,
        pooling_strides,
        activation="relu",
        activation_out="logsoftmax",
        pooling="avg",
        n_out=2,
    ):
        super(CNNModel, self).__init__()

        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.conv2d_channels = conv2d_channels
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.paddings = paddings
        self.pooling_kernels = pooling_kernels
        self.pooling_strides = pooling_strides
        self.hidden_sizes = hidden_sizes
        self.fc_activations = [activation for i in range(len(hidden_sizes))
                               ] + [activation_out]
        self.n_out = n_out

        # define convolutional layers
        conv_args = dict(
            input_channels=self.input_channels,
            channels=self.conv2d_channels,
            kernel_sizes=self.kernel_sizes,
            strides=self.strides,
            paddings=self.paddings,
            activations=[activation for i in range(len(self.conv2d_channels))],
            pooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
            pooling_kernels=self.pooling_kernels,
            pooling_strides=self.pooling_strides,
        )
        conv_parameters = u.get_conv2d_layers(**conv_args)
        self.conv_layers = u.build_layers(conv_parameters)

        # get flattened input size
        x = torch.randn(self.input_channels, self.input_width,
                        self.input_height).view(-1, self.input_channels,
                                                self.input_width,
                                                self.input_height)
        x_ = self.convs(x)
        self._to_linear = x_[0].shape[0] * x_[0].shape[1] * x_[0].shape[2]

        # define fully connected layers
        fc_args = dict(input_size=self._to_linear,
                       hidden_sizes=self.hidden_sizes,
                       output_size=self.n_out,
                       activations=self.fc_activations)
        fc_parameters = u.get_fc_layers(**fc_args)
        self.fc_layers = u.build_layers(fc_parameters)
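
Here the flattened size is obtained by actually pushing the dummy tensor through the convolutional stack (`self.convs`), since pooling and strides change the spatial dimensions. A self-contained version of the same trick with an assumed example layer configuration (the real layers come from `u.get_conv2d_layers` / `u.build_layers`):

import torch
import torch.nn as nn

# assumed example stack, not the model's actual configuration
convs = nn.Sequential(
    nn.Conv2d(1, 16, kernel_size=5, stride=1),
    nn.ReLU(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(16, 32, kernel_size=3, stride=1),
    nn.ReLU(),
    nn.AvgPool2d(kernel_size=2, stride=2),
)

x = torch.randn(1, 1, 28, 28)        # dummy input with the expected image shape
x_ = convs(x)
_to_linear = x_[0].shape[0] * x_[0].shape[1] * x_[0].shape[2]
print(x_.shape, _to_linear)          # torch.Size([1, 32, 5, 5]) 800
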
    def __init__(
        self,
        input_width,
        input_height,
        input_channels,
        conv2d_channels,
        hidden_sizes,
        kernel_sizes,
        strides,
        paddings,
        pooling_kernels,
        pooling_strides,
        activation="lif",
        activation_out="lif",
        pooling="avg",
        steps=100,
        threshold=1,
        decay=0.99,
        pool_threshold=0.75,
        n_out=2,
        device="cuda",
    ):
        super(SCNNModel, self).__init__()

        self.input_width = input_width
        self.input_height = input_height
        self.input_channels = input_channels
        self.conv2d_channels = conv2d_channels
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.paddings = paddings
        self.pooling_kernels = pooling_kernels
        self.pooling_strides = pooling_strides
        self.hidden_sizes = hidden_sizes
        self.fc_activations = [activation for i in range(len(hidden_sizes))
                               ] + [activation_out]
        self.n_out = n_out
        self.device = device
        self.steps = steps

        # define convolutional layers
        conv_args = dict(
            input_channels=self.input_channels,
            channels=self.conv2d_channels,
            kernel_sizes=self.kernel_sizes,
            strides=self.strides,
            paddings=self.paddings,
            activations=[activation for i in range(len(self.conv2d_channels))],
            pooling_funcs=[pooling for i in range(len(self.conv2d_channels))],
            pooling_kernels=self.pooling_kernels,
            pooling_strides=self.pooling_strides,
            thresholds=[threshold for i in range(len(self.conv2d_channels))],
            decays=[decay for i in range(len(self.conv2d_channels))],
            pool_thresholds=[
                pool_threshold for i in range(len(self.conv2d_channels))
            ],
        )
        sconv_parameters = u.get_sconv2d_layers(**conv_args)
        self.conv_layers = u.build_layers(sconv_parameters)

        # get flattened input size
        x = torch.randn(self.input_channels, self.input_width,
                        self.input_height).view(-1, self.input_channels,
                                                self.input_width,
                                                self.input_height)
        x_ = self.convs(x)
        self._to_linear = x_[0].shape[0] * x_[0].shape[1] * x_[0].shape[2]

        # define fully connected layers
        fc_args = dict(
            input_size=self._to_linear,
            hidden_sizes=self.hidden_sizes,
            output_size=self.n_out,
            activations=self.fc_activations,
            thresholds=[threshold for i in range(len(self.fc_activations))],
            decays=[decay for i in range(len(self.fc_activations))],
        )

        fc_parameters = u.get_sfc_layers(**fc_args)
        self.fc_layers = u.build_layers(fc_parameters)