Example #1
    def __init__(self,
                 input_size=28,
                 hidden_size=100,
                 output=10,
                 tasks=8,
                 s_init=False,
                 beta=False):
        super(ATwoLayer, self).__init__()

        # A stack of fully-connected ALinear layers applied one after another
        # self.l1 = ALinear(784, hidden_size, datasets=tasks, same_init=s_init, Beta=beta)
        self.l1 = ALinear(input_size,
                          hidden_size,
                          datasets=tasks,
                          same_init=s_init,
                          Beta=beta)
        self.l2 = ALinear(hidden_size,
                          hidden_size,
                          datasets=tasks,
                          same_init=s_init,
                          Beta=beta)
        self.l3 = ALinear(hidden_size,
                          hidden_size,
                          datasets=tasks,
                          same_init=s_init,
                          Beta=beta)
        self.l4 = ALinear(hidden_size,
                          output,
                          datasets=tasks,
                          same_init=s_init,
                          Beta=beta)
        self.relu = nn.ReLU()
        self.ls = nn.LogSoftmax(dim=1)
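For reference, a plausible forward pass for ATwoLayer is sketched below. It is not taken from the source; it assumes ALinear.forward accepts a task index (mirroring the conv(x, 0) calls in Example #4) and that image batches are flattened before the first layer.

    # Hypothetical sketch (not from the source): forward pass through the four
    # ALinear layers with a task index, ending in log-probabilities.
    def forward(self, x, task=0):
        x = x.view(x.size(0), -1)            # flatten image batches to vectors
        x = self.relu(self.l1(x, task))
        x = self.relu(self.l2(x, task))
        x = self.relu(self.l3(x, task))
        return self.ls(self.l4(x, task))     # LogSoftmax over `output` classes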
Example #2
    def __init__(self,
                 data_dim,
                 hidden_dim,
                 augment_dim=0,
                 time_dependent=False,
                 non_linearity='relu'):
        super(AODEFunc, self).__init__()
        self.augment_dim = augment_dim
        self.data_dim = data_dim
        self.input_dim = data_dim + augment_dim
        self.hidden_dim = hidden_dim
        self.nfe = 0  # Number of function evaluations
        self.time_dependent = time_dependent

        if time_dependent:
            self.fc1 = ALinear(self.input_dim + 1, hidden_dim)
        else:
            self.fc1 = ALinear(self.input_dim, hidden_dim)
        self.fc2 = ALinear(hidden_dim, hidden_dim)
        self.fc3 = ALinear(hidden_dim, self.input_dim)

        if non_linearity == 'relu':
            self.non_linearity = nn.ReLU(inplace=True)
        elif non_linearity == 'softplus':
            self.non_linearity = nn.Softplus()
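A plausible forward pass for AODEFunc, sketched under the usual Neural-ODE convention f(t, x); it is not taken from the source. It assumes time dependence is handled by concatenating t to the state, which is what the `self.input_dim + 1` in fc1 suggests.

    # Hypothetical sketch (not from the source): one evaluation of the ODE dynamics.
    def forward(self, t, x):
        self.nfe += 1                                     # track function evaluations
        if self.time_dependent:
            t_vec = torch.ones(x.shape[0], 1, device=x.device) * t
            x = torch.cat([t_vec, x], dim=1)              # append time as an extra feature
        out = self.non_linearity(self.fc1(x))
        out = self.non_linearity(self.fc2(out))
        return self.fc3(out)                              # output shape matches input_dim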
Example #3
    def __init__(self, input_shape, tasks=1):
        """
        :param input_shape: input image shape, (h, w, c)
        """
        super(ANet2, self).__init__()

        self.features = Sequential(
            AConv2d(input_shape[-1], 64, kernel_size=10, datasets=tasks),
            ReLU(), MaxPool2d(kernel_size=(2, 2), stride=2),
            AConv2d(64, 128, kernel_size=7, datasets=tasks), ReLU(),
            MaxPool2d(kernel_size=(2, 2), stride=2),
            AConv2d(128, 128, kernel_size=4, datasets=tasks), ReLU(),
            MaxPool2d(kernel_size=(2, 2), stride=2),
            AConv2d(128, 256, kernel_size=4, datasets=tasks), ReLU())

        # self.features.forward = mod_forward

        # Compute number of input features for the last fully-connected layer
        input_shape = (1, ) + input_shape[::-1]
        x = Variable(torch.rand(input_shape), requires_grad=False)
        x = mod_forward(x, 0, self.features)
        x = Flatten()(x)
        n = x.size()[1]

        self.classifier = ALinear(n, 4096, datasets=tasks)
Example #4
    def __init__(self, input_shape, tasks=1):
        """
        :param input_shape: input image shape, (h, w, c)
        """
        super(ANet, self).__init__()

        self.conv1 = AConv2d(input_shape[-1],
                             64,
                             kernel_size=10,
                             datasets=tasks)
        self.mp1 = MaxPool2d(kernel_size=(2, 2), stride=2)

        self.conv2 = AConv2d(64, 128, kernel_size=7, datasets=tasks)
        self.mp2 = MaxPool2d(kernel_size=(2, 2), stride=2)

        self.conv3 = AConv2d(128, 128, kernel_size=4, datasets=tasks)
        self.mp3 = MaxPool2d(kernel_size=(2, 2), stride=2)

        self.conv4 = AConv2d(128, 256, kernel_size=4, datasets=tasks)

        self.relu = nn.ReLU()

        # Compute number of input features for the last fully-connected layer
        input_shape = (1, ) + input_shape[::-1]
        x = Variable(torch.rand(input_shape), requires_grad=False)
        x = self.mp1(self.relu(self.conv1(x, 0)))
        x = self.mp2(self.relu(self.conv2(x, 0)))
        x = self.mp3(self.relu(self.conv3(x, 0)))
        x = self.relu(self.conv4(x, 0))

        x = Flatten()(x)
        n = x.size()[1]

        self.linear = ALinear(n, 4096, datasets=tasks)
        self.sm = Sigmoid()
Example #5
    def __init__(self,
                 img_size,
                 num_filters,
                 output_dim=1,
                 augment_dim=0,
                 time_dependent=False,
                 non_linearity='relu',
                 tol=1e-3,
                 adjoint=False):
        super(AConvODENet, self).__init__()
        self.img_size = img_size
        self.num_filters = num_filters
        self.augment_dim = augment_dim
        self.output_dim = output_dim
        self.flattened_dim = (img_size[0] +
                              augment_dim) * img_size[1] * img_size[2]
        self.time_dependent = time_dependent
        self.tol = tol

        odefunc = AConvODEFunc(img_size, num_filters, augment_dim,
                               time_dependent, non_linearity)

        self.odeblock = AODEBlock(odefunc,
                                  is_conv=True,
                                  tol=tol,
                                  adjoint=adjoint)

        self.linear_layer = ALinear(self.flattened_dim, self.output_dim)
Example #6
    def __init__(self, input_shape, tasks=1):
        """
        :param input_shape: input image shape, (h, w, c)
        """
        super(ASiameseNetworks, self).__init__()
        self.net = ANet(input_shape)

        self.classifier = ALinear(4096, 1, bias=False, datasets=tasks)
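A Koch-style siamese forward pass would fit this constructor; the sketch below is an assumption, not the source's code. It embeds both inputs with the shared ANet, compares the embeddings with an L1 distance, and scores the pair with the classifier head; whether ANet.forward and the classifier take a task index is also assumed.

    # Hypothetical sketch (not from the source): pairwise similarity score.
    def forward(self, x1, x2, task=0):
        e1 = self.net(x1, task)                  # 4096-d embedding of the first image
        e2 = self.net(x2, task)                  # 4096-d embedding of the second image
        return self.classifier(torch.abs(e1 - e2), task)  # similarity logit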
Example #7
    def __init__(self,
                 layer_size=64,
                 output_shape=55,
                 input_size=784,
                 num_channels=1,
                 keep_prob=1.0,
                 image_size=28,
                 tasks=1,
                 bn_boole=False):
        super(ClassifierMLP, self).__init__()
        """
        Build an MLP to produce embeddings
        :param layer_size: 64 (default)
        :param num_channels:
        :param keep_prob:
        :param image_size:
        """
        self.conv1 = ALinear(num_channels * input_size,
                             layer_size,
                             datasets=tasks)
        self.conv2 = ALinear(layer_size, layer_size, datasets=tasks)
        self.conv3 = ALinear(layer_size, layer_size, datasets=tasks)
        self.conv4 = ALinear(layer_size, layer_size, datasets=tasks)

        self.bn1 = nn.ModuleList(
            [nn.BatchNorm1d(layer_size) for j in range(tasks)])
        self.bn2 = nn.ModuleList(
            [nn.BatchNorm1d(layer_size) for j in range(tasks)])
        self.bn3 = nn.ModuleList(
            [nn.BatchNorm1d(layer_size) for j in range(tasks)])
        self.bn4 = nn.ModuleList(
            [nn.BatchNorm1d(layer_size) for j in range(tasks)])

        self.do = nn.Dropout(keep_prob)
        self.relu = nn.ReLU()
        self.sm = nn.Sigmoid()

        self.outSize = layer_size

        self.linear = ALinear(layer_size,
                              output_shape,
                              datasets=tasks,
                              multi=True)
        self._weight_init()
Example #8
    def __init__(self,
                 layer_size=64,
                 num_channels=1,
                 keep_prob=1.0,
                 image_size=28,
                 tasks=1):
        super(OmniV, self).__init__()
        self.classifier = Classifier(layer_size, num_channels, keep_prob,
                                     image_size, tasks)
        # self.linear = ALinear(self.classifier.outSize, 1, datasets=tasks)
        self.linear = ALinear(28 * 28, 1, datasets=tasks)
Example #9
    def __init__(self,
                 layer_size=64,
                 output_shape=55,
                 num_channels=1,
                 keep_prob=1.0,
                 image_size=28,
                 tasks=1,
                 bn_boole=False):
        super(Classifier, self).__init__()
        """
        Build a CNN to produce embeddings
        :param layer_size:64(default)
        :param num_channels:
        :param keep_prob:
        :param image_size:
        """
        self.conv1 = AConv2d(num_channels, layer_size, 3, 1, 1, datasets=tasks)
        self.conv2 = AConv2d(layer_size, layer_size, 3, 1, 1, datasets=tasks)
        self.conv3 = AConv2d(layer_size, layer_size, 3, 1, 1, datasets=tasks)
        self.conv4 = AConv2d(layer_size, layer_size, 3, 1, 1, datasets=tasks)

        self.bn1 = nn.ModuleList(
            [nn.BatchNorm2d(layer_size) for j in range(tasks)])
        self.bn2 = nn.ModuleList(
            [nn.BatchNorm2d(layer_size) for j in range(tasks)])
        self.bn3 = nn.ModuleList(
            [nn.BatchNorm2d(layer_size) for j in range(tasks)])
        self.bn4 = nn.ModuleList(
            [nn.BatchNorm2d(layer_size) for j in range(tasks)])

        self.mp1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.mp2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.mp3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.mp4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.do = nn.Dropout(keep_prob)
        self.relu = nn.ReLU()
        self.sm = nn.Sigmoid()

        finalSize = int(math.floor(image_size / (2 * 2 * 2 * 2)))
        self.outSize = finalSize * finalSize * layer_size

        self.linear = ALinear(self.outSize,
                              output_shape,
                              datasets=tasks,
                              multi=True)
        self._weight_init()
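The per-task BatchNorm ModuleLists suggest a forward pass that indexes bn1..bn4 by the task id; the sketch below is an assumption, not the source's code, and it also assumes AConv2d/ALinear take the task index as in Example #4.

    # Hypothetical sketch (not from the source): conv -> per-task BN -> ReLU -> pool, four times.
    def forward(self, x, task=0):
        x = self.mp1(self.relu(self.bn1[task](self.conv1(x, task))))
        x = self.mp2(self.relu(self.bn2[task](self.conv2(x, task))))
        x = self.mp3(self.relu(self.bn3[task](self.conv3(x, task))))
        x = self.mp4(self.relu(self.bn4[task](self.conv4(x, task))))
        x = x.view(x.size(0), -1)                # flatten to outSize features
        return self.linear(x, task)              # per-task classification head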
Example #10
    def __init__(self,
                 layer_size=64,
                 num_channels=2,
                 keep_prob=1.0,
                 image_size=28,
                 tasks=1):
        super(Classifier2, self).__init__()
        """
        Build a CNN to produce embeddings
        :param layer_size:64(default)
        :param num_channels:
        :param keep_prob:
        :param image_size:
        """
        self.conv1 = AConv2d(num_channels, layer_size, 3, 1, 1, datasets=tasks)
        self.conv2 = AConv2d(layer_size, layer_size, 3, 1, 1, datasets=tasks)
        self.conv3 = AConv2d(layer_size, layer_size, 3, 1, 1, datasets=tasks)
        self.conv4 = AConv2d(layer_size, layer_size, 3, 1, 1, datasets=tasks)

        self.bn1 = nn.BatchNorm2d(layer_size)
        self.bn2 = nn.BatchNorm2d(layer_size)
        self.bn3 = nn.BatchNorm2d(layer_size)
        self.bn4 = nn.BatchNorm2d(layer_size)

        self.mp1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.mp2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.mp3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.mp4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.do = nn.Dropout(keep_prob)
        self.relu = nn.ReLU()
        self.sm = nn.Sigmoid()

        finalSize = int(math.floor(image_size / (2 * 2 * 2 * 2)))
        self.outSize = finalSize * finalSize * layer_size

        # self.linear = ALinear(self.outSize, 1, datasets=tasks)
        self.linear = ALinear(64, 1, datasets=tasks)
Example #11
    def __init__(self,
                 data_dim,
                 hidden_dim,
                 output_dim=1,
                 augment_dim=0,
                 time_dependent=False,
                 non_linearity='relu',
                 tol=1e-3,
                 adjoint=False):
        super(AODENet, self).__init__()
        self.data_dim = data_dim
        self.hidden_dim = hidden_dim
        self.augment_dim = augment_dim
        self.output_dim = output_dim
        self.time_dependent = time_dependent
        self.tol = tol

        odefunc = AODEFunc(data_dim, hidden_dim, augment_dim, time_dependent,
                           non_linearity)

        self.odeblock = AODEBlock(odefunc, tol=tol, adjoint=adjoint)
        self.linear_layer = ALinear(self.odeblock.odefunc.input_dim,
                                    self.output_dim)
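A forward pass for AODENet in the standard ODE-block-then-linear-readout style; this is a sketch under that assumption, not the source's implementation.

    # Hypothetical sketch (not from the source): integrate the dynamics, then read out.
    def forward(self, x, return_features=False):
        features = self.odeblock(x)              # final state of the ODE integration
        pred = self.linear_layer(features)       # map to output_dim
        return (pred, features) if return_features else pred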