def reset_parameters(self):
    """Build two stacked structured (LDR) layers over all channels plus a 10-way head.

    Reads self.layer_size (falling back to self.in_size when it is -1),
    self.channels, self.class1/self.class2, self.rank1/self.rank2 and
    self.fc_size, all set elsewhere on the module.
    """
    if self.layer_size == -1:
        self.layer_size = self.in_size
    self.n = self.layer_size
    # Both structured layers act on the channels*n flattened input.
    self.LDR1 = sl.StructuredLinear(self.class1,
                                    layer_size=self.channels * self.n,
                                    r=self.rank1, bias=True)
    self.LDR2 = sl.StructuredLinear(self.class2,
                                    layer_size=self.channels * self.n,
                                    r=self.rank2, bias=True)
    # Final classifier: fc_size features -> 10 classes.
    self.logits = nn.Linear(self.fc_size, 10)
def reset_parameters(self):
    """Create the single structured layer W, defaulting both sizes from in_size.

    A sentinel of -1 on layer_size or hidden_size means "use self.in_size".
    """
    if self.layer_size == -1:
        self.layer_size = self.in_size
    if self.hidden_size == -1:
        self.hidden_size = self.in_size
    self.W = sl.StructuredLinear(
        self.class_type,
        layer_size=self.layer_size,
        r=self.r,
        bias=self.bias,
        hidden_size=self.hidden_size,
    )
def reset_parameters(self):
    """Build num_layers identical structured layers plus a dense output head.

    A sentinel layer_size of -1 means "use self.in_size". The layers are
    registered in an nn.ModuleList so their parameters are tracked.
    """
    if self.layer_size == -1:
        self.layer_size = self.in_size
    # The original manual append loop never used its loop index; a
    # comprehension builds the same list of identically-configured layers.
    self.layers = nn.ModuleList([
        sl.StructuredLinear(self.class_type, layer_size=self.layer_size,
                            r=self.r, bias=self.bias)
        for _ in range(self.num_layers)
    ])
    self.W2 = nn.Linear(self.layer_size, self.out_size)
def __init__(self):
    """VAE whose first encoder layer is a structured matrix configured from CLI args.

    Relies on a module-level `args` namespace providing class_type,
    layer_size and r for the structured layer.
    """
    super(VAE, self).__init__()
    # Encoder: structured square layer (size args.layer_size — presumably
    # 784 to match the heads below; confirm against the argument parser),
    # then two heads for the 20-dim latent mean and log-variance.
    self.fc1 = sl.StructuredLinear(class_type=args.class_type,
                                   layer_size=args.layer_size,
                                   r=args.r, bias=False)
    self.fc21 = nn.Linear(784, 20)
    self.fc22 = nn.Linear(784, 20)
    # Decoder: 20 -> 400 -> 784.
    self.fc3 = nn.Linear(20, 400)
    self.fc4 = nn.Linear(400, 784)
def reset_parameters(self):
    """Build a small conv feature extractor plus a structured classifier layer.

    layer_size is one third of in_size — presumably the per-channel size of
    a 3-channel image (TODO confirm against the caller).
    """
    self.layer_size = int(self.in_size / 3)
    if self.hidden_size == -1:
        self.hidden_size = self.layer_size
    # Side length of one (square) channel plane.
    self.d = int(np.sqrt(self.layer_size))
    # 3-channel input -> 6 -> 16 feature maps; 5x5 kernels, 'same' padding.
    self.conv1 = nn.Conv2d(3, 6, 5, padding=2)
    self.pool = nn.MaxPool2d(2, 2)
    self.conv2 = nn.Conv2d(6, 16, 5, padding=2)
    self.W = sl.StructuredLinear(self.class_type,
                                 layer_size=self.layer_size,
                                 r=self.r, bias=self.bias,
                                 hidden_size=self.hidden_size)
    self.logits = nn.Linear(self.hidden_size, self.out_size)
def reset_parameters(self):
    """Build a 1-channel conv stack plus a one-element list of structured layers.

    -1 sentinels default layer_size to in_size and hidden_size to layer_size.
    """
    if self.layer_size == -1:
        self.layer_size = self.in_size
    if self.hidden_size == -1:
        self.hidden_size = self.layer_size
    # NOTE(review): assert is stripped under python -O; kept as in the original.
    assert self.layer_size == self.in_size
    # Side length of the (square) input image.
    self.d = int(np.sqrt(self.layer_size))
    # 1-channel input -> 6 -> 16 feature maps; 5x5 kernels, 'same' padding.
    self.conv1 = nn.Conv2d(1, 6, 5, padding=2)
    self.pool = nn.MaxPool2d(2, 2)
    self.conv2 = nn.Conv2d(6, 16, 5, padding=2)
    # Single structured layer, wrapped in a ModuleList for parameter tracking.
    self.layers = nn.ModuleList([
        sl.StructuredLinear(self.class_type, layer_size=self.layer_size,
                            r=self.r, bias=self.bias,
                            hidden_size=self.hidden_size)
    ])
    self.logits = nn.Linear(self.hidden_size, self.out_size)
def reset_parameters(self):
    """Build the first (optionally channel-aware) layer, six second-stage
    structured layers, a shared bias, and the classifier head.

    The six second-stage layers were six copy-pasted constructor calls; they
    are now built in a loop with setattr, which registers each one as a
    submodule under its original attribute name (LDR211 ... LDR232), so any
    forward() code referencing those names is unaffected.
    """
    self.n = 1024
    self.fc_size = self.n // 2
    if self.channels:
        # Channel-aware LDR layer: 3 input / 3 output channels of size n.
        self.LDR1 = ldr.LDR(self.class1, 3, 3, self.rank1, self.n)
    else:
        # Flattened alternative over the full 3*n input.
        self.LDR1 = sl.StructuredLinear(self.class1, layer_size=3 * self.n,
                                        r=self.rank1)
    # Six identically-configured second-stage layers (previously duplicated
    # by hand). nn.Module.__setattr__ registers each as a submodule.
    for name in ('LDR211', 'LDR212', 'LDR221', 'LDR222', 'LDR231', 'LDR232'):
        setattr(self, name, sl.StructuredLinear(self.class2,
                                                layer_size=self.fc_size,
                                                r=self.rank2))
    # Shared additive bias applied after the second stage.
    self.b = Parameter(torch.zeros(self.fc_size))
    self.logits = nn.Linear(self.fc_size, 10)
def __init__(self, class_type, r, input_size, hidden_size, use_bias=True):
    """LSTM cell whose input-to-hidden map is a structured matrix.

    Args:
        class_type: structured-matrix class passed to sl.StructuredLinear.
        r: rank parameter of the structured matrix.
        input_size: size of the input vector.
        hidden_size: size of the hidden state.
        use_bias: whether to allocate a learned bias for the four gates.
    """
    super(LSTMCell, self).__init__()
    self.input_size = input_size
    self.hidden_size = hidden_size
    self.use_bias = use_bias
    self.class_type = class_type
    self.r = r
    # The input-to-hidden weights are replaced by a single structured layer
    # covering all four gates at once; hidden-to-hidden stays dense.
    self.W_ih = sl.StructuredLinear(class_type, layer_size=4 * hidden_size,
                                    r=r, bias=False)
    self.W_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 * hidden_size))
    # One bias vector across all four gates, or None when disabled.
    self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size)) if use_bias else None
    self.reset_parameters()
def reset_parameters(self):
    """Structured layer W followed by a dense fc layer and a 10-way logit head.

    A layer_size of -1 defaults to self.in_size.
    """
    if self.layer_size == -1:
        self.layer_size = self.in_size
    self.W = sl.StructuredLinear(self.class_type,
                                 layer_size=self.layer_size, r=self.r)
    # 3*1024 = flattened 3-channel 32x32 input — presumably CIFAR; confirm
    # against the data pipeline.
    self.fc = nn.Linear(3 * 1024, self.fc_size)
    self.logits = nn.Linear(self.fc_size, 10)