def __init__(self):
    super(LeNet5, self).__init__()
    self.num_classes = 10
    self.conv1 = layers.ConvVDO(1, 20, 5, padding=0, alpha_shape=(1, 1, 1, 1))
    self.relu1 = nn.ReLU(True)
    self.pool1 = nn.MaxPool2d(2, padding=0)
    self.conv2 = layers.ConvVDO(20, 50, 5, padding=0, alpha_shape=(1, 1, 1, 1))
    self.relu2 = nn.ReLU(True)
    self.pool2 = nn.MaxPool2d(2, padding=0)
    self.flatten = layers.FlattenLayer(800)
    self.dense1 = layers.LinearVDO(800, 500)
    self.bn1 = nn.BatchNorm1d(500)
    self.relu3 = nn.ReLU()
    self.dense2 = layers.LinearVDO(500, 10)
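# For reference, a minimal forward pass consistent with the attribute names
# above (a sketch only; the repo's actual forward method may differ):
def forward(self, x):
    out = self.pool1(self.relu1(self.conv1(x)))
    out = self.pool2(self.relu2(self.conv2(out)))
    out = self.flatten(out)
    out = self.relu3(self.bn1(self.dense1(out)))
    return self.dense2(out)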
def __init__(self):
    super(LeNet5, self).__init__()
    self.num_classes = 10
    if flags.tanh:
        nonlinearity = nn.Tanh
    else:
        nonlinearity = nn.ReLU
    # Conv-BN-Tanh-Pool
    if flags.vdo1:
        self.conv1 = layers.ConvVDO(1, 20, 5, alpha_shape=(1, 1), padding=0, bias=False)
    else:
        self.conv1 = nn.Conv2d(1, 20, 5, padding=0, bias=False)
    self.bn1 = nn.BatchNorm2d(20, affine=not flags.no_biases)
    self.relu1 = nonlinearity()
    self.pool1 = nn.MaxPool2d(2, padding=0)
    # Conv-BN-Tanh-Pool
    if flags.vdo2:
        self.conv2 = layers.ConvVDO(20, 50, 5, alpha_shape=(1, 1), padding=0, bias=False)
    else:
        self.conv2 = nn.Conv2d(20, 50, 5, padding=0, bias=False)
    self.bn2 = nn.BatchNorm2d(50, affine=not flags.no_biases)
    self.relu2 = nonlinearity()
    self.pool2 = nn.MaxPool2d(2, padding=0)
    self.flatten = layers.FlattenLayer(800)
    if not (flags.vdo1 or flags.vdo2 or flags.vdo3 or flags.vdo4):
        self.do1 = nn.Dropout(0.5)
    # Dense-BN-Tanh
    if flags.vdo3:
        self.dense1 = layers.LinearVDO(800, 500, alpha_shape=(1, 1), bias=False)
    else:
        self.dense1 = nn.Linear(800, 500, bias=False)
    self.bn3 = nn.BatchNorm1d(500, affine=not flags.no_biases)
    self.relu3 = nonlinearity()
    # Dense
    if flags.vdo4:
        self.dense2 = layers.LinearVDO(500, 10, alpha_shape=(1, 1), bias=False)
    else:
        self.dense2 = nn.Linear(500, 10, bias=False)
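# Variational-dropout layers are typically trained against an ELBO-style
# objective: data loss plus a KL regularizer. A sketch of collecting the KL
# term, assuming each VDO layer exposes a kl_reg() method returning a scalar
# tensor (hypothetical API; check the actual interface in layers.py):
def total_kl(model):
    kl = 0.0
    for module in model.modules():
        if hasattr(module, 'kl_reg'):
            kl = kl + module.kl_reg()
    return kl

# loss = F.cross_entropy(logits, targets) + beta * total_kl(model)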
def __init__(self):
    super(LeNet5, self).__init__()
    self.num_classes = 10
    self.conv1 = nn.Conv2d(1, 20, 5, padding=0)
    self.relu1 = nn.ReLU(True)
    self.pool1 = nn.MaxPool2d(2, padding=0)
    self.conv2 = nn.Conv2d(20, 50, 5, padding=0)
    self.relu2 = nn.ReLU(True)
    self.pool2 = nn.MaxPool2d(2, padding=0)
    self.flatten = layers.FlattenLayer(800)
    self.do1 = nn.Dropout(0.5)
    self.dense1 = nn.Linear(800, 500)
    self.relu3 = nn.ReLU()
    self.dense2 = nn.Linear(500, 10)
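# The flatten size of 800 follows from MNIST-sized input: 28 -> 24 after the
# 5x5 conv, -> 12 after pooling, -> 8 after the second conv, -> 4 after
# pooling, so 50 * 4 * 4 = 800. A quick shape smoke test (a sketch, assuming
# the class is a standard nn.Module named LeNet5 with a forward method):
import torch

model = LeNet5()
model.eval()
x = torch.randn(8, 1, 28, 28)    # (batch, channels, height, width)
logits = model(x)
assert logits.shape == (8, 10)   # num_classes = 10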
def __init__(self):
    super(LeNet5, self).__init__()
    self.num_classes = 10
    if flags.tanh:
        nonlinearity = nn.Tanh
    else:
        nonlinearity = nn.ReLU
    # Conv-BN-Tanh-Pool
    if flags.var1:
        self.conv1 = layers.ConvVarianceUnif(1, int(20 * flags.width), 5, padding=0, bias=False)
    else:
        self.conv1 = nn.Conv2d(1, int(20 * flags.width), 5, padding=0, bias=False)
    self.bn1 = nn.BatchNorm2d(int(20 * flags.width), affine=not flags.no_biases)
    self.relu1 = nonlinearity()
    self.pool1 = nn.MaxPool2d(2, padding=0)
    # Conv-BN-Tanh-Pool
    if flags.var2:
        self.conv2 = layers.ConvVarianceUnif(int(20 * flags.width), int(50 * flags.width), 5, padding=0, bias=False)
    else:
        self.conv2 = nn.Conv2d(int(20 * flags.width), int(50 * flags.width), 5, padding=0, bias=False)
    self.bn2 = nn.BatchNorm2d(int(50 * flags.width), affine=not flags.no_biases)
    self.relu2 = nonlinearity()
    self.pool2 = nn.MaxPool2d(2, padding=0)
    self.flatten = layers.FlattenLayer(int(800 * flags.width))
    if not (flags.var1 or flags.var2 or flags.var3 or flags.var4):
        self.do1 = nn.Dropout(0.5)
    # Dense-BN-Tanh
    if flags.var3:
        self.dense1 = layers.LinearVarianceUnif(int(800 * flags.width), int(500 * flags.width), bias=False)
    else:
        self.dense1 = nn.Linear(int(800 * flags.width), int(500 * flags.width), bias=False)
    self.bn3 = nn.BatchNorm1d(int(500 * flags.width), affine=not flags.no_biases)
    self.relu3 = nonlinearity()
    # Dense
    if flags.var4:
        self.dense2 = layers.LinearVarianceUnif(int(500 * flags.width), 10, bias=False)
    else:
        self.dense2 = nn.Linear(int(500 * flags.width), 10, bias=False)
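# With the width multiplier, the flatten size int(800 * flags.width) is
# assumed to equal conv2's channel count int(50 * flags.width) times the
# 4x4 spatial map left after the second pool. That holds whenever
# 50 * width is an integer, but not for arbitrary floats, so a cheap guard
# is worth having (illustrative check, not part of the original code):
def check_width(width):
    conv2_channels = int(50 * width)
    flatten_size = int(800 * width)
    assert flatten_size == conv2_channels * 4 * 4, (flatten_size, conv2_channels)

for w in (0.1, 0.5, 1.0, 2.0):
    check_width(w)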