def __init__(self, num_classes=10, num_channels=1):
    """Build the AlexNet layer stack.

    Args:
        num_classes: size of the final classification layer.
        num_channels: number of channels in the input images.
    """
    super(AlexNet, self).__init__()
    self.num_classes = num_classes
    self.input_size = 224  # expected spatial size of the input
    self.dimension = 4     # inputs are rank-4 (NCHW) tensors

    # Feature extractor: five convolutions.
    self.conv1 = layer.Conv2d(num_channels, 64, 11, stride=4, padding=2)
    self.conv2 = layer.Conv2d(64, 192, 5, padding=2)
    self.conv3 = layer.Conv2d(192, 384, 3, padding=1)
    self.conv4 = layer.Conv2d(384, 256, 3, padding=1)
    self.conv5 = layer.Conv2d(256, 256, 3, padding=1)

    # Classifier head; Linear takes the output width only.
    self.linear1 = layer.Linear(4096)
    self.linear2 = layer.Linear(4096)
    self.linear3 = layer.Linear(num_classes)

    # Pooling layers used between the conv stages.
    self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
    self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
    self.pooling3 = layer.MaxPool2d(2, 2, padding=0)
    self.avg_pooling1 = layer.AvgPool2d(3, 2, padding=0)

    # One ReLU instance per activation site.
    self.relu1 = layer.ReLU()
    self.relu2 = layer.ReLU()
    self.relu3 = layer.ReLU()
    self.relu4 = layer.ReLU()
    self.relu5 = layer.ReLU()
    self.relu6 = layer.ReLU()
    self.relu7 = layer.ReLU()

    self.flatten = layer.Flatten()
    self.dropout1 = layer.Dropout()
    self.dropout2 = layer.Dropout()
    self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
def __init__(self, perceptron_size=100, num_classes=10):
    """Two-layer perceptron with a single shared ReLU activation.

    Args:
        perceptron_size: width of the hidden layer.
        num_classes: output dimension (number of classes).
    """
    super(MLP, self).__init__()
    self.num_classes = num_classes
    self.relu = layer.ReLU()
    self.linear1 = layer.Linear(perceptron_size)
    self.linear2 = layer.Linear(num_classes)
    self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
def __init__(self):
    """Build a LeNet-style CNN: two conv+pool stages and two linear layers."""
    # NOTE(review): unlike the sibling model constructors in this file, this
    # __init__ does not call super().__init__() — confirm the base class
    # requires no initialization.
    self.conv1 = layer.Conv2d(1, 20, 5, padding=0)
    self.conv2 = layer.Conv2d(20, 50, 5, padding=0)
    # Linear layers here take explicit (in_features, out_features);
    # 4 * 4 * 50 is presumably the flattened conv output size — verify
    # against the expected input resolution.
    self.linear1 = layer.Linear(4 * 4 * 50, 500)
    self.linear2 = layer.Linear(500, 10)
    self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
    self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
    # One ReLU instance per activation site.
    self.relu1 = layer.ReLU()
    self.relu2 = layer.ReLU()
    self.relu3 = layer.ReLU()
    self.flatten = layer.Flatten()
def __init__(self, num_classes=10, num_channels=1):
    """MNIST-scale CNN whose conv layers carry a fused ReLU activation.

    Args:
        num_classes: output dimension (number of classes).
        num_channels: number of channels in the input images.
    """
    super(CNN, self).__init__()
    self.num_classes = num_classes
    self.input_size = 28  # expected spatial size of the input
    self.dimension = 4    # inputs are rank-4 (NCHW) tensors
    self.conv1 = layer.Conv2d(num_channels, 20, 5, padding=0, activation="RELU")
    self.conv2 = layer.Conv2d(20, 50, 5, padding=0, activation="RELU")
    self.linear1 = layer.Linear(500)
    self.linear2 = layer.Linear(num_classes)
    self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
    self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
    self.relu = layer.ReLU()
    self.flatten = layer.Flatten()
    self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
def __init__(self, block, layers, num_classes=10, num_channels=3):
    """Assemble a ResNet from a residual block type and per-stage depths.

    Args:
        block: residual block class used by ``_make_layer``.
        layers: sequence of four ints — blocks per stage.
        num_classes: output dimension of the final Linear layer.
        num_channels: number of channels in the input images.
    """
    # _make_layer reads/updates self.inplanes, so it is set up front.
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.num_classes = num_classes
    self.input_size = 224  # expected spatial size of the input
    self.dimension = 4     # inputs are rank-4 (NCHW) tensors

    # Stem: 7x7 stride-2 conv, BN, ReLU, 3x3 stride-2 max pool.
    self.conv1 = layer.Conv2d(num_channels, 64, 7, stride=2, padding=3,
                              bias=False)
    self.bn1 = layer.BatchNorm2d(64)
    self.relu = layer.ReLU()
    self.maxpool = layer.MaxPool2d(kernel_size=3, stride=2, padding=1)

    # Four residual stages; _make_layer also returns the constituent
    # sub-layers so they can be registered explicitly below.
    self.layer1, stage1 = self._make_layer(block, 64, layers[0])
    self.layer2, stage2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3, stage3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4, stage4 = self._make_layer(block, 512, layers[3], stride=2)

    # Head: global average pool, flatten, linear classifier.
    self.avgpool = layer.AvgPool2d(7, stride=1)
    self.flatten = layer.Flatten()
    self.fc = layer.Linear(num_classes)
    self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()

    self.register_layers(*stage1, *stage2, *stage3, *stage4)
def __init__(self, onnx_model, num_classes=10, image_size=224, num_channels=3):
    """Wrap a pretrained ONNX backbone with a fresh classification head.

    Args:
        onnx_model: loaded ONNX model handed to the base class.
        num_classes: output dimension of the new Linear head.
        image_size: expected spatial size of the input images.
        num_channels: number of channels in the input images.
    """
    super(MyModel, self).__init__(onnx_model)
    self.dimension = 4  # inputs are rank-4 (NCHW) tensors
    self.num_classes = num_classes
    self.input_size = image_size
    self.num_channels = num_channels
    # NOTE(review): assumes the backbone emits 512 features — verify
    # against the ONNX model's output shape.
    self.linear = layer.Linear(512, num_classes)
def __init__(self, noise_size=100, feature_size=784, hidden_size=128):
    """MLP-based least-squares GAN: generator, discriminator, MSE loss.

    Args:
        noise_size: dimensionality of the generator's input noise.
        feature_size: dimensionality of the generated/real samples.
        hidden_size: hidden-layer width shared by both networks.
    """
    super(LSGAN_MLP, self).__init__()
    self.noise_size = noise_size
    self.feature_size = feature_size
    self.hidden_size = hidden_size

    # Generator: noise -> hidden (ReLU) -> feature (Sigmoid).
    self.gen_net_fc_0 = layer.Linear(self.hidden_size)
    self.gen_net_relu_0 = layer.ReLU()
    self.gen_net_fc_1 = layer.Linear(self.feature_size)
    self.gen_net_sigmoid_1 = layer.Sigmoid()

    # Discriminator: feature -> hidden (ReLU) -> scalar score
    # (no sigmoid — LSGAN trains on raw scores with an MSE loss).
    self.dis_net_fc_0 = layer.Linear(self.hidden_size)
    self.dis_net_relu_0 = layer.ReLU()
    self.dis_net_fc_1 = layer.Linear(1)
    self.mse_loss = layer.MeanSquareError()
def __init__(self, hidden_size, mode='lstm', return_sequences=False, bidirectional=False, num_layers=1):
    """RNN classifier: a CudnnRNN encoder followed by two Linear layers.

    Args:
        hidden_size: hidden-state width of the recurrent layer.
        mode: ``rnn_mode`` passed to CudnnRNN (e.g. ``'lstm'``).
        return_sequences: whether the RNN emits the full sequence.
        bidirectional: whether the recurrent layer is bidirectional.
        num_layers: number of stacked recurrent layers.
    """
    super().__init__()
    batch_first = True
    # Fixes: the default for `bidirectional` was the *string* "False",
    # which is truthy and therefore behaved like True; and `num_layers`
    # was accepted but ignored (hard-coded to 1 in the CudnnRNN call).
    self.lstm = layer.CudnnRNN(
        hidden_size=hidden_size,
        batch_first=batch_first,
        rnn_mode=mode,
        return_sequences=return_sequences,
        num_layers=num_layers,
        dropout=0.9,  # NOTE(review): unusually high dropout — confirm intended
        bidirectional=bidirectional)
    self.l1 = layer.Linear(64)
    self.l2 = layer.Linear(2)
def __init__(self, hidden_size):
    """Masked LSTM encoder followed by a single 2-unit Linear layer.

    Args:
        hidden_size: hidden-state width of the recurrent layer.
    """
    super(LSTMModel3, self).__init__()
    self.lstm = layer.CudnnRNN(hidden_size=hidden_size,
                               batch_first=True,
                               use_mask=True)
    self.l1 = layer.Linear(2)
    # Plain SGD with a fixed learning rate chosen at construction time.
    self.optimizer = opt.SGD(0.1)
def __init__(self, noise_size=100, feature_size=784, hidden_size=128):
    """MLP-based GAN: generator, discriminator, binary cross-entropy loss.

    Args:
        noise_size: dimensionality of the generator's input noise.
        feature_size: dimensionality of the generated/real samples.
        hidden_size: hidden-layer width shared by both networks.
    """
    super(GAN_MLP, self).__init__()
    self.noise_size = noise_size
    self.feature_size = feature_size
    self.hidden_size = hidden_size

    # Generator: noise -> hidden (ReLU) -> feature (Sigmoid).
    self.gen_net_fc_0 = layer.Linear(self.hidden_size)
    self.gen_net_relu_0 = layer.ReLU()
    self.gen_net_fc_1 = layer.Linear(self.feature_size)
    self.gen_net_sigmoid_1 = layer.Sigmoid()

    # Discriminator: feature -> hidden (ReLU) -> probability (Sigmoid).
    self.dis_net_fc_0 = layer.Linear(self.hidden_size)
    self.dis_net_relu_0 = layer.ReLU()
    self.dis_net_fc_1 = layer.Linear(1)
    self.dis_net_sigmoid_1 = layer.Sigmoid()
    self.binary_cross_entropy = layer.BinaryCrossEntropy()
def __init__(self, vocab_size, hidden_size=32):
    """Character-level LSTM language model.

    Args:
        vocab_size: number of distinct characters (input/output width).
        hidden_size: hidden-state width of the LSTM.
    """
    super(CharRNN, self).__init__()
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.rnn = layer.LSTM(vocab_size, hidden_size)
    self.cat = layer.Cat()
    self.reshape1 = layer.Reshape()
    # Projects hidden states back onto the vocabulary.
    self.dense = layer.Linear(hidden_size, vocab_size)
    self.reshape2 = layer.Reshape()
    self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
    self.optimizer = opt.SGD(0.01)
def __init__(self, onnx_model, num_classes=10, num_channels=3, last_layers=-1, in_dim=1000):
    """Wrap a pretrained ONNX backbone, truncated, with a new Linear head.

    Fix: ``self.num_classes`` was assigned twice; the duplicate
    assignment is removed (no behavior change).

    Args:
        onnx_model: loaded ONNX model handed to the base class.
        num_classes: output dimension of the new Linear head.
        num_channels: number of channels in the input images.
        last_layers: index of the last backbone layer to keep.
        in_dim: feature width the backbone feeds into the new head.
    """
    super(MyModel, self).__init__(onnx_model)
    self.num_classes = num_classes
    self.input_size = 224  # expected spatial size of the input
    self.dimension = 4     # inputs are rank-4 (NCHW) tensors
    self.num_channels = num_channels
    self.last_layers = last_layers
    self.linear = layer.Linear(in_dim, num_classes)
def __init__(self, hidden_size):
    """Twin Linear encoders of equal width for two input streams.

    Args:
        hidden_size: output width of both Linear layers.
    """
    super().__init__()
    self.linear_q = layer.Linear(hidden_size)
    self.linear_a = layer.Linear(hidden_size)
def __init__(self, a, b, c):
    """Two chained Linear layers mapping a -> b -> c features.

    Args:
        a: input width of the first layer.
        b: output width of the first layer / input width of the second.
        c: output width of the second layer.
    """
    super(DoubleLinear, self).__init__()
    self.l1 = layer.Linear(a, b)
    self.l2 = layer.Linear(b, c)
def __init__(self, num_classes=10, num_channels=3):
    """Build the Xception backbone and classification head.

    Args:
        num_classes: number of output classes.
        num_channels: number of channels in the input images.
    """
    super(Xception, self).__init__()
    self.num_classes = num_classes
    self.input_size = 299  # expected spatial size of the input
    self.dimension = 4     # inputs are rank-4 (NCHW) tensors

    # Stem: two plain convolutions with BN and ReLU.
    self.conv1 = layer.Conv2d(num_channels, 32, 3, 2, 0, bias=False)
    self.bn1 = layer.BatchNorm2d(32)
    self.relu1 = layer.ReLU()
    self.conv2 = layer.Conv2d(32, 64, 3, 1, 1, bias=False)
    self.bn2 = layer.BatchNorm2d(64)
    self.relu2 = layer.ReLU()
    # (ReLU is applied here in the forward pass.)

    # Entry flow: three down-sampling blocks.
    self.block1 = Block(64, 128, 2, 2, padding=0,
                        start_with_relu=False, grow_first=True)
    self.block2 = Block(128, 256, 2, 2, padding=0,
                        start_with_relu=True, grow_first=True)
    self.block3 = Block(256, 728, 2, 2, padding=0,
                        start_with_relu=True, grow_first=True)

    # Middle flow: eight identical 728-channel blocks (block4..block11),
    # created in order so attribute registration matches the original.
    for idx in range(4, 12):
        setattr(self, 'block%d' % idx,
                Block(728, 728, 3, 1, start_with_relu=True, grow_first=True))

    # Exit flow.
    self.block12 = Block(728, 1024, 2, 2,
                         start_with_relu=True, grow_first=False)
    self.conv3 = layer.SeparableConv2d(1024, 1536, 3, 1, 1)
    self.bn3 = layer.BatchNorm2d(1536)
    self.relu3 = layer.ReLU()
    # (ReLU is applied here in the forward pass.)
    self.conv4 = layer.SeparableConv2d(1536, 2048, 3, 1, 1)
    self.bn4 = layer.BatchNorm2d(2048)
    self.relu4 = layer.ReLU()

    # NOTE(review): the global pool is a MaxPool2d, not average pooling —
    # confirm this matches the intended Xception variant.
    self.globalpooling = layer.MaxPool2d(10, 1)
    self.flatten = layer.Flatten()
    self.fc = layer.Linear(num_classes)
    self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()