Example #1
0
 def __init__(self):
     """Build the conv/fc layers of a small CIFAR-10 CNN.

     Three 3x3 convolutions (3 -> 32 -> 64 -> 128 channels, padding=1 so
     each conv preserves spatial size) followed by two fully-connected layers.
     """
     super().__init__()  # Py3 zero-arg form; was super(Cifar10ConvNet, self)
     self.conv1 = pytk.Conv2d(3, 32, 3, padding=1)
     self.conv2 = pytk.Conv2d(32, 64, 3, padding=1)
     self.conv3 = pytk.Conv2d(64, 128, 3, padding=1)
     # NOTE(review): 4*4*128 assumes the forward pass pools 32x32 inputs
     # down to 4x4 feature maps — confirm against forward().
     self.fc1 = pytk.Linear(4 * 4 * 128, 512)
     self.out = pytk.Linear(512, NUM_CLASSES)
Example #2
0
 def __init__(self):
     """Build a flatten + 3-layer MLP classifier head for MNIST."""
     super().__init__()  # Py3 zero-arg form; was super(MNISTNet2, self)
     self.flatten = nn.Flatten()
     self.linear = nn.Sequential(
         # First layer consumes the flattened image vector.
         pytk.Linear(IMAGE_HEIGHT * IMAGE_WIDTH * NUM_CHANNELS, 128),
         nn.ReLU(),
         nn.Dropout(0.1),
         pytk.Linear(128, 64),
         nn.ReLU(),
         nn.Dropout(0.1),
         # NOTE: training uses nn.CrossEntropyLoss(), which applies
         # log-softmax to the raw logits internally — so no softmax
         # layer is added here.
         pytk.Linear(64, NUM_CLASSES)
     )
Example #3
0
    def __init__(self):
        """Build a 2-conv + 2-fc MNIST classifier with pooling and dropout."""
        super().__init__()  # Py3 zero-arg form; was super(MNISTConvNet2, self)
        self.convNet = nn.Sequential(
            # Conv block 1: 1 input channel (grayscale) -> 128 feature maps.
            pytk.Conv2d(1, 128, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(p=0.20),

            # Conv block 2: 128 -> 64 feature maps.
            pytk.Conv2d(128, 64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(p=0.10),

            nn.Flatten(),

            # NOTE(review): 7*7*64 assumes the conv/pool stack yields 7x7
            # maps from 28x28 input — depends on pytk.Conv2d's padding
            # defaults; confirm before changing kernel sizes.
            pytk.Linear(7 * 7 * 64, 512),
            nn.ReLU(),
            nn.Dropout(p=0.20),

            pytk.Linear(512, NUM_CLASSES)
        )
Example #4
0
File: mnist.py  Project: mjbhobe/dl-pytorch
    def __init__(self, lr):
        """Set up the conv net, loss, accuracy metrics, history and log file.

        Args:
            lr: learning rate, stored for the optimizer configuration.
        """
        super().__init__()  # Py3 zero-arg form; was super(MNISTModel, self)
        # NOTE(review): 7*7*64 depends on pytk.Conv2d's padding defaults
        # (28x28 input assumed) — confirm the flattened feature size.
        self.convNet = nn.Sequential(pytk.Conv2d(1, 128, kernel_size=3),
                                     nn.ReLU(), nn.MaxPool2d(2),
                                     nn.Dropout(p=0.20),
                                     pytk.Conv2d(128, 64, kernel_size=3),
                                     nn.ReLU(), nn.MaxPool2d(2),
                                     nn.Dropout(p=0.10), nn.Flatten(),
                                     pytk.Linear(7 * 7 * 64, 512), nn.ReLU(),
                                     nn.Dropout(p=0.20),
                                     pytk.Linear(512, NUM_CLASSES))
        self.lr = lr
        self.loss_fn = nn.CrossEntropyLoss()
        # Separate metric instances so train/val accuracy accumulate independently.
        self.train_acc = tm.Accuracy()
        self.val_acc = tm.Accuracy()

        # Per-batch values — presumably reduced into self.history at epoch end.
        self.train_batch_losses = []
        self.val_batch_losses = []
        self.train_batch_accs = []
        self.val_batch_accs = []

        self.history = {"loss": [], "acc": [], "val_loss": [], "val_acc": []}
        # NOTE(review): this handle stays open for the model's lifetime and is
        # never closed here — ensure it is closed in teardown to avoid a leak.
        self.log_file = open(os.path.join(os.getcwd(), 'mnist_log.txt'), 'w')
Example #5
0
 def __init__(self, inp_size, hidden1, num_classes):
     """Two-layer feed-forward net: inp_size -> hidden1 -> num_classes.

     Args:
         inp_size: number of input features.
         hidden1: width of the single hidden layer.
         num_classes: number of output units (one per class).
     """
     super().__init__()  # Py3 zero-arg form; was super(WineNet, self)
     self.fc1 = pytk.Linear(inp_size, hidden1)
     self.out = pytk.Linear(hidden1, num_classes)
 def __init__(self, features):
     """Three-layer net: features -> 10 -> 5 -> 1 output unit.

     Args:
         features: number of input features.
     """
     super().__init__()  # Py3 zero-arg form; was super(Net, self)
     self.fc1 = pytk.Linear(features, 10)
     self.fc2 = pytk.Linear(10, 5)
     # NOTE(review): single output unit — looks like regression or a
     # binary-classification logit; confirm against the loss function used.
     self.out = pytk.Linear(5, 1)
Example #7
0
 def __init__(self):
     """Two conv layers plus two fully-connected layers for MNIST digits."""
     super().__init__()  # Py3 zero-arg form; was super(MNISTConvNet, self)
     self.conv1 = pytk.Conv2d(1, 128, kernel_size=3)
     self.conv2 = pytk.Conv2d(128, 64, kernel_size=3)
     # NOTE(review): 7*7*64 assumes the forward pass downsamples 28x28
     # inputs to 7x7 feature maps — confirm against forward().
     self.fc1 = pytk.Linear(7 * 7 * 64, 512)
     self.out = pytk.Linear(512, NUM_CLASSES)
Example #8
0
 def __init__(self):
     """MLP for MNIST: flattened image -> 128 -> 64 -> NUM_CLASSES."""
     super().__init__()  # Py3 zero-arg form; was super(MNISTNet, self)
     self.fc1 = pytk.Linear(IMAGE_HEIGHT * IMAGE_WIDTH * NUM_CHANNELS, 128)
     self.fc2 = pytk.Linear(128, 64)
     self.out = pytk.Linear(64, NUM_CLASSES)
     # Single Dropout module — presumably reused between layers in forward().
     self.dropout = nn.Dropout(0.10)
 def __init__(self, features):
     """Four-layer net: features -> 32 -> 16 -> 8 -> 1 output unit.

     Args:
         features: number of input features.
     """
     super().__init__()  # Py3 zero-arg form; was super(Net, self)
     self.fc1 = pytk.Linear(features, 32)
     self.fc2 = pytk.Linear(32, 16)
     self.fc3 = pytk.Linear(16, 8)
     # NOTE(review): single output unit — regression or binary logit;
     # confirm against the loss function used.
     self.out = pytk.Linear(8, 1)
Example #10
0
 def __init__(self):
     """MLP for Fashion-MNIST: flattened image -> 256 -> 128 -> NUM_CLASSES."""
     super().__init__()  # Py3 zero-arg form; was super(FMNISTNet, self)
     self.fc1 = pytk.Linear(IMAGE_HEIGHT * IMAGE_WIDTH * NUM_CHANNELS, 256)
     self.fc2 = pytk.Linear(256, 128)
     self.out = pytk.Linear(128, NUM_CLASSES)
Example #11
0
 def __init__(self, inp_size, hidden1, num_classes):
     """Two-layer net with explicit ReLU and Dropout modules.

     Args:
         inp_size: number of input features.
         hidden1: width of the hidden layer.
         num_classes: number of output units (one per class).
     """
     super().__init__()  # Py3 zero-arg form; was super(WineNet, self)
     self.fc1 = pytk.Linear(inp_size, hidden1)
     self.relu1 = nn.ReLU()
     self.out = pytk.Linear(hidden1, num_classes)
     self.dropout = nn.Dropout(0.20)