Code Example #1
            x = self.fc2(x)
            # dim=1: log-probabilities over the class dimension
            return x, F.log_softmax(x, dim=1)

    model = Net()
    model = nn.DataParallel(model)  # replicate the model across all visible GPUs

elif args.network == 'Alexnet':
    model = alexnet.AlexNet()
elif args.network == 'Vgg':
    model = vgg.vgg16_mnist_bn()
    print(model)
elif args.network == 'Resnet34':
    model = resnet.ResNet34()
elif args.network == 'Resnet':
    model = resnet.ResNet50()
elif args.network == 'Densenet':
    model = densenet.densenet_cifar()
    #print(model)

if args.cuda:
    model.cuda(args.gpu)  # move model parameters to the GPU selected via --gpu
optimizer = optim.SGD(model.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)


def adjust_learning_rate(optimizer, epoch):
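
This excerpt cuts off at the adjust_learning_rate definition (Code Example #2 shows its body). A minimal usage sketch of how these pieces are typically driven, assuming hypothetical train() and test() routines and an args.epochs flag, none of which appear in the excerpt:

for epoch in range(1, args.epochs + 1):
    adjust_learning_rate(optimizer, epoch)  # apply the step decay before this epoch's batches
    train(epoch)  # hypothetical routine that runs the batches and steps `optimizer`
    test(epoch)   # hypothetical evaluation routine
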
Code Example #2
            x = x.view(-1, 500)  # flatten to (batch, 500) before the fully connected head
            x = F.relu(self.fc1(x))
            x = F.dropout(x, training=self.training)  # dropout is active only in training mode
            x = self.fc2(x)
            # dim=1: log-probabilities over the class dimension
            return x, F.log_softmax(x, dim=1)


    model = Net()

elif args.network == 'Alexnet':
    model = alexnet.AlexNet(num_classes=100)
elif args.network == 'Vgg':
    model = vgg.vgg16()
    print(model)
elif args.network == 'Resnet':
    model = resnet.ResNet50(num_classes=100)
elif args.network == 'Densenet':
    model = densenet.densenet_cifar(num_classes=100)
    #print(model)

if args.cuda:
    model.cuda(args.gpu)  # move model parameters to the GPU selected via --gpu
optimizer = optim.SGD(model.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)

def adjust_learning_rate(optimizer, epoch):
    # step decay: scale the base rate by 0.1 every args.lr_decay epochs
    lr = args.lr * (0.1 ** (epoch // args.lr_decay))
    for param_group in optimizer.param_groups:  # push the new rate into every parameter group
        param_group['lr'] = lr
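
This hand-rolled schedule is the same step decay that torch.optim.lr_scheduler.StepLR implements. A minimal equivalent sketch, assuming args.lr_decay is the decay interval in epochs and the same hypothetical train() routine as above:

from torch.optim.lr_scheduler import StepLR

scheduler = StepLR(optimizer, step_size=args.lr_decay, gamma=0.1)  # multiply lr by 0.1 every lr_decay epochs
for epoch in range(args.epochs):  # hypothetical epoch loop
    train(epoch)                  # hypothetical training routine
    scheduler.step()              # advance the schedule once per epoch

One practical difference: adjust_learning_rate recomputes the rate from args.lr on every call, so any epoch number yields the correct value, whereas StepLR keeps its own counter, which must be checkpointed alongside the optimizer when training is resumed.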