Example #1
0
    def __init__(self, args):
        """Build an ensemble of LeNet models and load the chosen dataset.

        Hyper-parameters come from the parsed CLI namespace ``args``;
        the epoch budget is fixed at 200.
        """
        # Copy CLI hyper-parameters onto the trainer.
        self.lr = args.lr
        self.wd = args.wd
        self.epochs = 200
        self.dataset = args.dataset
        self.test_uncertainty = args.test_uncertainty
        self.vote = args.vote
        self.n_models = args.n_models
        self.device = torch.device('cuda')
        torch.manual_seed(8734)  # fixed seed for reproducibility

        # Instantiate the ensemble members, then give each its optimizer.
        ensemble = []
        for _ in range(self.n_models):
            ensemble.append(models.LeNet().to(self.device))
        self.ensemble = ensemble
        self.attach_optimizers()

        # Dispatch on the dataset name; unknown names leave the data
        # attributes unset, exactly as the original if/elif did.
        loaders = {'mnist': datagen.load_mnist, 'cifar': datagen.load_cifar}
        if self.dataset in loaders:
            self.data_train, self.data_test = loaders[self.dataset]()

        self.best_test_acc = 0.
        self.best_test_loss = np.inf
        print(self.ensemble[0], ' X {}'.format(self.n_models))
Example #2
0
print('step1:load the datasets...')

data, label = read_img(path)  # read_img(): load image data and matching labels
x_train, y_train, x_val, y_val = shuffle_data(
    data, label)  # shuffle_data(): shuffle the dataset and split train/validation

# step2: build the model, then train and evaluate
print('step2: build the model and training...')

# Input placeholders (TF1-style graph construction).
# NOTE(review): w, h, c are image width/height/channels defined above this
# chunk — confirm against the caller.
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[
    None,
], name='y_')

logits, pred = lenet.LeNet(x)  # pred is the softmax of logits
loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
#train_op = tf.train.AdadeltaOptimizer(learning_rate=0.001).minimize(loss)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
    loss)  # Adam, unlike the commented-out Adadelta optimizer above
# Per-sample correctness: argmax over class logits compared to the labels.
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

num_epoches = 10
batch_size = 64

# Histories collected during training/validation.
train_losses = []
train_acces = []
val_losses = []
val_acces = []
sess = tf.InteractiveSession()
                               download=False)  # 为True需要先下载,然后处理;为False使用下载完成的

test_dataset = datasets.MNIST(root='./MNIST_data',
                              train=False,
                              transform=data_tf)

train_loader = DataLoader(train_dataset, batch_size=batch_size,
                          shuffle=True)  # training set; shuffle=True randomizes sample order each epoch
test_loader = DataLoader(test_dataset, batch_size=batch_size,
                         shuffle=False)  # test set; deterministic iteration order

# e.g. 60000 training images with batch_size 50 => 60000/50 = 1200 batches,
# i.e. the model sees 50 samples per step, 1200 steps per epoch
# print(len(train_loader))

# Model selection
model = lenet.LeNet()
#print(model) # inspect the model architecture
criterion = nn.CrossEntropyLoss()  # loss function
optimizer = optim.SGD(model.parameters(), lr=learning_rate)  # optimizer

# Empty histories to be filled in during training/evaluation.
train_losses = []
train_acces = []
eval_losses = []
eval_acces = []

for epoch in range(num_epoches):
    # Per-epoch training pass (loop body continues past this chunk).
    train_loss = 0
    train_acc = 0
    model.train()
Example #4
0
from models import preact_resnet_model
from models import alexnet
from models import lenet

# Registry mapping model-name strings to *instantiated* model objects.
# NOTE: every model here is constructed eagerly at import time, which is
# memory-heavy; callers receive shared instances, not fresh copies.
# NOTE(review): `resnet_model` is not imported in the visible lines —
# presumably imported above this chunk; verify.
_MODEL_DICT = {
    'resnet18': resnet_model.ResNet18(),
    'resnet34': resnet_model.ResNet34(),
    'resnet50': resnet_model.ResNet50(),
    'resnet101': resnet_model.ResNet101(),
    'resnet152': resnet_model.ResNet152(),
    'preact_resnet18': preact_resnet_model.PreActResNet18(),
    'preact_resnet34': preact_resnet_model.PreActResNet34(),
    'preact_resnet50': preact_resnet_model.PreActResNet50(),
    'preact_resnet101': preact_resnet_model.PreActResNet101(),
    'preact_resnet152': preact_resnet_model.PreActResNet152(),
    'alexnet': alexnet.AlexNet(),
    'lenet': lenet.LeNet()
}


def _invalid_model_name():
    raise ValueError("Not a valid model name")


def fetch_teacher(model_name, model_dicts=_MODEL_DICT):
    """Return the teacher model registered under *model_name*.

    Raises ValueError (via _invalid_model_name) for unknown names; the
    helper was previously dead code and a bare KeyError leaked instead.
    """
    try:
        return model_dicts[model_name]
    except KeyError:
        _invalid_model_name()


def fetch_student(model_name, model_dicts=_MODEL_DICT):
    """Return the student model registered under *model_name*.

    Raises ValueError (via _invalid_model_name) for unknown names; the
    helper was previously dead code and a bare KeyError leaked instead.
    """
    try:
        return model_dicts[model_name]
    except KeyError:
        _invalid_model_name()
                                              num_workers=2)

    # CIFAR-100 test split; download on demand, apply the test-time transform.
    testset = torchvision.datasets.CIFAR100(root='./data',
                                            train=False,
                                            download=True,
                                            transform=transform_test)
    # Fixed-order evaluation loader: no shuffling, 2 worker processes.
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=False,
                                             num_workers=2)
    num_classes = 100
else:
    raise "only support dataset CIFAR10 or CIFAR100"

# Build the network selected on the command line. The chain may continue
# with further elif branches below this chunk.
if args.model == "lenet":
    net = lenet.LeNet(num_classes=num_classes)

elif args.model == "vgg16":
    net = vgg.vgg16(num_classes=num_classes, pretrained=args.pretrain)
elif args.model == "vgg16_bn":
    net = vgg.vgg16_bn(num_classes=num_classes, pretrained=args.pretrain)

elif args.model == "resnet18":
    net = resnet.resnet18(num_classes=num_classes, pretrained=args.pretrain)
elif args.model == "resnet34":
    # BUG FIX: this branch previously built resnet.resnet18 (copy-paste error)
    net = resnet.resnet34(num_classes=num_classes, pretrained=args.pretrain)
elif args.model == "resnet50":
    net = resnet.resnet50(num_classes=num_classes, pretrained=args.pretrain)

elif args.model == "resnetv2_18":
    net = resnet_v2.resnet18(num_classes=num_classes, pretrained=args.pretrain)