testloader = torch.utils.data.DataLoader(
    testset,
    batch_size=BATCH_SIZE,
    shuffle=False,
    num_workers=4
)
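# Note: the test loader above disables shuffling so evaluation order stays
# deterministic; num_workers=4 loads batches in four background worker processes.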

# select the backbone by depth; fail fast on unsupported values
if args.depth == 18:
    net = sresnet.resnet18(num_classes=args.class_num, align="CONV")
    print("using resnet 18")
elif args.depth == 50:
    net = sresnet.resnet50(num_classes=args.class_num, align="CONV")
    print("using resnet 50")
elif args.depth == 101:
    net = sresnet.resnet101(num_classes=args.class_num, align="CONV")
    print("using resnet 101")
elif args.depth == 152:
    net = sresnet.resnet152(num_classes=args.class_num, align="CONV")
    print("using resnet 152")
else:
    raise ValueError("unsupported depth: {}".format(args.depth))

net.to(device)
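# cross-entropy objective with SGD (momentum 0.9, weight decay 5e-4), a
# standard recipe for training ResNets on CIFAR-style datasets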
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=LR, weight_decay=5e-4, momentum=0.9)
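
# Minimal illustrative sketch: a typical test-set evaluation helper for this
# setup, assuming net(images) returns a single logits tensor of shape
# (N, class_num); if the sresnet variants return multiple heads, take the
# final output before the argmax.
def evaluate(model, loader):
    model.eval()
    correct, total, loss_sum = 0, 0, 0.0
    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            logits = model(images)
            loss_sum += criterion(logits, labels).item()
            correct += logits.argmax(dim=1).eq(labels).sum().item()
            total += labels.size(0)
    model.train()
    return 100.0 * correct / total, loss_sum / len(loader)
# usage: acc, test_loss = evaluate(net, testloader)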

if __name__ == "__main__":
    best_acc = 0
    print("Start Training")  # 定义遍历数据集的次数
    with open("acc.txt", "w") as f:
        with open("log.txt", "w")as f2:
            for epoch in range(args.epoch):
Example #2
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         num_workers=4)

# select the backbone by depth; fail fast on unsupported values
if args.depth == 18:
    net = sresnet.resnet18(num_classes=args.class_num,
                           align="CONV",
                           pretrained=False)
    print("using resnet 18")
elif args.depth == 50:
    net = sresnet.resnet50(num_classes=args.class_num,
                           align="CONV",
                           pretrained=False)
    print("using resnet 50")
elif args.depth == 101:
    net = sresnet.resnet101(num_classes=args.class_num,
                            align="CONV",
                            pretrained=False)
    print("using resnet 101")
elif args.depth == 152:
    net = sresnet.resnet152(num_classes=args.class_num,
                            align="CONV",
                            pretrained=False)
    print("using resnet 152")
else:
    raise ValueError("unsupported depth: {}".format(args.depth))

net.to(device)
# load the trained weights; map_location keeps loading working when the
# checkpoint was saved on a different device than the current one
net.load_state_dict(torch.load(args.load_path, map_location=device))
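
# Minimal illustrative sketch: running the restored weights in inference mode
# on one batch from testloader, assuming net(images) returns a single logits
# tensor.
net.eval()
with torch.no_grad():
    images, labels = next(iter(testloader))
    logits = net(images.to(device))
    print("predicted classes for the first batch:", logits.argmax(dim=1).tolist())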

if __name__ == "__main__":
    best_acc = 0

    # create experiment directory
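    # Minimal sketch of this step; "runs/resnetXX" is a hypothetical directory
    # name, not necessarily the one used by the original script.
    exp_dir = os.path.join("runs", "resnet{}".format(args.depth))
    os.makedirs(exp_dir, exist_ok=True)  # requires `import os` at the top of the file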