def predict(opt):
    # net = torch.load('Lenet.pth')  # this .pth file stores only the parameters (state_dict)
    # print('net', net)
    '''
    Note: is_train in basic_option must be set to False before running prediction.
    '''
    # opt.is_train = False
    acc = 0
    total = 0
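    # create_dataloader(opt) is expected to return the test split here, since
    # opt.is_train should be False (see the note above)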
    test_dataloader = create_dataloader(opt)
    net = Classification()
    net.load_state_dict(
        torch.load(
            "./output/train/weights/exp_1/Basic_Epoch_20_Accuracy_0.99.pth",
            map_location=device))
    net = net.to(device)
    net.eval()  # inference mode: disables dropout, uses running BN statistics
    with torch.no_grad():
        for index, data in enumerate(test_dataloader, start=1):
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
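            # torch.max over dim=1 returns (max values, argmax indices); the
            # indices are the predicted class ids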
            _, predicted = torch.max(outputs.data, dim=1)
            # note: predicted[0] only reports the first image of the batch
            print(f'picture {index} is predicted as: {classes[predicted[0]]}')
            total += labels.size(0)
            acc += (predicted == labels).sum().item()
    print('Accuracy on test set : {}%'.format(100 * acc / total))
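
    # ------------------------------------------------------------------
    # The code below switches from inference to training setup for the
    # rotation classifier; `encoder`, `loss_fn`, `cifar_10_train_l`,
    # `cifar_10_train_dt` and `batch_size` are assumed to be defined
    # earlier in the script.
    # ------------------------------------------------------------------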
    classification = Classification().to(device)
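    # three separate Adam optimizers: one for the encoder, one for the
    # learnable parameters of loss_fn (hence loss_fn.parameters()), and one
    # for the classifier that predicts the rotation label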
    encoder_optim = Adam(encoder.parameters(), lr=1e-4)
    loss_optim = Adam(loss_fn.parameters(), lr=1e-4)
    classification_optim = Adam(classification.parameters(), lr=1e-4)

    epoch_restart = 0
    root = Path(r'models')

    # optionally resume encoder, loss and classifier weights from a checkpoint
    if epoch_restart > 0 and root is not None:
        enc_file = root / f'encoder{epoch_restart}.wgt'
        loss_file = root / f'loss{epoch_restart}.wgt'
        classification_loss_file = root / f'classification_loss{epoch_restart}.wgt'
        encoder.load_state_dict(torch.load(str(enc_file)))
        loss_fn.load_state_dict(torch.load(str(loss_file)))
        classification.load_state_dict(
            torch.load(str(classification_loss_file)))
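        # A matching save step during training would presumably look something
        # like this (hypothetical; filenames inferred from the load calls above):
        #   torch.save(encoder.state_dict(), str(root / f'encoder{epoch}.wgt'))
        #   torch.save(loss_fn.state_dict(), str(root / f'loss{epoch}.wgt'))
        #   torch.save(classification.state_dict(),
        #              str(root / f'classification_loss{epoch}.wgt'))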

    # train for up to 500 epochs, resuming after epoch_restart if it is set
    for epoch in range(epoch_restart + 1, 501):
        batch = tqdm(cifar_10_train_l,
                     total=len(cifar_10_train_dt) // batch_size)
        train_loss = []
        # each batch yields the image x, its class label target and a rotation
        # label rot; only x and rot are used in the loss computed below
        for x, target, rot in batch:
            x = x.to(device)
            rot = rot.to(device)
            encoder_optim.zero_grad()
            loss_optim.zero_grad()
            classification_optim.zero_grad()
            y, M = encoder(x)
            predicted_value = classification(y)
            # CrossEntropyLoss is stateless, so re-creating it every batch is
            # harmless, though it could be constructed once outside the loops
            criterion = nn.CrossEntropyLoss()
            loss_classification = criterion(predicted_value, rot)