Example no. 1
def forward_pytorch(weightfile, image):
    # net = resnet.resnet18()
    net = LeNet(1, 2)  # assumed signature: (input_channels, num_classes)
    checkpoint = torch.load(weightfile)
    net.load_state_dict(checkpoint['weight'])
    net.double()  # cast weights to float64 to match the float64 input below
    if args.cuda:
        net.cuda()
    print(net)
    net.eval()
    image = torch.from_numpy(image.astype(np.float64)) # to double

    if args.cuda:
        image = Variable(image.cuda())
    else:
        image = Variable(image)
    t0 = time.time()
    blobs = net.forward(image)
    t1 = time.time()  # stop the clock before printing
    print(blobs.data.cpu().numpy().flatten())
    return t1 - t0, blobs, net, torch.from_numpy(blobs.data.cpu().numpy())
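
# Hypothetical driver for forward_pytorch(): the checkpoint path and the
# 28x28 input size are assumptions (the LeNet(1, 2) definition is not shown),
# and the module-level `args` from argparse is assumed to exist.
example_img = np.random.rand(1, 1, 28, 28)  # N x C x H x W, float64
elapsed, blobs, net, out = forward_pytorch('lenet_weights.pth', example_img)
print('forward time: %.4fs' % elapsed)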
def train_lenet(split_dir):
    """训练lenet"""
    set_seed()  # 设置随机种子
    rmb_label = {"1": 0, "100": 1}

    # hyperparameters
    MAX_EPOCH = 10
    BATCH_SIZE = 16
    LR = 0.01
    log_interval = 10
    val_interval = 1
    """Step 1: data loading"""
    train_dir = os.path.join(split_dir, "train")
    valid_dir = os.path.join(split_dir, "valid")
    test_dir = os.path.join(split_dir, "test")

    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]

    # transforms for training data; RandomCrop adds augmentation
    train_transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    # transforms for validation data
    valid_transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    # build RMBDataset instances
    train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
    valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

    # build DataLoaders
    train_loader = DataLoader(dataset=train_data,
                              batch_size=BATCH_SIZE,
                              shuffle=True)
    valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)
    """Step 2: model"""
    net = LeNet(classes=2)
    net.initialize_weights()
    """Step 3: loss function"""
    criterion = nn.CrossEntropyLoss()
    """Step 4: optimizer"""
    optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.1)
    """Step 5: training"""
    train_curve = []
    valid_curve = []

    figure_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                              'result')
    os.makedirs(figure_dir, exist_ok=True)  # ensure the output dir exists

    for epoch in range(MAX_EPOCH):
        loss_mean = 0.
        correct = 0.
        total = 0.

        net.train()
        for i, data in enumerate(train_loader):

            # forward
            inputs, labels = data
            outputs = net(inputs)

            # backward
            optimizer.zero_grad()
            loss = criterion(outputs, labels)
            loss.backward()

            # update weights
            optimizer.step()

            # accumulate classification statistics
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).squeeze().sum().item()

            # log training progress
            loss_mean += loss.item()
            train_curve.append(loss.item())
            if (i + 1) % log_interval == 0:
                loss_mean = loss_mean / log_interval
                print(
                    "Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}"
                    .format(epoch, MAX_EPOCH, i + 1, len(train_loader),
                            loss_mean, correct / total))
                loss_mean = 0.

        scheduler.step()  # update the learning rate

        # validate the model
        if (epoch + 1) % val_interval == 0:

            correct_val = 0.
            total_val = 0.
            loss_val = 0.
            net.eval()
            with torch.no_grad():
                for j, data in enumerate(valid_loader):
                    inputs, labels = data
                    outputs = net(inputs)
                    loss = criterion(outputs, labels)

                    _, predicted = torch.max(outputs.data, 1)
                    total_val += labels.size(0)
                    correct_val += (predicted == labels).squeeze().sum().item()

                    loss_val += loss.item()

                loss_val_mean = loss_val / len(valid_loader)
                valid_curve.append(loss_val_mean)
                print(
                    "Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}"
                    .format(epoch, MAX_EPOCH, j + 1, len(valid_loader),
                            loss_val_mean, correct_val / total_val))

    train_x = range(len(train_curve))
    train_y = train_curve

    train_iters = len(train_loader)
    valid_x = np.arange(
        1,
        len(valid_curve) + 1
    ) * train_iters * val_interval  # valid_curve holds per-epoch loss, so map each record onto the iteration axis
    valid_y = valid_curve

    plt.plot(train_x, train_y, label='Train')
    plt.plot(valid_x, valid_y, label='Valid')

    plt.legend(loc='upper right')
    plt.ylabel('loss value')
    plt.xlabel('Iteration')
    figure_path = os.path.join(figure_dir, '0201.png')
    plt.savefig(figure_path)
    plt.close()
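
# Hypothetical invocation of train_lenet(); the split directory (with the
# train/valid/test subfolders Step 1 expects) is an assumption.
train_lenet(os.path.join("data", "rmb_split"))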
def main():

    parser = argparse.ArgumentParser()
    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument("--train",
                            action="store_true",
                            help="To train the network.")
    mode_group.add_argument("--test",
                            action="store_true",
                            help="To test the network.")
    parser.add_argument("--epochs",
                        default=10,
                        type=int,
                        help="Desired number of epochs.")
    parser.add_argument("--dropout",
                        action="store_true",
                        help="Whether to use dropout or not.")
    parser.add_argument("--uncertainty",
                        action="store_true",
                        help="Use uncertainty or not.")
    parser.add_argument("--dataset",
                        action="store_true",
                        help="The dataset to use.")
    parser.add_argument("--outsample",
                        action="store_true",
                        help="Use out of sample test image")

    uncertainty_type_group = parser.add_mutually_exclusive_group()
    uncertainty_type_group.add_argument(
        "--mse",
        action="store_true",
        help=
        "Set this argument when using uncertainty. Sets loss function to Expected Mean Square Error."
    )
    uncertainty_type_group.add_argument(
        "--digamma",
        action="store_true",
        help=
        "Set this argument when using uncertainty. Sets loss function to Expected Cross Entropy."
    )
    uncertainty_type_group.add_argument(
        "--log",
        action="store_true",
        help=
        "Set this argument when using uncertainty. Sets loss function to Negative Log of the Expected Likelihood."
    )

    dataset_type_group = parser.add_mutually_exclusive_group()
    dataset_type_group.add_argument(
        "--mnist",
        action="store_true",
        help="Set this argument when using MNIST dataset")
    dataset_type_group.add_argument(
        "--emnist",
        action="store_true",
        help="Set this argument when using EMNIST dataset")
    dataset_type_group.add_argument(
        "--CIFAR",
        action="store_true",
        help="Set this argument when using CIFAR dataset")
    dataset_type_group.add_argument(
        "--fmnist",
        action="store_true",
        help="Set this argument when using FMNIST dataset")
    args = parser.parse_args()

    if args.dataset:
        if args.mnist:
            from mnist import dataloaders, label_list
        elif args.CIFAR:
            from CIFAR import dataloaders, label_list
        elif args.fmnist:
            from fashionMNIST import dataloaders, label_list
        # note: --emnist is accepted by the parser but has no import branch here

    if args.train:
        num_epochs = args.epochs
        use_uncertainty = args.uncertainty
        num_classes = 10
        model = LeNet(dropout=args.dropout)

        if use_uncertainty:
            if args.digamma:
                criterion = edl_digamma_loss
            elif args.log:
                criterion = edl_log_loss
            elif args.mse:
                criterion = edl_mse_loss
            else:
                parser.error(
                    "--uncertainty requires --mse, --log or --digamma.")
        else:
            criterion = nn.CrossEntropyLoss()

        optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=0.005)

        exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                     step_size=7,
                                                     gamma=0.1)

        device = get_device()
        model = model.to(device)

        model, metrics = train_model(model,
                                     dataloaders,
                                     num_classes,
                                     criterion,
                                     optimizer,
                                     scheduler=exp_lr_scheduler,
                                     num_epochs=num_epochs,
                                     device=device,
                                     uncertainty=use_uncertainty)

        state = {
            "epoch": num_epochs,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
        }

        if use_uncertainty:
            if args.digamma:
                torch.save(state, "./results/model_uncertainty_digamma.pt")
                print("Saved: ./results/model_uncertainty_digamma.pt")
            if args.log:
                torch.save(state, "./results/model_uncertainty_log.pt")
                print("Saved: ./results/model_uncertainty_log.pt")
            if args.mse:
                torch.save(state, "./results/model_uncertainty_mse.pt")
                print("Saved: ./results/model_uncertainty_mse.pt")

        else:
            torch.save(state, "./results/model.pt")
            print("Saved: ./results/model.pt")

    elif args.test:

        use_uncertainty = args.uncertainty
        device = get_device()
        model = LeNet()
        model = model.to(device)
        optimizer = optim.Adam(model.parameters())

        if use_uncertainty:
            if args.digamma:
                checkpoint = torch.load(
                    "./results/model_uncertainty_digamma.pt")
            elif args.log:
                checkpoint = torch.load("./results/model_uncertainty_log.pt")
            elif args.mse:
                checkpoint = torch.load("./results/model_uncertainty_mse.pt")
            else:
                parser.error(
                    "--uncertainty requires --mse, --log or --digamma.")
        else:
            checkpoint = torch.load("./results/model.pt")

        filename = "./results/rotate.jpg"
        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])

        model.eval()
        if args.outsample:
            img = Image.open("./data/arka.jpg").convert('L').resize((28, 28))
            img = TF.to_tensor(img)
            img.unsqueeze_(0)
        else:
            a = iter(dataloaders['test'])
            img, label = next(a)
        rotating_image_classification(model,
                                      img,
                                      filename,
                                      label_list,
                                      uncertainty=use_uncertainty)

        img = transforms.ToPILImage()(img[0][0])
        test_single_image(model, img, label_list, uncertainty=use_uncertainty)
        model.train()

        # NOTE: the loop below appears to come from a different snippet; it
        # assumes trainloader, testloader, cuda, epoch, args.log_interval, and
        # a model exposing .optimizer/.criterion are defined elsewhere.
        for i, (x_batch, y_batch) in enumerate(trainloader):
            if cuda:
                x_batch, y_batch = x_batch.cuda(), y_batch.cuda()

            model.optimizer.zero_grad()

            # forward + backward + optimize
            output = model(x_batch)
            loss = model.criterion(output, y_batch)
            loss.backward()
            model.optimizer.step()

            if i % args.log_interval == 0:
                print('Epoch: {:3d}, Batch {:3d}/{}, Loss: {:.5f}'.format(
                    epoch, i, len(trainloader), loss.item()))

        model.eval()
        correct, total = 0, 0
        for i, (x_batch, y_batch_true) in enumerate(testloader):
            if cuda:
                x_batch, y_batch_true = x_batch.cuda(), y_batch_true.cuda()

            y_batch_pred = torch.argmax(model.forward(x_batch), dim=1)

            correct += torch.sum(torch.eq(y_batch_pred, y_batch_true)).item()
            total += x_batch.shape[0]

        print('Accuracy: {}'.format(correct / total))
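
# main() above is argparse-driven; a minimal entry point, with an example
# invocation in the comment (the script name is an assumption):
if __name__ == "__main__":
    # e.g.  python main.py --train --uncertainty --mse --epochs 10
    main()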
Example no. 6
class Ui(QtWidgets.QMainWindow):
    buttons = [
       "load_image", "color_conversion", "image_flipping",
       "blending", "global_threshold", "local_threshold",
       "gaussian", "sobel_x", "sobel_y", "magnitude", "rst",
       "show_train_image", "show_hyper", "train_1", "pt",
       "inference", "ok", "show_train_result", "cancel"]
    inputs = ["angle", "scale", "tx", "ty", "test_index"]

    def __init__(self):
        super(Ui, self).__init__()

        uic.loadUi('main_window.ui', self)
        self.get_widgets()
        self.get_input()
        self.bind_event()
        self.param_setup()
        self.torch_setup()
        self.show()

    def get_widgets(self):
        for btn in self.buttons:
            setattr(self, btn, self.findChild(QtWidgets.QPushButton, btn))
    
    def get_input(self):
        for inp in self.inputs:
            setattr(self, inp, self.findChild(QtWidgets.QLineEdit, inp))

    def bind_event(self):
        for btn in self.buttons:
            getattr(self, btn).clicked.connect(
                partial(getattr(events, btn), self))

    def param_setup(self):
        self.batch_size = 32
        self.learning_rate = 0.001
        self.opt = "SGD"
        self.loss_list = []
        self.loss_epoch = []
        self.acc_train_epoch = []
        self.acc_test_epoch = []
        self.compose = transforms.Compose([
            transforms.Resize((32,32)),
            transforms.ToTensor()
        ])


    def torch_setup(self):
        self.data_train = MNIST('./data/mnist',
                            train=True,
                            download=True,
                            transform=self.compose)
                            
        self.data_test = MNIST('./data/mnist',
                            train=False,
                            download=True,
                            transform=self.compose)
        self.data_train_loader = DataLoader(self.data_train, batch_size=self.batch_size, shuffle=True, num_workers=4)
        self.data_test_loader = DataLoader(self.data_test, batch_size=self.batch_size, num_workers=4)
        self.criterion = nn.CrossEntropyLoss()
        self.net = LeNet()
        self.optimizer = getattr(optim, self.opt)(self.net.parameters(), lr=self.learning_rate)

        try:
            self.net.load_state_dict(load('model_params.pkl'))
            self.loaded = True
            print("Loaded")
        except Exception as e:
            print(e)
            self.loaded = False
            print("Not loaded")

    def train(self, epoch):
        self.net.train()
        self.loss_list = []
        correct, total = 0, 0
        for i, (images, labels) in enumerate(self.data_train_loader):
            self.optimizer.zero_grad()
            output = self.net(images)
            loss = self.criterion(output, labels)
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(labels.data.view_as(pred)).sum().item()
            total += images.size(0)
            self.loss_list.append(loss.detach().cpu().item())

            if i % 100 == 0:
                print(f'Train - Epoch {epoch}, Batch: {i}, Loss: {loss.detach().cpu().item()}')

            loss.backward()
            self.optimizer.step()
        self.acc_train_epoch.append(correct/total)
        self.loss_epoch.append(sum(self.loss_list)/len(self.loss_list))

    def test(self):
        self.net.eval()
        total_correct, avg_loss = 0, 0.0
        with torch.no_grad():
            for i, (images, labels) in enumerate(self.data_test_loader):
                output = self.net(images)
                # CrossEntropyLoss returns a batch mean; weight it by the
                # batch size so the division below is a true per-sample average
                avg_loss += self.criterion(output, labels).item() * images.size(0)
                pred = output.max(1)[1]
                total_correct += pred.eq(labels.view_as(pred)).sum().item()

        avg_loss /= len(self.data_test)
        acc = float(total_correct) / len(self.data_test)
        self.acc_test_epoch.append(acc)

    def test_and_train(self, epoch):
        self.train(epoch)
        self.test()
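
# Minimal launch sketch for the Ui class, assuming the standard PyQt5 entry
# point and that 'main_window.ui' sits next to the script.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = Ui()  # loads the .ui file and calls self.show()
    sys.exit(app.exec_())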
def main():

    parser = argparse.ArgumentParser()
    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument("--train",
                            action="store_true",
                            help="To train the network.")
    mode_group.add_argument("--test",
                            action="store_true",
                            help="To test the network.")
    mode_group.add_argument("--examples",
                            action="store_true",
                            help="To example MNIST data.")
    parser.add_argument("--epochs",
                        default=10,
                        type=int,
                        help="Desired number of epochs.")
    parser.add_argument("--dropout",
                        action="store_true",
                        help="Whether to use dropout or not.")
    parser.add_argument("--uncertainty",
                        action="store_true",
                        help="Use uncertainty or not.")
    uncertainty_type_group = parser.add_mutually_exclusive_group()
    uncertainty_type_group.add_argument(
        "--mse",
        action="store_true",
        help=
        "Set this argument when using uncertainty. Sets loss function to Expected Mean Square Error."
    )
    uncertainty_type_group.add_argument(
        "--digamma",
        action="store_true",
        help=
        "Set this argument when using uncertainty. Sets loss function to Expected Cross Entropy."
    )
    uncertainty_type_group.add_argument(
        "--log",
        action="store_true",
        help=
        "Set this argument when using uncertainty. Sets loss function to Negative Log of the Expected Likelihood."
    )
    args = parser.parse_args()

    if args.examples:
        examples = enumerate(dataloaders["val"])
        batch_idx, (example_data, example_targets) = next(examples)
        fig = plt.figure()
        for i in range(6):
            plt.subplot(2, 3, i + 1)
            plt.tight_layout()
            plt.imshow(example_data[i][0], cmap="gray", interpolation="none")
            plt.title("Ground Truth: {}".format(example_targets[i]))
            plt.xticks([])
            plt.yticks([])
        plt.savefig("./images/examples.jpg")

    elif args.train:
        num_epochs = args.epochs
        use_uncertainty = args.uncertainty
        num_classes = 10

        model = LeNet(dropout=args.dropout)

        if use_uncertainty:
            if args.digamma:
                criterion = edl_digamma_loss
            elif args.log:
                criterion = edl_log_loss
            elif args.mse:
                criterion = edl_mse_loss
            else:
                parser.error(
                    "--uncertainty requires --mse, --log or --digamma.")
        else:
            criterion = nn.CrossEntropyLoss()

        optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=0.005)

        exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                                     step_size=7,
                                                     gamma=0.1)

        device = get_device()
        model = model.to(device)

        model, metrics = train_model(model,
                                     dataloaders,
                                     num_classes,
                                     criterion,
                                     optimizer,
                                     scheduler=exp_lr_scheduler,
                                     num_epochs=num_epochs,
                                     device=device,
                                     uncertainty=use_uncertainty)

        state = {
            "epoch": num_epochs,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
        }

        if use_uncertainty:
            if args.digamma:
                torch.save(state, "./results/model_uncertainty_digamma.pt")
                print("Saved: ./results/model_uncertainty_digamma.pt")
            if args.log:
                torch.save(state, "./results/model_uncertainty_log.pt")
                print("Saved: ./results/model_uncertainty_log.pt")
            if args.mse:
                torch.save(state, "./results/model_uncertainty_mse.pt")
                print("Saved: ./results/model_uncertainty_mse.pt")

        else:
            torch.save(state, "./results/model.pt")
            print("Saved: ./results/model.pt")

    elif args.test:

        use_uncertainty = args.uncertainty
        device = get_device()
        model = LeNet()
        model = model.to(device)
        optimizer = optim.Adam(model.parameters())

        if use_uncertainty:
            if args.digamma:
                checkpoint = torch.load(
                    "./results/model_uncertainty_digamma.pt")
                filename = "./results/rotate_uncertainty_digamma.jpg"
            elif args.log:
                checkpoint = torch.load("./results/model_uncertainty_log.pt")
                filename = "./results/rotate_uncertainty_log.jpg"
            elif args.mse:
                checkpoint = torch.load("./results/model_uncertainty_mse.pt")
                filename = "./results/rotate_uncertainty_mse.jpg"
            else:
                parser.error(
                    "--uncertainty requires --mse, --log or --digamma.")
        else:
            checkpoint = torch.load("./results/model.pt")
            filename = "./results/rotate.jpg"

        model.load_state_dict(checkpoint["model_state_dict"])
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])

        model.eval()

        rotating_image_classification(model,
                                      digit_one,
                                      filename,
                                      uncertainty=use_uncertainty)

        img = Image.open("./data/one.jpg").convert('L')

        test_single_image(model, img, uncertainty=use_uncertainty)
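
# The main() examples above assume a LeNet class with an optional dropout
# flag. A minimal sketch under those assumptions (classic LeNet-5 layout for
# 28x28 single-channel input; layer sizes are illustrative, not the authors'):
import torch.nn as nn
import torch.nn.functional as F

class LeNet(nn.Module):
    def __init__(self, dropout=False):
        super().__init__()
        self.use_dropout = dropout
        self.conv1 = nn.Conv2d(1, 6, 5)   # 28x28 -> 24x24, pooled to 12x12
        self.conv2 = nn.Conv2d(6, 16, 5)  # 12x12 -> 8x8, pooled to 4x4
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.flatten(1)
        x = F.relu(self.fc1(x))
        if self.use_dropout:
            x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.fc2(x))
        return self.fc3(x)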
Example no. 8
def evalidation(model_name, testloader, classes, input_channel=3, self_define=True):
    dataiter = iter(testloader)
    images, labels = next(dataiter)  # iterator .next() was removed in Python 3

    # show the images; `batch_size` is assumed to be a module-level global
    imshow(torchvision.utils.make_grid(images))
    print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(batch_size)))

    # load model parameter
    assert model_name in ["LeNet", "VGG16", "ResNet", "DenseNet"]
    param_path = "./model/%s_%s_parameter.pt" % (model_name, "define" if self_define else "official")
    print("load model parameter from %s" % param_path)
    if self_define:
        if model_name == "LeNet":
            net = LeNet(input_channel)
        elif model_name == "VGG16":
            net = VGG16(input_channel)
        elif model_name == "ResNet":
            net = ResNet(input_channel)
        elif model_name == "DenseNet":
            net = DenseNet(input_channel)
    else:
        if model_name == "LeNet":
            net = LeNet(input_channel)
        elif model_name == "VGG16":
            net = models.vgg16_bn(pretrained=False, num_classes=10)
        elif model_name == "ResNet":
            net = models.resnet50(pretrained=False, num_classes=10)
        elif model_name == "DenseNet":
            net = models.DenseNet(num_classes=10)

    net.load_state_dict(torch.load(param_path))
    net.eval()

    # predict
    outputs = net(images)
    _, predicted = torch.max(outputs, 1)
    print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(batch_size)))

    # to gpu
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net.to(device)

    # evaluate
    class_correct = np.zeros(10)
    class_total = np.zeros(10)
    with torch.no_grad():
        for data in testloader:
            inputs, labels = data[0].to(device), data[1].to(device)
            outputs = net(inputs)
            _, predicted = torch.max(outputs, 1)

            for i in range(batch_size):
                label = labels[i]
                class_total[label] += 1
                if predicted[i] == label:
                    class_correct[label] += 1

    print("\nEvery class precious: \n ",
          ' '.join("%5s : %2d %%\n" % (classes[i], 100 * class_correct[i]/class_total[i]) for i in range(len(classes))))
    print("\n%d images in all, Total precious: %2d %%"
          % (np.sum(class_total), 100 * np.sum(class_correct) / np.sum(class_total)))
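
# Hypothetical driver for evalidation(), assuming the CIFAR-10 setup this
# snippet implies; `batch_size` must be module-level since the function
# reads it as a global, and imshow/LeNet are assumed defined in this module.
import torch
import torchvision
import torchvision.transforms as transforms

batch_size = 4
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

evalidation("LeNet", testloader, classes, input_channel=3, self_define=True)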