Code example #1
def eval(args):
    device = torch.device(f"cuda:{args.device_id}")
    model = AlexNet(n_cls=100)
    model.to(device)
    model.load_state_dict(torch.load(args.pretrained_path, map_location=device))  # map_location guards against checkpoints saved on another GPU
    model.eval()

    test_loader = getLoaders(split="eval", batch_size=args.batch_size, num_workers=args.num_workers)

    pred_arr = []
    label_arr = []
    with torch.no_grad():
        for idx, (img, label) in tqdm(enumerate(test_loader), total=len(test_loader)):
            img = img.to(device)
            pred = model.pred(img)
            # mean of softmax prob from 10 different aug
            pred = pred.view(-1, 10, 100)
            pred = pred.mean(dim=1)
            pred_arr.append(pred.detach().cpu().numpy())
            label_arr.append(label.detach().numpy())
    pred_np = np.concatenate(pred_arr)
    label_np = np.concatenate(label_arr)
    top_1 = utils.top_k_acc(k=1, pred=pred_np, label=label_np)
    top_5 = utils.top_k_acc(k=5, pred=pred_np, label=label_np)
    confusion = utils.confusion_matrix(100, pred_np, label_np)
    torch.save({
        "top_1": top_1,
        "top_5": top_5,
        "confusion": confusion,
    }, "result.pth")
    print(f"top_1: {top_1*100:.2f}, top_5: {top_5*100:.2f}")
Code example #2
def test():
    if torch.cuda.device_count() > 1:
        model = torch.nn.parallel.DataParallel(
            AlexNet(num_classes=opt.num_classes))
    else:
        model = AlexNet(num_classes=opt.num_classes)
    model.load_state_dict(
        torch.load(MODEL_PATH, map_location=lambda storage, loc: storage))
    model.to(device)

    # init value
    correct1 = 0.
    correct5 = 0.
    total = len(test_dataloader.dataset)
    with torch.no_grad():
        for i, data in enumerate(test_dataloader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, targets = data
            inputs = inputs.to(device)
            targets = targets.to(device)

            outputs = model(inputs)

            # calculate top-1 accuracy
            prec1 = outputs.argmax(dim=1)
            correct1 += torch.eq(prec1, targets).sum().item()

            # calculate top-5 accuracy
            maxk = 5  # max((1, 2)) evaluated to 2 and therefore measured top-2, not top-5
            targets_resize = targets.view(-1, 1)
            _, prec5 = outputs.topk(maxk, 1, True, True)
            correct5 += torch.eq(prec5, targets_resize).sum().item()

    return correct1 / total, correct5 / total
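
The top-5 count above relies on broadcasting: the (N, maxk) index matrix from topk is compared against the (N, 1) reshaped targets, so each row is checked against its own label. A toy check of the pattern (the values are made up):

import torch

outputs = torch.tensor([[0.1, 0.5, 0.2, 0.9, 0.3, 0.0, 0.7]])  # one sample, seven classes
targets = torch.tensor([1])
_, pred = outputs.topk(5, 1, True, True)    # indices of the five largest logits
hits = torch.eq(pred, targets.view(-1, 1))  # broadcast compare against the label
print(hits.sum().item())                    # 1 -> class 1 is among the top five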
Code example #3
def main(args=None):
    logging.basicConfig(format='%(levelname)s:%(message)s',
                        level=logging.DEBUG)
    logging.info('AlexNet architecture')

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_classes',
                        help='Number of classes.',
                        type=int,
                        default=1000)
    parser.add_argument('--pretrained',
                        help='Whether pretrained weights are used.',
                        type=bool,  # caveat: argparse's type=bool treats any non-empty string as True
                        default=True)
    parser.add_argument('--model_url',
                        help='Path to the weights.',
                        default="./pesos/alexnet-owt-4df8aa71.pth")

    opt = parser.parse_args(args)

    # Data
    proc = Preprocessador()

    imagem_url = "./imagens/raposa.jpg"
    imagem = Image.open(imagem_url)
    imagem = proc.executa(imagem)
    #https://jhui.github.io/2018/02/09/PyTorch-Basic-operations/
    imagem = imagem.unsqueeze(0)

    # Instantiate the model
    model = AlexNet(opt.num_classes)
    model.eval()

    # If pretrained weights are to be used
    if opt.pretrained:
        checkpoint = torch.load(opt.model_url)
        model.load_state_dict(checkpoint)

    # Use the GPU if the machine has one
    if torch.cuda.is_available():
        model.to('cuda')
        imagem = imagem.to('cuda')  # the input must live on the same device as the model

    with torch.no_grad():
        saida = model(imagem)

    # Get the top-ranked index
    index = torch.argmax(saida[0]).item()  # torch.argmax also works for CUDA tensors, unlike np.argmax
    acuracia = torch.max(saida).item()  # the raw maximum logit, not a calibrated probability

    print(getLabel(index), acuracia)
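
Note that acuracia here is the raw maximum logit rather than a calibrated probability. If a probability-like score is wanted, softmax can be applied first; a small sketch using the same variables:

probs = torch.softmax(saida[0], dim=0)  # normalize the logits of the single sample
index = torch.argmax(probs).item()
print(getLabel(index), probs[index].item())  # class label with its softmax score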
Code example #4
def test(imageFolder):  # test routine
    is_parameter = False  # False: load the whole serialized model; True: load a parameter (state-dict) file
    if is_parameter:
        net = AlexNet()
        model = torch.load('./model_parameter.pth',
                           map_location=torch.device(device))  # model parameter file
        net.load_state_dict(model)
    else:
        net = torch.load('./model.pkl', map_location=torch.device(device))
    net = net.to(device)
    torch.set_grad_enabled(False)  # a bare torch.no_grad() statement is a no-op, so disable gradients globally
    net.eval()
    dataset = MyDataSet(imageFolder, data_transform)  # build the dataset once rather than once per item
    for i in range(len(dataset)):
        img, ori, name = dataset[i]
        out = net(img.to(device, torch.float))
        predict = out.argmax(dim=1)  # predicted label
        probability = out[:, predict]  # score of that label (a probability only if the net ends in softmax)
        s = 'Predict result: This is a '
        if predict == 0:
            s += 'CAT'
        else:
            s += 'DOG'
        s += ' with the probability of '
        s += str(round(float(probability), 4))
        plt.title(s)
        plt.imshow(ori)
        plt.savefig("./result/" + name.replace('.jpg', '') + ".png",
                    dpi=300)  # save the result into the result folder
        plt.show()  # show the image
        print(name + ' Success!')
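
MyDataSet is defined elsewhere in that project; from its use here it must return a transformed tensor, the original image for plotting, and the file name. A hypothetical minimal version (the flat folder layout is an assumption):

import os
from PIL import Image
from torch.utils.data import Dataset

class MyDataSet(Dataset):
    def __init__(self, image_folder, transform=None):
        self.image_folder = image_folder
        self.names = sorted(os.listdir(image_folder))
        self.transform = transform

    def __len__(self):
        return len(self.names)

    def __getitem__(self, i):
        name = self.names[i]
        ori = Image.open(os.path.join(self.image_folder, name)).convert('RGB')
        img = self.transform(ori)            # assumed to yield a CHW tensor
        return img.unsqueeze(0), ori, name   # batch dimension added, as the test loop expects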
Code example #5
def main():
    start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch

    # Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100


    trainset = dataloader(root=args.dataroot, train=True, download=True, transform=transform_train)
    sampler = torch.utils.data.distributed.DistributedSampler(trainset, num_replicas=hvd.size(), rank=hvd.rank())
    trainloader = data.DataLoader(dataset=trainset, batch_size=args.train_batch * world_size, shuffle=False, sampler=sampler)

    testset = dataloader(root=args.dataroot, train=False, download=False, transform=transform_test)
    testloader = data.DataLoader(testset, batch_size=args.test_batch * world_size, shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format("Alexnet"))
    model = AlexNet(num_classes=num_classes)

    device = torch.device('cuda', local_rank)
    model = model.to(device)
    # model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)    
    print('Model on cuda:%d' % local_rank)
    print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # wrap the optimizer with Horovod
    optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
    # broadcast the initial parameters from rank 0 to every worker
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)

    # Train and val
    for epoch in range(start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        train_loss, train_acc = train(trainloader, model, criterion, optimizer, epoch, use_cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch, use_cuda)
        print('Rank:{} Epoch[{}/{}]: LR: {:.3f}, Train loss: {:.5f}, Test loss: {:.5f}, Train acc: {:.2f}, Test acc: {:.2f}.'.format(
            local_rank, epoch + 1, args.epochs, state['lr'],
            train_loss, test_loss, train_acc, test_acc))
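
The snippet assumes Horovod is already initialized and that local_rank, world_size, use_cuda, and state are module-level globals. The usual setup looks roughly like this (a sketch, not taken from the project):

import torch
import horovod.torch as hvd

hvd.init()                             # start Horovod
local_rank = hvd.local_rank()          # GPU index of this process on its node
world_size = hvd.size()                # total number of workers
use_cuda = torch.cuda.is_available()
if use_cuda:
    torch.cuda.set_device(local_rank)  # bind each process to one GPU
state = {'lr': args.lr}                # mirrors the lr printed in the log line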
Code example #6
def get_prediction(image_bytes):
    # exception handling: guard against inputs that are not images
    try:
        weights_path = "./Alexnet.pth"
        class_json_path = "./class_indices.json"
        assert os.path.exists(weights_path), "weights path does not exist..."
        assert os.path.exists(class_json_path), "class json path does not exist..."

        # select device
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print(device)
        # create model
        model = AlexNet(num_classes=5)
        # load model weights
        model.load_state_dict(torch.load(weights_path, map_location=device))
        model.to(device)
        model.eval()

        # load class info
        with open(class_json_path, 'rb') as json_file:
            class_indict = json.load(json_file)

        tensor = transform_image(image_bytes=image_bytes).to(device)  # keep the input on the model's device
        outputs = torch.softmax(model(tensor).squeeze(), dim=0)
        # detach drops the gradient information
        prediction = outputs.detach().cpu().numpy()
        # '<' left-aligns the field
        template = "class:{:<15} probability:{:.3f}"
        index_pre = [(class_indict[str(index)], float(p)) for index, p in enumerate(prediction)]
        # sort probability
        index_pre.sort(key=lambda x: x[1], reverse=True)
        text = [template.format(k, v) for k, v in index_pre]
        return_info = {"result": text}
    except Exception as e:
        return_info = {"result": [str(e)]}
    return return_info
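
transform_image is not shown; it presumably decodes the raw bytes and applies the same preprocessing used at training time. A plausible sketch (the resize/crop sizes and normalization values are assumptions):

import io
from PIL import Image
from torchvision import transforms

def transform_image(image_bytes):
    my_transforms = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
    return my_transforms(image).unsqueeze(0)  # add the batch dimension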
Code example #7
File: train.py  Project: kakamband/captcha_solver
def _main(data_dir, batch_size, learning_rate, n_epoch):
    '''
    main function
    '''
    # Create dataloader
    dataloaders_dict = create_dataloader(data_dir, batch_size)

    # Detect if we have a GPU available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Create model
    model = AlexNet()
    model = model.to(device)

    # Observe that all parameters are being optimized
    optimizer_ft = optim.Adam(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()

    model = train_model(model, dataloaders_dict, criterion, optimizer_ft,
                        device, n_epoch)

    torch.save(model, 'model.pt')
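
Unlike most of the other examples, this one saves the entire pickled module rather than a state dict, so reloading it later requires the AlexNet class to be importable under the same module path:

# reloading the fully pickled model
model = torch.load('model.pt', map_location='cpu')
model.eval()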
Code example #8
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # pick the device
    print("using {} device.".format(device))

    data_transform = {  # data preprocessing
        "train": transforms.Compose([transforms.RandomResizedCrop(224),  # for key "train": random crop to 224*224
                                     transforms.RandomHorizontalFlip(),  # random horizontal flip
                                     transforms.ToTensor(),  # convert to a tensor
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),  # normalization
        "val": transforms.Compose([transforms.Resize((224, 224)),  # cannot 224, must (224, 224)
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

    data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set", "dog_data")  # dog data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(image_path, "train"),
                                         transform=data_transform["train"])  # apply the train preprocessing
    train_num = len(train_dataset)  # number of training images

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx  # mapping from class name to index
    cla_dict = dict((val, key) for key, val in flower_list.items())  # invert it to index -> class name
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:  # keep a json copy for later lookup
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size, shuffle=True,
                                               num_workers=nw) #加载

    validate_dataset = datasets.ImageFolder(root=os.path.join(image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=4, shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images fot validation.".format(train_num,
                                                                           val_num))
    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()
    #
    # def imshow(img):
    #     img = img / 2 + 0.5  # unnormalize
    #     npimg = img.numpy()
    #     plt.imshow(np.transpose(npimg, (1, 2, 0)))
    #     plt.show()
    #
    # print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
    # imshow(utils.make_grid(test_image))

    net = AlexNet(num_classes=5, init_weights=True)  # 5 classes

    net.to(device)  # move the network to the device
    loss_function = nn.CrossEntropyLoss()  # loss function
    # pata = list(net.parameters())
    optimizer = optim.Adam(net.parameters(), lr=0.0002)  # Adam over the network's trainable parameters; tune the lr yourself

    save_path = './AlexNet.pth'  # path for saving the model
    best_acc = 0.0
    for epoch in range(10):  # training
        # train
        net.train()  # enables dropout
        running_loss = 0.0  # tracks the average loss
        t1 = time.perf_counter()  # training time
        for step, data in enumerate(train_loader, start=0):  # iterate over the data set
            images, labels = data  # split into images and labels
            optimizer.zero_grad()  # clear the gradient information
            outputs = net(images.to(device))  # forward pass on the chosen device
            loss = loss_function(outputs, labels.to(device))  # loss
            loss.backward()  # backpropagation
            optimizer.step()  # update the node parameters

            # print statistics
            running_loss += loss.item()  # accumulate the loss
            # print train process
            rate = (step + 1) / len(train_loader)  # training progress
            a = "*" * int(rate * 50)
            b = "." * int((1 - rate) * 50)
            print("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(int(rate * 100), a, b, loss), end="")
        print()
        print(time.perf_counter()-t1)

        # validate
        net.eval()  # disables dropout
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            for val_data in validate_loader:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]  # the argmax is the class
                acc += (predict_y == val_labels.to(device)).sum().item()  # compare prediction with ground truth and accumulate
            val_accurate = acc / val_num  # accuracy
            if val_accurate > best_acc:  # if it beats the best so far
                best_acc = val_accurate  # update it
                torch.save(net.state_dict(), save_path)  # save the weights
            print('[epoch %d] train_loss: %.3f  test_accuracy: %.3f' %  # report
                  (epoch + 1, running_loss / (step + 1), val_accurate))  # step is the last index, so divide by step + 1

    print('Finished Training')
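
Once training finishes, the saved weights and the class_indices.json written above can drive single-image prediction. A short sketch using the same val transforms (the image path is hypothetical):

import json
import torch
from PIL import Image
from torchvision import transforms

val_tf = transforms.Compose([transforms.Resize((224, 224)),
                             transforms.ToTensor(),
                             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
img = val_tf(Image.open("test.jpg").convert("RGB")).unsqueeze(0)
net = AlexNet(num_classes=5)
net.load_state_dict(torch.load("./AlexNet.pth", map_location="cpu"))
net.eval()
with torch.no_grad():
    probs = torch.softmax(net(img).squeeze(0), dim=0)
with open("class_indices.json") as f:
    cla_dict = json.load(f)
idx = int(probs.argmax())
print(cla_dict[str(idx)], float(probs[idx]))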
Code example #9
File: train.py  Project: Schenk75/MNIST-STM32
optimizer = optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.99))
criterion = nn.CrossEntropyLoss()

writer = SummaryWriter(outf + '/exp')
best_acc = float('-inf')
best_since_last = 0
for ep in range(start_epoch, epochs + 1):
    if best_since_last == 20: break
    elif best_since_last % 8 == 0 and best_since_last != 0:
        adjust_learning_rate(optimizer, 0.5)

    train_metrics = train(model, train_loader, optimizer, criterion, ep)
    test_metrics = test(model, test_loader, criterion)
    #     print(train_metrics)
    #     print(test_metrics)
    writer.add_scalar('train-acc', train_metrics['acc'], global_step=ep)
    writer.add_scalar('train-loss', train_metrics['loss'], global_step=ep)
    writer.add_scalar('test-acc', test_metrics['acc'], global_step=ep)
    writer.add_scalar('test-loss', test_metrics['loss'], global_step=ep)

    if best_acc < test_metrics['acc']:
        best_since_last = 0
        print(f'Found best at epoch {ep}\n')
        best_acc = test_metrics['acc']
        torch.save(model.cpu().state_dict(), outf + '/best.pth')
    else:
        best_since_last += 1

    torch.save(model.cpu().state_dict(), outf + '/last.pth')
    model.to(device)
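
adjust_learning_rate(optimizer, 0.5) is triggered whenever no improvement is seen for 8 epochs; the helper itself is not shown. A plausible version (an assumption, not the project's code):

def adjust_learning_rate(optimizer, factor):
    # scale the learning rate of every parameter group by `factor`
    for group in optimizer.param_groups:
        group['lr'] *= factor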
Code example #10
def train(args):
    device = torch.device(f"cuda:{args.device_id}")
    model = AlexNet(n_cls=100, useLRN=args.useLRN, useDropOut=args.useDropOut)
    # model = AlexNet(num_classes= 100)
    criterion = nn.CrossEntropyLoss()

    model.to(device)
    optimizer = Adam(model.parameters(), lr=args.lr)

    train_loader, valid_loader = getLoaders(split="train",
                                            batch_size=args.batch_size,
                                            num_workers=args.num_workers,
                                            aug=args.useAug)

    train_loss_arr = []
    valid_loss_arr = []
    valid_acc_arr = []
    valid_top5_arr = []
    n_iter = 0
    best_loss = float('inf')
    best_top1_acc = 0
    best_top5_acc = 0
    for ep in range(args.epoch):
        model.train()
        for _, (img, label) in tqdm(enumerate(train_loader),
                                    total=len(train_loader)):
            img, label = img.to(device), label.to(device)
            optimizer.zero_grad()
            pred = model(img)
            loss = criterion(pred, label)
            # loss = model.criterion(pred, label)
            loss.backward()
            optimizer.step()
            train_loss_arr.append(loss.item())
            n_iter += 1
        model.eval()
        ep_valid_loss_arr = []
        ep_acc_arr = []
        ep_top5_arr = []
        with torch.no_grad():
            for _, (img, label) in tqdm(enumerate(valid_loader),
                                        total=len(valid_loader)):
                img, label = img.to(device), label.to(device)
                pred = model(img)
                loss = criterion(pred, label)
                # loss = model.criterion(pred, label)
                acc = utils.top_k_acc(k=1,
                                      pred=pred.detach().cpu().numpy(),
                                      label=label.detach().cpu().numpy())
                acc5 = utils.top_k_acc(k=5,
                                       pred=pred.detach().cpu().numpy(),
                                       label=label.detach().cpu().numpy())
                ep_acc_arr.append(acc)
                ep_top5_arr.append(acc5)
                ep_valid_loss_arr.append(loss.item())
        valid_loss = np.mean(ep_valid_loss_arr)
        valid_acc = np.mean(ep_acc_arr)
        valid_top5 = np.mean(ep_top5_arr)
        train_loss = np.mean(train_loss_arr[-len(train_loader):])
        valid_loss_arr.append(valid_loss)
        valid_acc_arr.append(valid_acc)  # these lists were saved in the checkpoint but never filled
        valid_top5_arr.append(valid_top5)
        if valid_loss < best_loss:
            best_loss = valid_loss
            best_top1_acc = valid_acc
            best_top5_acc = valid_top5
            model.cpu()
            torch.save(model.state_dict(), "best_model.pth")
            model.to(device)
        if (ep + 1) % 10 == 0:
            model.cpu()
            torch.save(
                {
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "train_loss": train_loss_arr,
                    "valid_loss": valid_loss_arr,
                    "valid_acc": valid_acc_arr,
                    "valid_top5": valid_top5_arr,
                    "best_loss": best_loss,
                    "ep": ep,
                    "n_iter": n_iter,
                }, "model_checkpoint.pth")
            model.to(device)
        print(
            f"[{ep}, {n_iter}] train: {train_loss:.4f}, valid: {valid_loss:.4f}, acc: {valid_acc:.4f}, top5: {valid_top5:.4f}"
        )
    with open("exp_result.txt", "a+") as f:
        f.write(
            f"{args}, loss: {best_loss:.4f}, top1: {best_top1_acc*100:.1f}, top5: {best_top5_acc*100:.1f}\n"
        )
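
getLoaders is a project-local factory; given n_cls=100 it presumably wraps CIFAR-100. A hypothetical sketch of it (the transforms and the 90/10 train/valid split are assumptions):

from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms

def getLoaders(split, batch_size, num_workers, aug=False):
    tf = [transforms.ToTensor()]
    if aug:
        tf = [transforms.RandomCrop(32, padding=4),
              transforms.RandomHorizontalFlip()] + tf
    ds = datasets.CIFAR100(root="./data", train=(split == "train"),
                           download=True, transform=transforms.Compose(tf))
    if split != "train":
        return DataLoader(ds, batch_size, shuffle=False, num_workers=num_workers)
    n_valid = len(ds) // 10
    train_ds, valid_ds = random_split(ds, [len(ds) - n_valid, n_valid])
    return (DataLoader(train_ds, batch_size, shuffle=True, num_workers=num_workers),
            DataLoader(valid_ds, batch_size, shuffle=False, num_workers=num_workers))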
Code example #11
def main():
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print("using {} device.".format(device))

    tbwriter = SummaryWriter(log_dir="./logs")

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(360),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),
        "val":
        transforms.Compose([
            transforms.Resize((360, 360)),  # cannot 360, must (360,360): two positional args would set the interpolation
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "./DATA"))  # get data root path
    image_path = os.path.join(data_root, "male")  # data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(
        image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=2)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 8
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=8,
                                                  shuffle=True,
                                                  num_workers=nw)

    print("using {} images for training, {} images fot validation.".format(
        train_num, val_num))

    if os.path.exists("./log360.pth"):
        #net.load_state_dict(torch.load("./log360.pth", map_location='cuda:2'))
        net = torch.load("./log360.pth", 'cpu')  # replaces the model entirely, so no fresh AlexNet is needed first
        print("continue training")
    else:
        net = AlexNet(num_classes=3, init_weights=True)
        print("start training anew")
    net.to(device)  # move the net to the device in both branches, not only when starting anew

    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=0.0001)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.98)

    epochs = 2000
    save_path = './AlexNet.pth'
    best_acc = 0.0
    train_steps = len(train_loader)

    #json_path = './class_indices.json'
    #json_file = open(json_path, "r")
    #class_indict = json.load(json_file)
    #model = AlexNet(num_classed=6).to(device)

    trainLOSS = []  #save loss
    testLOSS = []  #save loss
    valACC = []  #save val acc

    for epoch in range(epochs):
        print('LR:{}'.format(scheduler.get_last_lr()[0]))  # get_last_lr() replaces the deprecated get_lr()
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)

        scheduler.step()  # step once per epoch, after the optimizer updates (required ordering since PyTorch 1.1)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader, colour='green')
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num

        tbwriter.add_scalar('train/loss', running_loss / train_steps, epoch)
        tbwriter.add_scalar('val/acc', val_accurate, epoch)

        trainLOSS.append(running_loss / train_steps)
        valACC.append(val_accurate)

        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))
        print(' ')

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

        #predict
        #weights_path="./AlexNet.pth"
        #model.load_state_dict(torch.load(weights_path))

        #model.eval()
        #with torch.no_grad():
        #    putput = torch.squeeze(model(img.to(device))).cpu()
        #    predict = torch.softmax(output, dim=0)
        #    predict_cla = torch.argmax(predict.numpy)

    npLOSS = np.array(trainLOSS)
    npVALACC = np.array(valACC)
    os.makedirs('./save', exist_ok=True)  # np.save will not create the folder itself
    np.save('./save/loss_epoch_{}'.format(epoch), npLOSS)
    np.save('./save/valacc_epoch_{}'.format(epoch), npVALACC)

    print('Finished Training')
Code example #12
def main():
    # viz = Visdom()
    # viz.line([0.], [0.], win='train_loss', opts=dict(title='train loss'))

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ]),
        "val":
        transforms.Compose([
            transforms.Resize((224, 224)),  # cannot 224, must (224, 224)
            transforms.ToTensor()
        ])
    }

    data_root = "/home/zhongsy/datasets/dataset/"  # get data root path
    train_dataset = datasets.ImageFolder(root=os.path.join(data_root, "train"),
                                         transform=data_transform["train"])

    # print(train_dataset.imgs)
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32  # was 1, which disagreed with the hard-coded 32 in the loader below
    # number of workers
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(
        data_root, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=4,
                                                  shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))
    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()
    #
    # def imshow(img):
    #     img = img / 2 + 0.5  # unnormalize
    #     npimg = img.numpy()
    #     plt.imshow(np.transpose(npimg, (1, 2, 0)))
    #     plt.show()
    #
    # print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
    # imshow(utils.make_grid(test_image))

    net = AlexNet(num_classes=2, init_weights=True)

    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    # pata = list(net.parameters())
    optimizer = optim.Adam(net.parameters(), lr=0.0001)

    epochs = 30
    save_path = './AlexNet.pt'
    best_acc = 0.0
    train_steps = len(train_loader)
    global_step = 0
    for epoch in range(epochs):
        # train
        epochloss = float('inf')  # sentinel for the smallest batch loss this epoch
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data

            # print("label: ", labels, labels.dtype)
            optimizer.zero_grad()
            outputs = net(images.to(device))
            # print("imges: ", images, images.dtype)
            # outputs_ = outputs.squeeze()
            # print("output__ : ", outputs_)
            # outputs_ = outputs.to(torch.float)
            loss = loss_function(outputs, labels.to(device))
            # loss = loss.to(torch.float)
            if epochloss > loss:
                epochloss = loss
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)
        # viz.line([epochloss.cpu().detach().numpy()], [global_step],
        #  win='train_loss', update='append')
        global_step += 1

        print("[ start val ]")
        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                # print("prect ;", predict_y)
                # outputs = outputs.squeeze()
                # print("out_puts: ", outputs)
                # a = torch.gt(outputs, 0.5)
                # print("a ", a)
                # for i, (data, label_) in enumerate(zip(outputs, val_labels)):
                #     if abs(data-label_) <= 0.5:
                #         acc += 1
                # viz.images(val_images.view(-1, 3, 224, 224), win='x')
                # viz.text(str(predict_y.detach().cpu().numpy()),
                #  win='pred', opts=dict(title='pred'))
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num  # computed once per epoch, outside the batch loop
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net, save_path)

    print('Finished Training')
Code example #13
# test_data_iter = iter(validate_loader)
# test_image, test_label = test_data_iter.next()
#
# def imshow(img):
#     img = img / 2 + 0.5  # unnormalize
#     npimg = img.numpy()
#     plt.imshow(np.transpose(npimg, (1, 2, 0)))
#     plt.show()
#
# print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
# imshow(utils.make_grid(test_image))

net = AlexNet(num_classes=5, init_weights=True)  # build a fresh model
#net.load_state_dict(torch.load("./AlexNet.pth"))
net.to(device)  # move it to the GPU, if there is one
loss_function = nn.CrossEntropyLoss()  # loss function
#pata = list(net.parameters())
optimizer = optim.Adam(net.parameters(), lr=0.0002)  # gradient-descent optimizer

save_path = './AlexNet.pth'
best_acc = 0.0  # best accuracy so far (each epoch's model gets a fresh score)
for epoch in range(10):
    net.train()  # switch on training mode
    running_loss = 0.0
    t1 = time.perf_counter()
    for step, data in enumerate(train_loader, start=0):
        images, labels = data
        optimizer.zero_grad()  # zero every parameter's gradient, otherwise gradients accumulate
        outputs = net(images.to(device))  # forward pass through the network
        loss = loss_function(outputs, labels.to(device))  # error between output and target
        # (the excerpt ends here; the loop presumably continues with the usual backward/step)
        loss.backward()
        optimizer.step()
Code example #14
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),
        "val":
        transforms.Compose([
            transforms.Resize((224, 224)),  # cannot 224, must (224, 224)
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "../.."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(
        image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=4,
                                                  shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))
    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()
    #
    # def imshow(img):
    #     img = img / 2 + 0.5  # unnormalize
    #     npimg = img.numpy()
    #     plt.imshow(np.transpose(npimg, (1, 2, 0)))
    #     plt.show()
    #
    # print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
    # imshow(utils.make_grid(test_image))

    net = AlexNet(num_classes=5, init_weights=True)

    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    # pata = list(net.parameters())
    optimizer = optim.Adam(net.parameters(), lr=0.0002)

    epochs = 10
    save_path = './AlexNet.pth'
    best_acc = 0.0
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')
Code example #15
File: server.py  Project: Schenk75/MNIST-STM32
bps = 115200
flag = False  # whether the serial port opened successfully
data = ''
# custom gray threshold: above it maps to white, below to black (inverted relative to the original image)
threshold = 100
# lookup table for binarization
table = []
for i in range(256):
    if i < threshold:
        table.append(1)
    else:
        table.append(0)

model = AlexNet()
model.load_state_dict(torch.load('./logs/mnist/best.pth'))
model = model.to(device)

try:
    # open the serial port and get the port object
    ser = serial.Serial(port, bps)
    # check whether it opened successfully
    if ser.is_open:
        flag = True
        print('Server start!')
except Exception as e:
    print("ERROR: ", e)

if flag:
    while True:
        time.sleep(0.5)
        data += ser.read_all().decode('ascii')
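
The 256-entry table is the standard lookup for PIL's Image.point: applied to a grayscale image it produces the inverted binary image the MNIST model expects. A sketch of that step (the file name is hypothetical):

from PIL import Image

img = Image.open('digit.png').convert('L')  # grayscale
binary = img.point(table, '1')              # map every gray level through the table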
Code example #16
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("using {} device.".format(device))

    batch_size = 16
    epochs = 20

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        "val":
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(),
                                             "."))  # get data root path
    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    assert os.path.exists(image_path), "{} path does not exist.".format(
        image_path)
    train_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "train"),
                                         transform=data_transform["train"])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=nw)

    validate_dataset = datasets.ImageFolder(root=os.path.join(
        image_path, "val"),
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=nw)

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))

    # create model
    net = AlexNet(num_classes=5)

    net.to(device)

    # define loss function
    loss_function = nn.CrossEntropyLoss()

    # construct an optimizer
    params = [p for p in net.parameters() if p.requires_grad]
    optimizer = optim.Adam(params, lr=0.0001)

    best_acc = 0.0
    save_path = 'weights/alexnet.pth'
    os.makedirs(os.path.dirname(save_path), exist_ok=True)  # torch.save will not create the folder itself
    train_steps = len(train_loader)
    for epoch in range(epochs):
        # train
        net.train()
        running_loss = 0.0
        train_bar = tqdm(train_loader)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            logits = net(images.to(device))
            loss = loss_function(logits, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                # loss = loss_function(outputs, test_labels)
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()

                val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1, epochs)
        val_accurate = acc / val_num
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(net.state_dict(), save_path)

    print('Finished Training')
Code example #17
# test_data_iter = iter(validate_loader)
# test_image, test_label = test_data_iter.next()
#
# def imshow(img):
#     img = img / 2 + 0.5  # unnormalize
#     npimg = img.numpy()
#     plt.imshow(np.transpose(npimg, (1, 2, 0)))
#     plt.show()
#
# print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
# imshow(utils.make_grid(test_image))


net = AlexNet(num_classes=5, init_weights=True)

net.to(device)
loss_function = nn.CrossEntropyLoss()
# pata = list(net.parameters())
# lr: learning_rate
optimizer = optim.Adam(net.parameters(), lr=0.0002)

save_path = r'D:\Document\GitHub\deep-learning-for-image-processing\pytorch_classification\Test2_alexnet\AlexNet.pth'
best_acc = 0.0
epo = 20

for epoch in range(epo):
    # train
    net.train()
    running_loss = 0.0
    t1 = time.perf_counter()
    # load one batch at a time from train_loader
    # (the excerpt ends here; the loop presumably continues as in the other examples)
    for step, data in enumerate(train_loader, start=0):
        images, labels = data
        optimizer.zero_grad()
        outputs = net(images.to(device))
        loss = loss_function(outputs, labels.to(device))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
Code example #18
def train():
    try:
        os.makedirs(opt.checkpoints_dir)
    except OSError:
        pass
    if torch.cuda.device_count() > 1:
        model = torch.nn.parallel.DataParallel(
            AlexNet(num_classes=opt.num_classes))
    else:
        model = AlexNet(num_classes=opt.num_classes)
    if os.path.exists(MODEL_PATH):
        model.load_state_dict(
            torch.load(MODEL_PATH, map_location=lambda storage, loc: storage))
    model.to(device)
    ################################################
    # Set loss function and Adam optimizer
    ################################################
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)

    for epoch in range(opt.epochs):
        # train for one epoch
        print(f"\nBegin Training Epoch {epoch + 1}")
        # Calculate and return the top-k accuracy of the model
        # so that we can track the learning process.
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()

        for i, data in enumerate(train_dataloader):
            # get the inputs; data is a list of [inputs, labels]
            inputs, targets = data
            inputs = inputs.to(device)
            targets = targets.to(device)

            # compute output
            output = model(inputs)
            loss = criterion(output, targets)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, targets, topk=(1, 5))  # topk=(1, 2) would report top-2 as Prec@5
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1, inputs.size(0))
            top5.update(prec5, inputs.size(0))

            # compute gradients in a backward pass
            optimizer.zero_grad()
            loss.backward()

            # Call step of optimizer to update model params
            optimizer.step()

            print(
                f"Epoch [{epoch + 1}] [{i + 1}/{len(train_dataloader)}]\t"
                f"Loss {loss.item():.4f}\t"
                f"Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t"
                f"Prec@5 {top5.val:.3f} ({top5.avg:.3f})",
                end="\r")

        # save model file
        torch.save(model.state_dict(), MODEL_PATH)
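
AverageMeter and accuracy follow the pattern of the classic PyTorch ImageNet example; minimal versions for reference (a sketch, assuming output holds (N, C) logits):

class AverageMeter:
    """Tracks the latest value and the running average."""
    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0, 0.0, 0, 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def accuracy(output, target, topk=(1,)):
    maxk = max(topk)
    _, pred = output.topk(maxk, 1, True, True)  # (N, maxk) predicted indices
    correct = pred.eq(target.view(-1, 1))       # broadcast compare against the labels
    return [correct[:, :k].float().sum().item() * 100.0 / target.size(0) for k in topk]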
Code example #19
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    data_transform = {
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),
        "val":
        transforms.Compose([
            transforms.Resize((224, 224)),  # cannot 224, must (224, 224)
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    }

    data_root = os.path.abspath(os.path.join(os.getcwd(), "./"))
    image_path = os.path.join(data_root, "flower_data")
    train_dataset = datasets.ImageFolder(root=image_path + "/train",
                                         transform=data_transform['train'])
    train_num = len(train_dataset)

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx
    cla_dict = dict((val, key) for key, val in flower_list.items())
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 8
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=0)

    validate_dataset = datasets.ImageFolder(root=image_path + "/val",
                                            transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(validate_dataset,
                                                  batch_size=4,
                                                  shuffle=True,
                                                  num_workers=0)

    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()
    #
    # def imshow(img):
    #     img = img / 2 + 0.5  # unnormalize
    #     npimg = img.numpy()
    #     plt.imshow(np.transpose(npimg, (1, 2, 0)))
    #     plt.show()
    #
    # print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
    # imshow(utils.make_grid(test_image))

    net = AlexNet(num_class=5)
    print(net)
    net.to(device)
    loss_function = nn.CrossEntropyLoss()
    # pata = list(net.parameters())
    optimizer = optim.Adam(net.parameters(), lr=0.0002)

    save_path = './AlexNet.pth'
    best_acc = 0.0
    for epoch in range(10):
        # train
        net.train()
        running_loss = 0.0
        t1 = time.perf_counter()
        for step, data in enumerate(train_loader, start=0):
            images, labels = data
            optimizer.zero_grad()
            outputs = net(images.to(device))
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            # print train process
            rate = (step + 1) / len(train_loader)
            a = "*" * int(rate * 50)
            b = "." * int((1 - rate) * 50)
            print("\rtrain loss: {:^3.0f}%[{}->{}]{:.3f}".format(
                int(rate * 100), a, b, loss),
                  end="")
        print()
        print(time.perf_counter() - t1)

        # validate
        net.eval()
        acc = 0.0  # accumulate accurate number / epoch

        # no loss gradients are computed during validation
        with torch.no_grad():
            for val_data in validate_loader:
                val_images, val_labels = val_data
                outputs = net(val_images.to(device))
                predict_y = torch.max(outputs, dim=1)[1]
                acc += (predict_y == val_labels.to(device)).sum().item()
            val_accurate = acc / val_num
            if val_accurate > best_acc:
                best_acc = val_accurate
                torch.save(net.state_dict(), save_path)
            print('[epoch %d] train_loss: %.3f  test_accuracy: %.3f' %
                  (epoch + 1, running_loss / (step + 1), val_accurate))  # step is the last index, so divide by step + 1

    print('Finished Training')
Code example #20
    num_workers=0)

# store the index:label dictionary
# dict of class:index {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
flower_list = train_dataset.class_to_idx
# swap the keys and values of flower_list
cla_dict = dict((val, key) for key, val in flower_list.items())

# write cla_dict into a json file
json_str = json.dumps(cla_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)

# training process
net = AlexNet(num_classes=5, init_weights=True)  # instantiate the network (5 output classes, initialize the weights)
net.to(device)  # assign the network to the chosen device (GPU/CPU) for training
loss_function = nn.CrossEntropyLoss()  # cross-entropy loss
optimizer = optim.Adam(net.parameters(), lr=0.0002)  # optimizer (trainable parameters, learning rate)

save_path = './AlexNet.pth'
best_acc = 0.0

for epoch in range(150):
    ########################################## train ###############################################
    net.train()  # enable Dropout during training
    running_loss = 0.0  # running_loss is reset at every epoch
    time_start = time.perf_counter()  # time one training epoch

    for step, data in enumerate(train_loader, start=0):  # iterate over the training set; step counts from 0
        images, labels = data  # get the images and labels of the training batch
        optimizer.zero_grad()  # clear the gradients from previous steps
        # (the excerpt ends here; the loop presumably continues with the usual forward/backward/step)
        outputs = net(images.to(device))
        loss = loss_function(outputs, labels.to(device))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
Code example #21
File: main.py  Project: yldang/MLPJ
        # print(alexnet_dict.keys())
        print("Load from pretrained")

    # Freeze parameter
    if freeze_layer:
        for name, value in alexnet.named_parameters():
            if (name != "classifier.6.weight") and (name != "classifier.6.bias"):
                value.requires_grad = False
        print("Freeze layer")

    # train on multiple GPUs
    DEVICE_IDS = list(range(GPU_NUM))
    # alexnet = alexnet.to(device)
    if GPU_NUM > 1:
        alexnet = torch.nn.parallel.DataParallel(alexnet, device_ids=DEVICE_IDS)
    alexnet = alexnet.to(device)
    print(alexnet)
    print("Network created")

    # data normalization
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    if not testing_only:
        # create data loader
        dataloader_train = DataLoader(
            datasets.CIFAR10(root='./data', train=True, transform=transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomCrop(32, 4),
                transforms.ToTensor(),
                normalize,
Code example #22
def model_retrain(number_class):
    model = AlexNet(number_class)
    model = model.to(device)
    return model
Code example #23
def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else
                          "cpu")  # torch.device selects the device used for training
    print("using {} device.".format(device))

    data_transform = {  # data_transform: the preprocessing pipelines
        "train":
        transforms.Compose([
            transforms.RandomResizedCrop(224),  # random crop to 224*224
            transforms.RandomHorizontalFlip(),  # random horizontal flip
            transforms.ToTensor(),  # convert to a tensor
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),  # normalization
        "val":
        transforms.Compose([
            transforms.Resize((224, 224)),  # * cannot 224, must (224, 224)
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    }
    print(os.getcwd())
    # data_root = os.path.abspath(os.path.join(os.getcwd(), "../.."))  # get data root path
    # First obtain the root directory of the data set via os.getcwd()
    # ^ os.getcwd() returns the working directory of the current process, not the directory of this file
    # "../.." means two directory levels up; it is a relative path, so it depends on where you run from
    # ^ os.path.join concatenates paths; joined here it yields two levels above the current directory
    # ^ os.path.abspath() returns the absolute (full) path of the given file or directory

    data_root = os.path.abspath(os.getcwd())

    image_path = os.path.join(data_root, "data_set",
                              "flower_data")  # flower data set path
    # equivalent to image_path = data_root + "/data_set/flower_data"
    # assert os.path.exists(image_path), "{} path does not exist.".format(image_path)

    train_dataset = datasets.ImageFolder(
        root=os.path.join(image_path, "train"),  # load the data set; "train" selects the training split
        transform=data_transform["train"])  # use the "train" preprocessing
    train_num = len(train_dataset)  # how many images the training set holds

    # {'daisy':0, 'dandelion':1, 'roses':2, 'sunflower':3, 'tulips':4}
    flower_list = train_dataset.class_to_idx  # * .class_to_idx gives the index assigned to each class name
    cla_dict = dict(
        (val, key) for key, val in flower_list.items())  # * turn the dict's key/value pairs into value/key pairs
    # write dict into json file
    json_str = json.dumps(cla_dict, indent=4)  # serialize the dict as json
    with open('class_indices.json', 'w') as json_file:
        json_file.write(json_str)

    batch_size = 32
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using {} dataloader workers every process'.format(nw))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,  # load the data set
        batch_size=batch_size,
        shuffle=True,  # draw shuffled batches of batch_size samples
        num_workers=nw)  # on Windows num_workers is usually 0; on Linux workers enable parallel loading

    validate_dataset = datasets.ImageFolder(
        root=os.path.join(
            image_path, "val"
        ),  # root=os.path.join(image_path, "val") is equivalent to root=image_path+"/val"
        transform=data_transform["val"])
    val_num = len(validate_dataset)
    validate_loader = torch.utils.data.DataLoader(
        validate_dataset,
        batch_size=batch_size,
        shuffle=False,  #   batch_size=4, shuffle=True,
        num_workers=nw)

    print("using {} images for training, {} images for validation.".format(
        train_num, val_num))

    # Below is a demo for inspecting the data set
    # Note: set batch_size=4, shuffle=True (see line 60) before viewing:

    # test_data_iter = iter(validate_loader)
    # test_image, test_label = test_data_iter.next()

    # def imshow(img):
    #     img = img / 2 + 0.5  # unnormalize
    #     npimg = img.numpy()
    #     plt.imshow(np.transpose(npimg, (1, 2, 0)))
    #     plt.show()

    # print(' '.join('%5s' % cla_dict[test_label[j].item()] for j in range(4)))
    # imshow(utils.make_grid(test_image))

    net = AlexNet(num_classes=5, init_weights=True)  # 5-class flower data set, init_weights set to True
    # instantiate the model object net

    net.to(device)  # ^ net.to(device) places the network on the device chosen above
    loss_function = nn.CrossEntropyLoss()  # define the loss function: multi-class cross entropy
    # pata = list(net.parameters())                             # for debugging: inspect the model parameters
    optimizer = optim.Adam(
        net.parameters(),
        lr=0.0002)  # define the Adam optimizer over all trainable parameters net.parameters(), with lr=0.0002

    epochs = 10
    save_path = './AlexNet.pth'  # path for saving the weights
    best_acc = 0.0  # best accuracy, initialized to 0 and updated below
    train_steps = len(train_loader)

    for epoch in range(epochs):  # iterate 10 times
        # * dropout is used during training only, never at prediction time

        # train                                                 #  & training phase
        net.train()  # net.train() enters the training phase and enables dropout
        running_loss = 0.0  # tracks the average training loss
        train_bar = tqdm(train_loader)  # also used to time one epoch
        for step, data in enumerate(train_bar):  # iterate over the data set; it splits into images and labels
            images, labels = data
            optimizer.zero_grad()  # zero the gradients
            outputs = net(
                images.to(device))  # forward pass: move the images to the device and run them through net
            loss = loss_function(
                outputs, labels.to(device))  # loss between predictions and ground truth; the labels also go to the device
            loss.backward()  # backpropagate to every node
            optimizer.step()  # update every node's parameters

            # print statistics
            running_loss += loss.item()  # accumulate the loss

            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, epochs, loss)  # to show the training progress

        # validate                                                  # & validation phase
        net.eval()  # net.eval() enters the evaluation phase and disables dropout
        acc = 0.0  # accumulate accurate number / epoch
        with torch.no_grad():  # * torch.no_grad() disables parameter tracking: no loss gradients during validation
            val_bar = tqdm(validate_loader)
            for val_data in val_bar:
                val_images, val_labels = val_data  # split into images and their labels
                outputs = net(
                    val_images.to(device))  # run them through net; the output has shape [batch, num_classes]
                predict_y = torch.max(
                    outputs, dim=1
                )[1]  # max over dim=1 (the class dimension); [1] keeps only the argmax position, giving predict_y
                acc += torch.eq(predict_y, val_labels.to(
                    device)).sum().item()  # count correct predictions   # ^ .item() extracts the plain number
                # acc += (predict_y == val_labels.to(device)).sum().item()      # equivalent

        val_accurate = acc / val_num  # divide the correct count by the number of samples for the average accuracy
        print('[epoch %d] train_loss: %.3f  val_accuracy: %.3f' %
              (epoch + 1, running_loss / train_steps, val_accurate))

        if val_accurate > best_acc:  # if the current accuracy beats the best so far
            best_acc = val_accurate  # update it
            torch.save(net.state_dict(), save_path)

    print('Finished Training')