# ===================== 示例 #1 (Example #1) =====================
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

from class2 import RMBDataset
from model.lenet import LeNet

# Absolute directory containing this script, for building data paths.
# NOTE(review): `import os` / `import torch` are not visible in this excerpt —
# presumably they sit above the chunk boundary; confirm at file top.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Prefer the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# ========================== 1: "num_samples=0" demo ==========================
# Flip to 1 to run this section (it points at a dataset path that may not
# exist, which is what triggers the num_samples=0 error being demonstrated).
flag = 0
if flag:
    train_dir = "/dataset/rmb_split/train"
    train_data = RMBDataset(data_dir=train_dir)

    # Build the DataLoader over the training split.
    train_loader = DataLoader(train_data, batch_size=16, shuffle=True)

# ========================== 2: FiveCrop + per-crop ToTensor ==========================
# Demonstrates the error below: applying ToTensor to each crop inside a Lambda
# (stacking the results) is presumably the fix for passing a tuple of crops to
# a transform expecting a single image — TODO confirm against the lesson notes.
# TypeError: pic should be PIL Image or ndarray. Got <class 'torch.Tensor'>
# flag = 0
flag = 1
if flag:
    train_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.FiveCrop(200),
        # Stack the five per-crop tensors into one tensor along a new dim.
        transforms.Lambda(lambda crops: torch.stack(
            [(transforms.ToTensor()(crop)) for crop in crops])),
        # NOTE(review): this Compose list is cut off by the chunk boundary —
        # its closing brackets (and any later steps) are not in this excerpt.
# ===================== 示例 #2 (Example #2) =====================
    # Log a random 512x512 RGB image; dataformats="HWC" tells add_image the
    # tensor layout is height x width x channels, matching torch.rand(512, 512, 3).
    # NOTE(review): `writer` is created above this excerpt — presumably a
    # SummaryWriter; the enclosing `if` header is also outside this chunk.
    fake_img = torch.rand(512, 512, 3)
    writer.add_image("fake_img", fake_img, 5, dataformats="HWC")

    writer.close()

# ----------------------------------- make_grid -----------------------------------
# Flip to 1 to log a grid of training images to TensorBoard.
flag = 0
if flag:
    split_dir = "/home/liuhy/res/deep-learning/00.框架/pytorch/class2/01.rmb/dataset/rmb_split"
    train_dir = os.path.join(split_dir, "train")

    # Resize + tensor conversion is all that's needed for visualization.
    transform_compose = transforms.Compose([
        transforms.Resize((64, 128)),
        transforms.ToTensor(),
    ])
    train_data = RMBDataset(data_dir=train_dir, transform=transform_compose)
    train_loader = DataLoader(train_data, batch_size=16, shuffle=True)

    # Grab one batch and lay it out as a 4-per-row grid, scaling each image
    # to [0, 1] independently.
    data_batch, label_batch = next(iter(train_loader))
    img_grid = vutils.make_grid(data_batch, nrow=4, normalize=True, scale_each=True)

    writer = SummaryWriter(comment='_grid', filename_suffix="_grid")
    writer.add_image("input img", img_grid, 0)
    writer.close()

# ----------------------------------- add_graph -----------------------------------
# flag = 0
flag = 1
if flag:
    # NOTE(review): this section looks truncated by the chunk boundary — the
    # banner advertises add_graph, but only the SummaryWriter creation is
    # visible here; the next line dedents into unrelated setup code.
    writer = SummaryWriter(comment='_graph', filename_suffix="_graph")
# Training pipeline: resize, random padded crop, occasional grayscale,
# tensor conversion, then per-channel normalization.
# (norm_mean / norm_std / train_dir / valid_dir / BATCH_SIZE are defined
# elsewhere in the file, outside this excerpt.)
_train_steps = [
    transforms.Resize((32, 32)),
    transforms.RandomCrop(32, padding=4),
    transforms.RandomGrayscale(p=0.8),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
]
train_transform = transforms.Compose(_train_steps)

# Validation pipeline: deterministic — no augmentation, same normalization.
_valid_steps = [
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
]
valid_transform = transforms.Compose(_valid_steps)

# Build the dataset instances.
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# Build the DataLoaders: shuffle only the training split.
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(valid_data, batch_size=BATCH_SIZE)

# ============================ step 2/5: model ============================

# Two-class LeNet from the project's model.lenet module; initialize_weights
# is a custom helper on that class (its scheme is not visible here).
net = LeNet(classes=2)
net.initialize_weights()

# ============================ step 3/5: loss function ============================
criterion = nn.CrossEntropyLoss()  # cross-entropy loss for the 2-class task