Example #1
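# Imports assumed for this snippet (the original file's import block is not shown);
# Resize is the project's own transform, imported here the same way the other
# garbage-classification snippets do.
import os
import pickle

import numpy as np
from PIL import Image

from dataset import Resize


# Compute the per-channel mean and std of the training images (pixel values
# rescaled to [0, 1]) and pickle the result for later use with Normalize().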
def generate_mean_std():
    means = [0.0, 0.0, 0.0]
    stdevs = [0.0, 0.0, 0.0]

    img_size = 288

    img_path = './train_data_v2/'
    img_paths = os.listdir(img_path)
    resizeor = Resize((img_size, img_size))
    data_len = len(img_paths)
    for i, name in enumerate(img_paths):
        img = Image.open(os.path.join(img_path, name)).convert('RGB')
        img = resizeor(img)
        img = np.array(img)
        # a PIL image converts to an (H, W, C) array, so the channel is the last axis
        for j in range(3):
            means[j] += img[:, :, j].mean()
            stdevs[j] += img[:, :, j].std()
        print('{} / {} complete'.format(i + 1, data_len))

    means = np.asarray(means) / (data_len * 255)    # average over images, rescale from [0, 255] to [0, 1]
    stdevs = np.asarray(stdevs) / (data_len * 255)

    print('normMean = {}'.format(means))
    print('normStd = {}'.format(stdevs))

    with open('./mean_and_std.txt', 'wb') as f:
        pickle.dump(means, f)
        pickle.dump(stdevs, f)
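

# Parse the YAML config, restore the most recent checkpoint for the configured
# model, and evaluate it on the ACDC test split.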
def main():
    parser = argparse.ArgumentParser(
        description='Binary MRI Quality Classification')
    parser.add_argument('--yaml_path',
                        type=str,
                        metavar='YAML',
                        default="config/acdc_binary_classification.yaml",
                        help='Enter the path for the YAML config')
    args = parser.parse_args()

    # The config file uses a custom !join tag, handled by yaml_var_concat.
    yaml.add_constructor("!join", yaml_var_concat)

    yaml_path = args.yaml_path
    with open(yaml_path, 'r') as f:
        train_args = yaml.load(f, Loader=yaml.Loader)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    composed = transforms.Compose([
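        # Resize to 224x224, replicate the single MRI slice channel to three
        # channels (as OneToThreeDimension's name suggests), then normalize with
        # the ImageNet statistics expected by the pretrained backbone.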
        Resize((224, 224)),
        OneToThreeDimension(),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    acdc_dataset = ACDCDataset(train_args["pos_samps_test"],
                               train_args["neg_samps_test"],
                               transform=composed)

    dataloader = DataLoader(acdc_dataset,
                            batch_size=train_args["batch_size"],
                            shuffle=False,
                            num_workers=4)
    dataset_size = len(acdc_dataset)

    model_ft = get_model(train_args["model"],
                         device,
                         pretrained=train_args["pretrained"])
    state = get_most_recent_model(train_args["model"],
                                  train_args["model_save_dir"])
    model_ft.load_state_dict(state)

    test(model_ft, dataloader, dataset_size, device=device)
Example #3
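# Imports assumed for this snippet (the original file's import block is not shown);
# PosterDataset, Resize and ToTensor follow the `from dataset import ...` pattern
# used by the other poster examples, and resnet101 is taken from torchvision here
# as a stand-in for whatever resnet implementation the original project used.
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.models import resnet101

from dataset import PosterDataset, Resize, ToTensor
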
bs = 32   # train batch size (assumed value; the original uses `bs` below without defining it)
tbs = 8   # test batch size
epochs = 50
lr = 1e-3

def props_to_onehot(props):
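    """Convert a batch of probability vectors to one-hot predictions via argmax."""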
    if isinstance(props, list):
        props = np.array(props)
    a = np.argmax(props, axis=1)
    b = np.zeros((len(a), props.shape[1]))
    b[np.arange(len(a)), a] = 1
    return b

transformed_dataset = PosterDataset(csv_file='./data.txt',
                                    root_dir='../data/posters/posters',
                                    transform=transforms.Compose([
                                        Resize(),
                                        ToTensor()
                                    ]))
train_size = int(0.8 * len(transformed_dataset))
test_size = len(transformed_dataset) - train_size  # make the split sizes sum to the dataset length
train_dataset, test_dataset = random_split(transformed_dataset, [train_size, test_size])
data_loader1 = DataLoader(train_dataset, batch_size=bs, shuffle=True)
data_loader2 = DataLoader(test_dataset, batch_size=tbs, shuffle=True)
print('train batches: ', len(data_loader1))
print('test batches: ', len(data_loader2))

device = torch.device('cuda')
model = resnet101().to(device)
criteon = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
train_loss = []
Example #4
import torch
from torch import nn
from torchvision import transforms, utils
from torch.utils.data import DataLoader
from dataset import PosterDataset, Resize, ToTensor
import numpy as np
import warnings
warnings.filterwarnings("ignore")

bs = 32

transformed_dataset = PosterDataset(csv_file='./data.txt',
                                    root_dir='../data/fgsm/FGSM',
                                    transform=transforms.Compose(
                                        [Resize(), ToTensor()]))
data_loader = DataLoader(transformed_dataset, batch_size=bs, shuffle=False)
print('train batches: ', len(data_loader))

device = torch.device('cuda')
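# Load the original model plus the FGSM and PGD variants (each was saved with
# torch.save as a full pickled module, hence the bare torch.load calls).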
org_model = torch.load('../data/models/origin_model.pkl').cuda()
fgsm_model = torch.load('../data/models/fgsm_model.pkl').cuda()
pgd_model = torch.load('../data/models/pgd_model.pkl').cuda()


def props_to_onehot(props):
    if isinstance(props, list):
        props = np.array(props)
    a = np.argmax(props, axis=1)
    b = np.zeros((len(a), props.shape[1]))
    b[np.arange(len(a)), a] = 1
    return b
Example #5
    parser.add_argument('--image_size',
                        type=int,
                        help='Image size (height, width) in pixels.',
                        default=224)
    parser.add_argument('--batch_size',
                        type=int,
                        help='Number of images to process in a batch.',
                        default=16)
    parser.add_argument('--num_of_workers',
                        type=int,
                        help='Number of subprocesses to use for data loading.',
                        default=0)
    return parser.parse_args(argv)


if __name__ == '__main__':
    args = parse_arguments(sys.argv[1:])
    eval_transform = transforms.Compose([
        Resize(args.image_size),
        transforms.ToTensor(),
    ])

    test_dataset = RGBD_Dataset(args.test_dataset_csv,
                                input_channels=args.input_channels,
                                transform=eval_transform)

    acc, loss = evaluation(test_dataset,
                           batch_size=args.batch_size,
                           num_of_workers=args.num_of_workers,
                           pretrained_model_path=args.pretrained_model_path)
num_classes = 40
BATCH_SIZE = 128
LEARNING_RATE = 1e-3

for index, model_name in enumerate(model_names):

    # prepare data --------------------------------
    model_configs = pretrainedmodels.pretrained_settings[model_name][
        'imagenet']
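    # pretrainedmodels publishes each model's ImageNet preprocessing settings;
    # reuse its input_size, mean and std for the transform pipeline below.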
    input_size = model_configs['input_size']
    mean = model_configs['mean']
    std = model_configs['std']
    img_size = (input_size[1], input_size[2])
    # print(img_size, mean, std)
    data_transforms = transforms.Compose([
        Resize(img_size),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(30),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    dataset = GarbageDataset('./garbage_classify_v2/train_data_v2/',
                             './test.csv', data_transforms)

    dataloader = DataLoader(dataset,
                            batch_size=BATCH_SIZE,
                            shuffle=True,
                            num_workers=8)

    # define the model to fine-tune --------------------------------
import time
import torch
import torch.nn as nn
import tqdm
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
from dataset import Resize, GarbageDataset

img_size = 224

means = [0.20903548, 0.21178319, 0.21442725]
stds = [0.12113936, 0.12205944, 0.12315971]

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

data_transforms = transforms.Compose([
    Resize((img_size, img_size)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(30),
    transforms.ToTensor(),
    transforms.Normalize(means, stds)
])

dataset = GarbageDataset('./train_data_v2', './test.csv', data_transforms)
# use '../../input/garbage_dataset/garbage_classify_v2/train_data_v2/' when running on the server
dataloader = DataLoader(dataset, batch_size=64, shuffle=True)

num_classes = 40
model_ft = models.resnet50(pretrained=True)

# Freeze the pretrained ResNet-50 backbone so only layers added on top are trained.
for param in model_ft.parameters():
    param.requires_grad = False
Example #8
import os
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from resnet import resnet34
from dataset import PlateData, Resize, ToTensor, RandomNoise
from model import Model

# prepare data
root_dir = '../../final/train_data/train-data'
tsfm = transforms.Compose([Resize((70, 356)), ToTensor()])
trainset = PlateData('data/all-data.txt', root_dir, transform=tsfm)
testset = PlateData('data/all-data.txt', root_dir, transform=tsfm)
trainloader = DataLoader(trainset, batch_size=8, shuffle=True)
test_trainloader = DataLoader(trainset, batch_size=50)
testloader = DataLoader(testset, batch_size=100)

device = 'cuda'

# prepare model
model = resnet34(pretrained=False)
model.load_state_dict(torch.load('./model-ckpt/resnet34-best.pth'),
                      strict=True)

# model = Model()
model.to(device)

# define loss function and optimizer
import torch
import torch.nn as nn
import pandas as pd
import pretrainedmodels
from torch.utils.data import DataLoader
from torchvision import transforms, models
from dataset import Resize, AllImageDataset

BATCH_SIZE = 64
num_classes = 40
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

model_ft = models.resnet18(pretrained=True)
mean = [0.20903548, 0.21178319, 0.21442725]
std = [0.12113936, 0.12205944, 0.12315971]
# print(img_size, mean, std)
data_transforms = transforms.Compose([
    Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(30),
    transforms.ToTensor(),
    transforms.Normalize(mean, std)
])

dataset = AllImageDataset('./tot_data', data_transforms)
dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False)

# print(model_ft)
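# Freeze every pretrained layer, then replace the final fully connected layer
# with a fresh 40-class head; only the new head's parameters stay trainable.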
for name, params in model_ft.named_parameters():
    params.requires_grad = False
model_ft_ftr = model_ft.fc.in_features
model_ft.fc = nn.Linear(model_ft_ftr, num_classes)
model_ft.to(device)
    parser.add_argument('--yaml_path', type=str, metavar='YAML',
                        default="config/acdc_binary_classification.yaml",
                        help='Enter the path for the YAML config')
    args = parser.parse_args()

    yaml.add_constructor("!join", yaml_var_concat)

    yaml_path = args.yaml_path
    with open(yaml_path, 'r') as f:
        train_args = DictAsMember(yaml.load(f, Loader=yaml.Loader))

    os.makedirs(train_args["model_save_dir"], exist_ok=True)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    composed = transforms.Compose([Resize((224, 224)),
                                   OneToThreeDimension(),
                                   ToTensor(),
                                   Normalize(mean=[0.485, 0.456, 0.406],
                                             std=[0.229, 0.224, 0.225]),
                                  ])
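    # Build one ACDCDataset and one DataLoader per split ("train", "val", "test")
    # from the positive/negative sample lists given in the config.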
    acdc_dataset = {x: ACDCDataset(train_args["pos_samps_"+x],
                                  train_args["neg_samps_"+x],
                                  transform=composed)
                   for x in ["train", "val", "test"]}

    dataloader = {x: DataLoader(acdc_dataset[x],
                                batch_size=train_args["batch_size"],
                                shuffle=True, num_workers=4,
                                # sampler=sampler[x]
                                )