Example #1
import sys

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from pytorch_pretrained_vit import ViT  # ViT('B_16_imagenet1k', ...) comes from this package

from p1_dataset import p1_data

test_path = sys.argv[1]
output_path = sys.argv[2]
model_path = './p1_model.pth'

batch_size = 8
workers = 2
num_classes = 37

test_tfm = transforms.Compose([
    transforms.Resize((384, 384)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

test_dataset = p1_data(test_path, mode='test', transform=test_tfm)

# Create the dataloader
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=False,
                         num_workers=workers)

# Decide which device we want to run on
device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")

model = ViT('B_16_imagenet1k', pretrained=True, num_classes=num_classes)
model = model.to(device)
model.load_state_dict(torch.load(model_path, map_location=device))
model.eval()

predictions = []
filenames = []
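
The snippet stops after initializing the two result lists. Below is a minimal sketch of the missing inference loop and output step; it assumes p1_data in 'test' mode yields (image, filename) pairs and that output_path expects a filename,label CSV, neither of which is stated in the original.

with torch.no_grad():
    for images, names in test_loader:
        logits = model(images.to(device))
        predictions.extend(logits.argmax(dim=-1).cpu().tolist())
        filenames.extend(names)

# write the assumed filename,label CSV
with open(output_path, 'w') as f:
    f.write('filename,label\n')
    for name, pred in zip(filenames, predictions):
        f.write(f'{name},{pred}\n')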
Example #2
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import resnet152

from p1_dataset import p1_data

model_path = './p1_model.ckpt'

root = './hw1_data/p1_data/'

tfm = transforms.Compose([
    transforms.Resize([256, 256]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])
batch_size = 32

# construct dataset and dataloader

valid_set = p1_data(root, 'valid', transform=tfm)
valid_loader = DataLoader(valid_set,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=8,
                          pin_memory=True)

# model
device = "cuda" if torch.cuda.is_available() else "cpu"

model = resnet152(pretrained=False)
model.fc = nn.Linear(2048, 50)  # head used during training (50 classes)
model.load_state_dict(torch.load(model_path, map_location=device))
model.fc = nn.Identity()        # drop the head so the model outputs 2048-d features
model = model.to(device)
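
The feature-extraction step this setup leads to is not shown. A sketch continuing the snippet above, assuming p1_data in 'valid' mode yields (image, label) pairs and that the 2048-d descriptors are collected for later analysis (e.g. a PCA or t-SNE plot):

model.eval()

features, labels = [], []
with torch.no_grad():
    for images, lbls in valid_loader:
        feats = model(images.to(device))  # (batch, 2048) thanks to the nn.Identity head
        features.append(feats.cpu())
        labels.extend(lbls.tolist())
features = torch.cat(features).numpy()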
Example #3
import sys

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import resnet152

from p1_dataset import p1_data

# create testing dataset
model_path = sys.argv[1]
in_dir = sys.argv[2]
out_path = sys.argv[3]
batch_size = 32

test_tfm = transforms.Compose([
    transforms.Resize([256, 256]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])    
])

test_set = p1_data(in_dir, 'test', transform=test_tfm)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
# create model and load weights from checkpoint
device = "cuda" if torch.cuda.is_available() else "cpu"
model = resnet152(pretrained=True)
# modify the last layer
model.fc = nn.Linear(2048, 50)

model.load_state_dict(torch.load(model_path, map_location=device))
model = model.to(device)

predict = []
filenames = []
model.eval() # set the model to evaluation mode

for batch in test_loader:
    # assumes p1_data in 'test' mode yields (image, filename) pairs
    images, names = batch
    with torch.no_grad():
        logits = model(images.to(device))
    predict.extend(logits.argmax(dim=-1).cpu().tolist())
    filenames.extend(names)
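
To finish the script, the collected predictions would be written to out_path; a sketch assuming a filename,label CSV output format (not specified in the original):

import csv

with open(out_path, 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['filename', 'label'])
    writer.writerows(zip(filenames, predict))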
Example #4
import torch
from torch.utils.data import DataLoader
from torchvision import transforms

from p1_dataset import p1_data

# train_dir, valid_dir, batch_size and workers are assumed to be defined
# earlier in the original script (Example #1 uses batch_size = 8, workers = 2).
train_tfm = transforms.Compose([
    transforms.RandomRotation(30),
    transforms.RandomResizedCrop(384, scale=(0.8, 1.0)),
    transforms.ColorJitter(brightness=0.3),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

test_tfm = transforms.Compose([
    transforms.Resize((384, 384)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

train_dataset = p1_data(train_dir, mode='train', transform=train_tfm)
valid_dataset = p1_data(valid_dir, mode='valid', transform=test_tfm)

# Create the dataloader
train_loader = DataLoader(train_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=workers)
valid_loader = DataLoader(valid_dataset,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=workers)

# Decide which device we want to run on
device = torch.device("cuda" if (torch.cuda.is_available()) else "cpu")
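
The example ends right after selecting the device. A minimal training-loop skeleton that could follow is sketched below; the model (the same pytorch_pretrained_vit ViT as in Example #1), the 37-class head, the optimizer, the loss and all hyperparameters are illustrative assumptions rather than the original recipe, and p1_data is assumed to yield (image, label) pairs.

from pytorch_pretrained_vit import ViT

model = ViT('B_16_imagenet1k', pretrained=True, num_classes=37).to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

for epoch in range(10):
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()

    # quick validation pass
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in valid_loader:
            preds = model(images.to(device)).argmax(dim=-1).cpu()
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    print(f'epoch {epoch}: val acc = {correct / total:.4f}')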
Example #5
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])

test_tfm = transforms.Compose([
    transforms.Resize([256, 256]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224,
                                                          0.225]),
])

batch_size = 32

# Construct datasets

train_set = p1_data(root, 'train', transform=train_tfm)
valid_set = p1_data(root, 'valid', transform=test_tfm)

# Construct data loaders
train_loader = DataLoader(train_set,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=8,
                          pin_memory=True)
valid_loader = DataLoader(valid_set,
                          batch_size=batch_size,
                          shuffle=False,  # the validation split does not need shuffling
                          num_workers=8,
                          pin_memory=True)

# model
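
The snippet cuts off at the "# model" comment. A sketch of how this section plausibly continues, mirroring the resnet152 backbone with a 50-class head used in Examples #2 and #3; the optimizer and learning rate are illustrative assumptions:

import torch
import torch.nn as nn
from torchvision.models import resnet152

device = "cuda" if torch.cuda.is_available() else "cpu"

model = resnet152(pretrained=True)  # start from ImageNet weights
model.fc = nn.Linear(2048, 50)      # 50-way classification head
model = model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)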