Example 1
    raise NameError('please set the test_model file to choose the checkpoint!')
# read dataset
trainset = dataset.CUB(root='./CUB_200_2011', is_train=True, data_len=None)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True,
                                          num_workers=8,
                                          drop_last=False)
testset = dataset.CUB(root='./CUB_200_2011', is_train=False, data_len=None)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         num_workers=8,
                                         drop_last=False)
# define model
net = model.attention_net(topN=PROPOSAL_NUM)
ckpt = torch.load(test_model)
net.load_state_dict(ckpt['net_state_dict'])
net = net.cuda()
net = DataParallel(net)
criterion = torch.nn.CrossEntropyLoss()

##########################  evaluate net on train set  ###############################
train_loss = 0
train_correct = 0
total = 0
net.eval()

for i, data in enumerate(trainloader):
    with torch.no_grad():
        img, label = data[0].cuda(), data[1].cuda()
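        # --- hedged sketch (not part of the original snippet) ---
        # Assumption: net(img) returns a tuple whose second element is the fused
        # ("concat") logits used for classification; adjust the unpacking to the
        # actual attention_net implementation.
        batch_size = img.size(0)
        concat_logits = net(img)[1]
        loss = criterion(concat_logits, label)
        _, predict = torch.max(concat_logits, 1)
        total += batch_size
        train_correct += torch.sum(predict == label).item()
        train_loss += loss.item() * batch_size

# averaged metrics over the train set (sketch)
print('train set: loss = {:.4f}, acc = {:.4f}'.format(
    train_loss / total, float(train_correct) / total))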
Example 2
                                          num_workers=8,
                                          drop_last=False)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         num_workers=8,
                                         drop_last=False)
valloader = torch.utils.data.DataLoader(valset,
                                        batch_size=BATCH_SIZE,
                                        shuffle=False,
                                        num_workers=8,
                                        drop_last=False)

n_class = 2
# define model
net = model.attention_net(topN=PROPOSAL_NUM, n_class=n_class)
if resume:
    ckpt = torch.load(resume)
    net.load_state_dict(ckpt['net_state_dict'])
    start_epoch = ckpt['epoch'] + 1
criterion = torch.nn.CrossEntropyLoss()

# define optimizers
raw_parameters = list(net.pretrained_model.parameters())
part_parameters = list(net.proposal_net.parameters())
concat_parameters = list(net.concat_net.parameters())
partcls_parameters = list(net.partcls_net.parameters())

raw_optimizer = torch.optim.SGD(raw_parameters,
                                lr=LR,
                                momentum=0.9,
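                                # --- hedged sketch: the truncated call is completed
                                # below and the remaining parameter groups follow the
                                # same pattern; WD is an assumed weight-decay constant
                                # from the config, not taken from this snippet. ---
                                weight_decay=WD)
part_optimizer = torch.optim.SGD(part_parameters,
                                 lr=LR, momentum=0.9, weight_decay=WD)
concat_optimizer = torch.optim.SGD(concat_parameters,
                                   lr=LR, momentum=0.9, weight_decay=WD)
partcls_optimizer = torch.optim.SGD(partcls_parameters,
                                    lr=LR, momentum=0.9, weight_decay=WD)

# optional step-wise LR decay for every optimizer (milestones/gamma are assumptions)
schedulers = [torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 100], gamma=0.1)
              for opt in (raw_optimizer, part_optimizer, concat_optimizer, partcls_optimizer)]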
Example 3
# read dataset
trainset = dataset.Fish(root='./datasets/Fish', is_train=True, data_len=None)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True,
                                          num_workers=0,
                                          drop_last=False)  # num_workers=8
testset = dataset.Fish(root='./datasets/Fish', is_train=False, data_len=None)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=False,
                                         num_workers=0,
                                         drop_last=False)  # num_workers=8
# define model
net = model.attention_net(topN=PROPOSAL_NUM, classNum=CLASS_NUM)
if resume:
    print("\ncontinue train")
    ckpt = torch.load(resume)
    net.load_state_dict(ckpt['net_state_dict'])
    start_epoch = ckpt['epoch'] + 1
criterion = torch.nn.CrossEntropyLoss()

# print network to txt
with open(os.path.join(save_dir, "model_network.txt"), "w") as f:
    print(net, file=f)

# save config file to save dir
shutil.copy('./config.py', os.path.join(save_dir, 'config.py'))
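
# --- hedged sketch (not in the original snippet): save checkpoints whose keys
# match the resume logic above ('epoch' and 'net_state_dict'); the file-name
# pattern and the epoch argument are assumptions. ---
def save_checkpoint(net, epoch, save_dir):
    torch.save({'epoch': epoch,
                'net_state_dict': net.state_dict()},
               os.path.join(save_dir, '%03d.ckpt' % epoch))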
Example 4
    len(val_dataset)))

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.workers,
                                           pin_memory=True)
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=args.workers,
                                         pin_memory=True)

# create model
print("Building model ... ")
model = nts_net.attention_net(topN=PROPOSAL_NUM)

# define loss function (criterion)
criterion = torch.nn.CrossEntropyLoss().to(device)

if not os.path.exists(args.resume):
    os.makedirs(args.resume)
print("Saving everything to directory %s." % (args.resume))

# define optimizers
raw_parameters = list(model.pretrained_model.parameters())
part_parameters = list(model.proposal_net.parameters())
concat_parameters = list(model.concat_net.parameters())
partcls_parameters = list(model.partcls_net.parameters())

raw_optimizer = torch.optim.SGD(raw_parameters,
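                                # --- hedged sketch: the truncated call is completed and
                                # the remaining per-module optimizers are added; args.lr
                                # and args.weight_decay are assumed command-line options,
                                # not taken from this snippet. ---
                                lr=args.lr,
                                momentum=0.9,
                                weight_decay=args.weight_decay)
part_optimizer = torch.optim.SGD(part_parameters,
                                 lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
concat_optimizer = torch.optim.SGD(concat_parameters,
                                   lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
partcls_optimizer = torch.optim.SGD(partcls_parameters,
                                    lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
optimizers = [raw_optimizer, part_optimizer, concat_optimizer, partcls_optimizer]

def train_step(img, label):
    # one update over all four parameter groups (sketch); the full NTS-Net loss
    # combines several heads, but a single cross-entropy on the fused logits is
    # used here only to keep the sketch short. Output position [1] is an
    # assumption, and img/label are assumed to be on the training device already.
    for opt in optimizers:
        opt.zero_grad()
    concat_logits = model(img)[1]
    loss = criterion(concat_logits, label)
    loss.backward()
    for opt in optimizers:
        opt.step()
    return loss.item()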
Example 5
                                              new_width=args.new_width,
                                              new_height=args.new_height)

print('{} rgb samples found, {} flow samples found.'.format(
    len(val_dataset),
    len(val_dataset) * 2))

val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=args.workers,
                                         pin_memory=True)

# load model
print("loading models ... ")
rgb_nts_net = model.attention_net(topN=PROPOSAL_NUM)
# rgb_nts_net.load_state_dict(torch.load(RGB_MODEL_PATH))
flow_nts_net = model.attention_net(topN=PROPOSAL_NUM)
# flow_nts_net.load_state_dict(torch.load(FLOW_MODEL_PATH))
rgb_nts_net = rgb_nts_net.to(device)
flow_nts_net = flow_nts_net.to(device)

# define loss function (criterion)
criterion = torch.nn.CrossEntropyLoss().to(device)
rgb_nts_net.eval()
flow_nts_net.eval()

# evaluate on val
loss = 0
correct = 0
n_sample = 0
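
# --- hedged sketch (not in the original snippet): one possible late-fusion
# evaluation loop. The per-batch layout (rgb, flow, label), the output position
# [1] for the fused logits, and averaging the two streams' logits as the fusion
# rule are all assumptions. ---
with torch.no_grad():
    for rgb, flow, label in val_loader:
        rgb, flow, label = rgb.to(device), flow.to(device), label.to(device)
        fused_logits = (rgb_nts_net(rgb)[1] + flow_nts_net(flow)[1]) / 2
        loss += criterion(fused_logits, label).item() * label.size(0)
        correct += (fused_logits.argmax(dim=1) == label).sum().item()
        n_sample += label.size(0)

print('val: loss = {:.4f}, acc = {:.4f}'.format(loss / n_sample, correct / n_sample))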