# Example 1
import os

import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
if os.path.exists(opts.model_file):
    print_log(
        'loading previous best checkpoint [{}] ...'.format(opts.model_file),
        opts.log_file)
    net.load_state_dict(torch.load(opts.model_file))
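# Note: torch.load() without map_location assumes the checkpoint's original
# device is still available; passing map_location='cpu' (or the target device)
# would make this load robust on machines without the saving GPU.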

if opts.multi_gpu:
    print_log('Wrapping network into multi-gpu mode ...', opts.log_file)
    net = torch.nn.DataParallel(net)
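# Because the checkpoint above is loaded before this wrap, its state_dict keys
# carry no 'module.' prefix; saving net.module.state_dict() later keeps the
# saved and unwrapped formats compatible.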

# PREPARE DATA
train_db, val_db, test_db, _ = data_loader(opts)

# MISC
# TODO: the original repo doesn't use weight decay
optimizer = optim.Adam(net.parameters(),
                       lr=opts.lr,
                       weight_decay=opts.weight_decay)
# scheduler = MultiStepLR(optimizer, milestones=opts.scheduler, gamma=opts.lr_scheduler_gamma)
scheduler = StepLR(optimizer,
                   gamma=opts.lr_scheduler_gamma,
                   step_size=opts.lr_scheduler_step)
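# StepLR decays the learning rate by a factor of gamma every step_size epochs,
# i.e. (assuming scheduler.step() runs once per epoch)
# lr(epoch) = opts.lr * opts.lr_scheduler_gamma ** (epoch // opts.lr_scheduler_step).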

# PIPELINE
if val_db is None:
    best_state = None
# Per-epoch metric histories; best_acc tracks the best validation accuracy.
train_loss, train_acc, val_loss, val_acc, best_acc = [], [], [], [], 0

for epoch in range(opts.nep):

    # Record the current learning rate so a scheduler-driven change can be
    # detected (and, presumably, logged) later in the epoch.
    old_lr = optimizer.param_groups[0]['lr']
# y = y.to(device)
# out = model(x)
# generator(out)
# ps, qs = get_proto_query(out.cpu(), y.cpu(), n_aug=1, n_support=1)

# In[32]:

# ps.shape

# In[33]:

# ps

# In[34]:

parameters = list(model.parameters()) + list(generator.parameters())
optim = torch.optim.Adam(params=parameters, lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optim,
                                            gamma=lr_gamma,
                                            step_size=lr_step)
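# Concatenating the two parameter lists above lets a single Adam instance
# update both the model and the generator in one step() call.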

# In[35]:


def get_proto_query(input, target, n_aug, n_support):
    def get_supp_idxs(c):
        return target.eq(c).nonzero()[:n_support].squeeze(1)

    def get_query_idxs(c):
        return target.eq(c).nonzero()[n_support:].squeeze(1)

    # Assumed completion (standard prototypical-network split); n_aug unused.
    # Prototypes are the per-class mean of the support embeddings; the
    # remaining samples of each class become the queries.
    classes = torch.unique(target)
    ps = torch.stack([input[get_supp_idxs(c)].mean(dim=0) for c in classes])
    qs = torch.cat([input[get_query_idxs(c)] for c in classes])
    return ps, qs
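
# A hypothetical smoke test (names and shapes assumed, not from the original):
# emb = torch.randn(15, 8)                    # 3 classes x 5 embeddings of dim 8
# lab = torch.arange(3).repeat_interleave(5)  # labels 0,0,0,0,0,1,...,2
# ps, qs = get_proto_query(emb, lab, n_aug=1, n_support=1)
# ps.shape  # torch.Size([3, 8]); qs holds the remaining 12 query embeddings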
# Example 3
import torch
# train_sampler = BatchSampler(train_dataset.labels, N_way, sample_per_class, episodes)
# val_sampler = BatchSampler(val_dataset.labels, val_N_way, sample_per_class, episodes)

# train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler)
# val_dataloader = DataLoader(val_dataset, batch_sampler=val_sampler)

# In[6]:

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = ProtoNet()
model = model.to(device)

# In[7]:

optim = torch.optim.Adam(params=model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optim,
                                            gamma=lr_gamma,
                                            step_size=lr_step)
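# Reminder: since PyTorch 1.1 the expected order is optim.step() per batch
# followed by scheduler.step() once per epoch, after the optimizer updates,
# so the decay lands on the intended epoch boundaries.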

# In[8]:

train_loss = []
train_acc = []
val_loss = []
val_acc = []
best_acc = 0
best_model = None

for epoch in range(num_epochs):
    print("Epoch: {}/{}".format(epoch, num_epochs))