import numpy as np
import torch
import torch.nn.functional as F

# Assumed defined elsewhere in the source: `Decoder` (the network), `optimizer`,
# `use_cuda`, `batch_size`, and `draw` (renders a parameter vector to an image).

def save_model():
    # Move the weights to CPU first so the checkpoint loads on any device.
    if use_cuda:
        Decoder.cpu()
    torch.save(Decoder.state_dict(), './Decoder.pkl')
    if use_cuda:
        Decoder.cuda()


def load_weights():
    # Restore previously saved weights from ./Decoder.pkl.
    Decoder.load_state_dict(torch.load('./Decoder.pkl'))
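
# A minimal usage sketch of the two helpers above (the toy network and
# optimizer here are illustrative assumptions; the real `Decoder` and
# `optimizer` come from the surrounding source):
#
#   Decoder = torch.nn.Sequential(torch.nn.Linear(9, 64), torch.nn.ReLU(),
#                                 torch.nn.Linear(64, 3))
#   optimizer = torch.optim.Adam(Decoder.parameters())
#   save_model()     # writes ./Decoder.pkl with CPU tensors
#   load_weights()   # restores the same state_dict later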


load_weights()
while True:  # trains indefinitely; interrupt manually or add a stop condition
    Decoder.train()
    train_batch = []
    ground_truth = []
    for i in range(batch_size):
        # Sample a random 9-dim parameter vector and render its target image.
        f = np.random.uniform(0, 1, 9)
        train_batch.append(f)
        ground_truth.append(draw(f))

    train_batch = torch.tensor(np.stack(train_batch)).float()
    ground_truth = torch.tensor(np.stack(ground_truth)).float()
    if use_cuda:
        Decoder = Decoder.cuda()
        train_batch = train_batch.cuda()
        ground_truth = ground_truth.cuda()
    gen = Decoder(train_batch)
    optimizer.zero_grad()
    # The original snippet breaks off here; an MSE reconstruction loss is an
    # assumed completion of the update step.
    loss = F.mse_loss(gen, ground_truth)
    loss.backward()
    optimizer.step()
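    # `save_model()` (defined above but never called in the snippet) could be
    # invoked here periodically to checkpoint the endless loop.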
Example #2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

# Assumed from the surrounding source: `net`, `optimizer`, `BATCH_SIZE`,
# `epoch0`, `EPOCH`, and `DataLoader` (the project's own loader class,
# not torch.utils.data.DataLoader).

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.9)

# nn.NLLLoss2d is deprecated; nn.NLLLoss handles 4-D (N, C, H, W) input.
# The [1, 8000] weight counters the extreme pixel-class imbalance.
loss_func = nn.NLLLoss(weight=torch.FloatTensor([1, 8000]).cuda())

data_loader = DataLoader()
zeros = np.zeros((512, 512))  # blank 512x512 mask (unused in the excerpt shown)
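
# Side note: the log_softmax + NLLLoss pair used in the loop below is
# equivalent to F.cross_entropy with the same class weight. A quick sanity
# check on dummy data (`logits`, `target`, and `w` are hypothetical names):
logits = torch.randn(2, 2, 4, 4)          # (N, C, H, W) raw scores
target = torch.randint(0, 2, (2, 4, 4))   # (N, H, W) integer class labels
w = torch.FloatTensor([1, 8000])
a = nn.NLLLoss(weight=w)(F.log_softmax(logits, dim=1), target)
b = F.cross_entropy(logits, target, weight=w)
assert torch.allclose(a, b)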
for epoch in range(epoch0, epoch0 + EPOCH):
    # Since PyTorch 1.1, scheduler.step() belongs after the epoch's optimizer
    # updates; it is kept here to match the original flow.
    scheduler.step()
    train_step = 0
    test_step = 0
    train_loss = 0
    test_loss = 0
    train_accu = 0
    test_accu = 0
    for train, test in data_loader.gen(BATCH_SIZE, get_stored=True):
        net.train()
        for X, y in train:
            # Add a channel axis: (N, H, W) -> (N, 1, H, W).
            # Variable is deprecated since PyTorch 0.4; plain tensors suffice.
            X = torch.FloatTensor(X[:, np.newaxis, :, :]).cuda()
            y = torch.LongTensor(y).cuda()
            
            output = net(X)
            # log_softmax + NLLLoss together form a weighted cross-entropy.
            output = F.log_softmax(output, dim=1)
            loss = loss_func(output, y)
            train_loss += loss.item()  # .data[0] is deprecated since PyTorch 0.4
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_step += 1