import numpy as np
import time
import torch
from torch.utils.data import DataLoader
from torchvision import transforms

# Load the network and its trained weights
model = CH_net().cuda()
model.load_state_dict(torch.load('res_net.pth'))

# Optimizer and cosine-annealing LR schedule are used by the training loop below
config = Config(model)
config.optimizer = torch.optim.Adam(config.model.parameters(), lr=0.001)
config.lr_optim = torch.optim.lr_scheduler.CosineAnnealingLR(config.optimizer, T_max=32)

test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
])

test_x = csv_to_tensor('data/test.csv', mode='test')
test_set = ImgDataset(test_x, transform=test_transform)
test_loader = DataLoader(test_set, batch_size=config.batch_size, shuffle=False)

# Testing: predict a label for every test image
model.eval()
prediction = []
with torch.no_grad():
    for i, data in enumerate(test_loader):
        test_pred = config.model(data.cuda())
        test_label = np.argmax(test_pred.cpu().data.numpy(), axis=1)
        for y in test_label:
            prediction.append(y)

# Write predictions to csv
with open('prediction_0724.csv', 'w') as f:
    f.write('ImageId,Label\n')
    for i, y in enumerate(prediction):
        f.write('{},{}\n'.format(i + 1, y))
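# The training loop below uses a train_loader/train_set that is not defined in this
# excerpt. A minimal sketch, mirroring the test pipeline above; the 'data/train.csv'
# path, the (images, labels) return for mode='train', and ImgDataset accepting a
# label tensor are assumptions, not confirmed by this code:
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
])
train_x, train_y = csv_to_tensor('data/train.csv', mode='train')      # hypothetical signature
train_set = ImgDataset(train_x, train_y, transform=train_transform)   # hypothetical signature
train_loader = DataLoader(train_set, batch_size=config.batch_size, shuffle=True)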
for epoch in range(config.epoch):
    '''
    # Disabled: switch optimizer partway through training
    # (re-enabling this would also require initializing change1 = change2 = 0 before the loop)
    if not change1 and epoch >= config.epoch * 1 / 3:
        config.optimizer = torch.optim.Adam(config.model.parameters(), lr=0.0003)
        change1 = 1
    if not change2 and epoch >= config.epoch * 2 / 3:
        config.optimizer = torch.optim.SGD(config.model.parameters(), lr=0.00005)
        change2 = 1
    '''
    epoch_start_time = time.time()
    train_loss = 0.0
    train_acc = 0.0

    config.model.train()
    for i, data in enumerate(train_loader):
        config.optimizer.zero_grad()
        train_pred = config.model(data[0].cuda())
        batch_loss = config.loss(train_pred, data[1].cuda())
        batch_loss.backward()
        config.optimizer.step()

        train_acc += np.sum(
            np.argmax(train_pred.cpu().data.numpy(), axis=1) == data[1].numpy())
        train_loss += batch_loss.item()

    # Step the cosine-annealing schedule once per epoch
    config.lr_optim.step()
    print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f' %
          (epoch + 1, config.epoch, time.time() - epoch_start_time,
           train_acc / len(train_set), train_loss / len(train_set)))
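# The inference code above loads weights from 'res_net.pth', but the training loop
# never writes them out. A minimal sketch of persisting the trained weights with
# torch.save on the state_dict (the filename is taken from the loading code above):
torch.save(config.model.state_dict(), 'res_net.pth')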