Example #1
        best_epoch_file = get_saved_model_path(best_epoch)
        print(
            f'\nEpoch: {best_epoch} - New best accuracy! Accuracy: {best_acc}\n\n\n'
        )
        torch.save(model.state_dict(), best_epoch_file)
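
# The lines above are the tail of a "save the best checkpoint" handler: the
# event registration and the get_saved_model_path helper sit outside this
# fragment.  A minimal sketch under assumptions (evaluator, vloader and the
# file-name pattern below are hypothetical, not the original definitions):

import os

import torch
from ignite.engine import Events


def get_saved_model_path(epoch):
    # hypothetical naming scheme: one checkpoint file per best epoch
    return f'./models/best_model_{epoch}.pth'


best_acc, best_epoch, best_epoch_file = 0.0, 0, None


@trainer.on(Events.EPOCH_COMPLETED)
def save_best_checkpoint(engine):
    global best_acc, best_epoch, best_epoch_file
    evaluator.run(vloader)              # assumed validation evaluator/loader
    metrics = evaluator.state.metrics
    epoch = engine.state.epoch
    if metrics['accuracy'] > best_acc:
        # drop the previous best file so only one checkpoint is kept on disk
        prev_best_epoch_file = get_saved_model_path(best_epoch)
        if os.path.exists(prev_best_epoch_file):
            os.remove(prev_best_epoch_file)

        best_acc = metrics['accuracy']
        best_epoch = epoch
        best_epoch_file = get_saved_model_path(best_epoch)
        torch.save(model.state_dict(), best_epoch_file)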


# #### Progress bar - uncomment when testing in notebook

# In[15]:

# pbar = ProgressBar(bar_format='')
# pbar.attach(trainer, output_transform=lambda x: {'loss': x})
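# output_transform maps the trainer's per-iteration output (assumed here to be
# the scalar loss returned by the update step) into the dict of values that the
# progress bar displays.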

# #### Train

# In[16]:

print('Training started\n')
trainer.run(loader, max_epochs=8)
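
# For reference, a typical way such a trainer/evaluator pair is built with
# ignite (a sketch: the real construction is outside this fragment, and the
# optimizer/criterion names are assumptions):
#
# from ignite.engine import create_supervised_trainer, create_supervised_evaluator
# from ignite.metrics import Accuracy, Loss
#
# trainer = create_supervised_trainer(model, optimizer, criterion, device='cuda')
# evaluator = create_supervised_evaluator(
#     model,
#     metrics={'accuracy': Accuracy(), 'loss': Loss(criterion)},
#     device='cuda')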

# #### Evaluate

# In[ ]:

all_preds, _ = eval_model(model, tloader, best_epoch_file, path_data)
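
# The real eval_model is defined earlier in the kernel; judging from the calls
# here it reloads the checkpoint, predicts on the test loader, writes a
# submission file and returns a (predictions, ...) pair.  A hypothetical
# minimal version of the reload-and-predict part, for illustration only:
import torch


def eval_model_sketch(model, tloader, checkpoint_file, device='cuda'):
    model.load_state_dict(torch.load(checkpoint_file))  # restore best-epoch weights
    model.to(device)
    model.eval()
    preds = []
    with torch.no_grad():
        for batch in tloader:
            # assumes each batch is (images, ...) or just images
            x = batch[0] if isinstance(batch, (list, tuple)) else batch
            out = model(x.to(device))
            preds += out.argmax(dim=1).cpu().tolist()
    return preds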

# In[ ]:

apply_plates_leak(all_preds)
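
# apply_plates_leak post-processes the raw predictions using the competition's
# "plate leak": each experimental plate only contains a fixed subset of the
# siRNA classes, so per-image predictions can be restricted or re-ranked to the
# classes that can actually occur on that plate.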
Example #2
            os.remove(prev_best_epoch_file)

        best_acc = metrics['accuracy']
        best_epoch = epoch
        best_epoch_file = get_saved_model_path(best_epoch)
        print(
            f'\nEpoch: {best_epoch} - New best accuracy! Accuracy: {best_acc}\n\n\n'
        )
        torch.save(model.state_dict(), best_epoch_file)


# #### Progress bar - uncomment when testing in notebook

# In[15]:

# pbar = ProgressBar(bar_format='')
# pbar.attach(trainer, output_transform=lambda x: {'loss': x})

# #### Train

# In[16]:

print('Training started\n')
trainer.run(loader, max_epochs=2)

# #### Evaluate

# In[ ]:

eval_model(model, tloader, best_epoch_file, path_data)

Example #3
            torch.save(model.state_dict(), best_epoch_file)
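    # The lines below are the body of the per-cell-type training loop: attach a
    # progress bar, train on this cell type, build its test loader, predict with
    # the best checkpoint, write submission_{cell}.csv and accumulate the
    # predictions into all_preds.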

    pbar = ProgressBar(bar_format='')
    pbar.attach(trainer, output_transform=lambda x: {'loss': x})

    print('Training started\n')
    #     trainer.run(loader, max_epochs=epochs_per_cell[cell])
    trainer.run(loader, max_epochs=1)

    tloader = D.DataLoader(cat_test_df,
                           batch_size=1,
                           shuffle=False,
                           num_workers=15)
    cell_preds, _ = eval_model(model,
                               tloader,
                               best_epoch_file,
                               path_data,
                               sub_file=f'submission_{cell}.csv')
    all_preds += cell_preds

# In[ ]:

# aggregate submission files
submissions = []
for cell in cells:
    submissions += [pd.read_csv(f'submission_{cell}.csv')]

submissions = pd.concat(submissions)
submissions.to_csv('submission.csv',
                   index=False,
                   columns=['id_code', 'sirna'])
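
# Optional sanity check (not part of the original kernel): every test id_code
# should appear exactly once in the aggregated submission.
assert submissions['id_code'].is_unique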