Beispiel #1
0
 def _log_coalescent_heatmap(self, model_name, averaged_coals, ix):
     """Render a coalescent heatmap for *model_name* and save it as a PNG.

     The figure is written to ``<figure_root>/<model_name>/<ix>-heatmap-<model_name>.png``
     and the figure object is returned to the caller.

     :param model_name: name of the model, used for the directory and file name
     :param averaged_coals: data passed straight through to make_coalescent_heatmap
     :param ix: index/tag embedded in the output file name
     :return: the figure produced by make_coalescent_heatmap
     """
     from .viz import make_coalescent_heatmap
     ensure_directories(self._figure_root, model_name)

     f = make_coalescent_heatmap(model_name, averaged_coals)

     # Save via the returned figure object instead of plt.savefig: plt.savefig
     # writes whatever pyplot's *current* figure happens to be, which breaks if
     # any other figure was created in between.
     # NOTE(review): assumes make_coalescent_heatmap returns a matplotlib
     # Figure (the original `plt.savefig` + `return f` strongly suggests so) —
     # confirm against viz.make_coalescent_heatmap.
     f.savefig(
         os.path.join(
             self._figure_root, model_name,
             '{ix}-heatmap-{model}.png'.format(ix=ix, model=model_name)
         )
     )
     return f
Beispiel #2
0
def train(dataset,
          model,
          device,
          seed,
          n_epochs,
          batch_size,
          output_path,
          logger,
          num_workers=1,
          quiet=False):
    """Train *model* on *dataset* and persist the learned parameters.

    The weights are saved to ``<output_path>/models/<model.name>.pt`` and the
    per-epoch losses are forwarded to *logger*.

    :param dataset: training data handed directly to Classifier.fit
    :param model: model object; ``model.name`` names the checkpoint file
    :param device: torch device spec (e.g. "cpu", "cuda:0")
    :param seed: manual seed for torch RNG, for reproducibility
    :param n_epochs: number of training epochs
    :param batch_size: NOTE(review): currently unused here — verify callers
    :param output_path: root directory under which ``models/`` is created
    :param logger: logging backend receiving the loss history
    :param num_workers: NOTE(review): currently unused here — verify callers
    :param quiet: suppress the console banner and the tqdm progress bar
    """
    torch.manual_seed(seed)
    device = torch.device(device)

    model_root, = ensure_directories(output_path, 'models/')
    parameters_path = os.path.join(
        model_root, '{model}.pt'.format(model=model.name))

    clf = Classifier(model, logger=logger, device=device)

    if not quiet:
        print("Running {}-model...".format(model.name))

    progress_bar = None if quiet else tqdm
    losses = clf.fit(dataset, n_epochs=n_epochs, progress=progress_bar)

    clf.save(parameters_path, quiet)
    logger.log_losses("dataset", model.name, losses)
Beispiel #3
0
def test(dataset, model, device, batch_size, output_path, logger, project,
         workspace):
    """Restore the trained weights for *model* and log a coalescent heatmap.

    Loads ``<output_path>/models/<model.name>.pt`` into a fresh Classifier,
    runs predict_proba over *dataset*, and hands the result to the logger.

    :param dataset: evaluation data passed to Classifier.predict_proba
    :param model: model object; ``model.name`` selects the checkpoint file
    :param device: torch device spec used both for the model and torch.load
    :param batch_size: NOTE(review): currently unused here — verify callers
    :param output_path: root directory containing the ``models/`` checkpoints
    :param logger: logging backend receiving the heatmap predictions
    :param project: NOTE(review): currently unused here — verify callers
    :param workspace: NOTE(review): currently unused here — verify callers
    """
    model_root, = ensure_directories(output_path, 'models/')
    weights_file = os.path.join(
        model_root, '{model}.pt'.format(model=model.name))

    dev = torch.device(device)
    clf = Classifier(model, logger=logger, device=dev)

    # Restore the parameters written by train() onto the wrapped classifier.
    state_dict = torch.load(weights_file, map_location=dev)
    clf.classifier.load_state_dict(state_dict)

    # TODO: train/test accuracy evaluation over DataLoaders was stubbed out
    # in the original and is still pending.
    heatmap_preds = clf.predict_proba(dataset, logger)
    logger.log_coalescent_heatmap(model.name, heatmap_preds, "00000")
Beispiel #4
0
def get_logger(logger, root, project=None, workspace=None, offline=True) -> "Logger":
    """Build the requested logging backend.

    :param logger: backend name, case-insensitive: "local" or "comet"
    :param root: root directory handed to the backend (and to the offline
        comet archive directory)
    :param project: comet project name (required for the comet backend)
    :param workspace: comet workspace (required for the comet backend)
    :param offline: if True, use an OfflineExperiment writing under
        ``<root>/comet/`` instead of a live comet connection
    :return: a LocalLogger or CometLogger instance
    :raises ValueError: for an unknown backend name, or when the comet
        backend is requested without *project*/*workspace*
    """
    kind = logger.lower()

    if kind == "local":
        # Import lazily so the error paths below do not require the package.
        from genomics_utils import LocalLogger
        return LocalLogger(root)

    if kind == "comet":
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would let a missing project/workspace slip through silently.
        if project is None:
            raise ValueError('for comet logger, please, provide project name')
        if workspace is None:
            raise ValueError('for comet logger, please, provide workspace')

        from genomics_utils import CometLogger

        if offline:
            comet_path, = ensure_directories(root, "comet/")
            experiment = OfflineExperiment(project_name=project,
                                           workspace=workspace,
                                           offline_directory=comet_path)
        else:
            experiment = Experiment(project_name=project, workspace=workspace)
        return CometLogger(root=root, experiment=experiment)

    raise ValueError("Unknown experiment context")
Beispiel #5
0
 def __init__(self, root):
     """Create the report and figure output directories under *root*."""
     # ensure_directories yields one created path per requested subdirectory.
     created = ensure_directories(root, 'reports/', 'figures/')
     self._report_root, self._figure_root = created

     super(LocalLogger, self).__init__()
Beispiel #6
0
    # Register one argparse sub-parser per architecture and attach its
    # model-specific CLI arguments.
    bert_parser = model_parsers.add_parser('bert')
    conv_bert_parser = model_parsers.add_parser('conv_bert')

    gru_add_arguments(gru_parser)
    conv_gru_add_arguments(conv_gru_parser)
    bert_add_arguments(bert_parser)
    conv_bert_add_arguments(conv_bert_parser)

    # small models
    conv_small_parser = model_parsers.add_parser('conv_small')
    conv_add_arguments(conv_small_parser)

    args = parser.parse_args()
    print(args)
    # Instantiate the selected model from the registry keyed by args.model.
    model = available.models[args.model].Model(args)
    # NOTE(review): 'models/' (model_root) and 'models/<name>' (default_root_dir)
    # overlap; presumably intentional layering — verify against downstream use.
    model_root, = ensure_directories(args.output, 'models/')
    test_output, = ensure_directories(args.data, "{}".format(model.name))
    default_root_dir, = ensure_directories(args.output,
                                           'models/{}'.format(model.name))

    # Checkpoint file lives inside the per-model directory.
    checkpoint_path = os.path.join(default_root_dir,
                                   '{model}.pt'.format(model=model.name))

    # Comet.ml logger for PyTorch Lightning, configured entirely from CLI args.
    comet_logger = CometLightningLogger(workspace=args.cmt_workspace,
                                        project_name=args.cmt_project,
                                        save_dir=default_root_dir,
                                        offline=args.cmt_offline,
                                        disabled=args.cmt_disabled,
                                        experiment_name=model.name)

    datamodule = DatasetPL(path=args.data,