Example #1
# Imports assumed by the examples below. `chapman_autoencoder`,
# `byol_chapman_utilities` and `get_datasets_from_paths` are project-local
# helpers and are assumed to be importable/defined elsewhere in this repository.
import datetime
import os
import pickle
from copy import deepcopy

import pytorch_lightning as pl
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.models import resnet18  # assumed torchvision backbone

import chapman_autoencoder
import byol_chapman_utilities


def testing(testing_flag, batch_size):
    autoencoder, encoder, decoder = chapman_autoencoder.get_trained_autoencoder(
        testing_flag)
    # use the pretrained autoencoder's encoder as the BYOL backbone
    model = deepcopy(encoder)

    # get chapman datasets
    user_datasets, patient_to_rhythm_dict, test_train_split_dict, working_directory = get_datasets_from_paths(
        testing_flag)

    # encode the 4 unique rhythms as integer labels
    unique_rhythms_words = set(patient_to_rhythm_dict.values())
    # e.g. {'AFIB': 0, 'SB': 1, 'SR': 2, 'GSVT': 3}
    rhythm_to_label_encoding = {
        rhythm: index
        for index, rhythm in enumerate(unique_rhythms_words)
    }

    train_chapman_dataset = byol_chapman_utilities.ChapmanDataset(
        user_datasets, patient_to_rhythm_dict, test_train_split_dict, 'train')
    test_chapman_dataset = byol_chapman_utilities.ChapmanDataset(
        user_datasets, patient_to_rhythm_dict, test_train_split_dict, 'test')

    train_loader = DataLoader(train_chapman_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    val_loader = DataLoader(
        test_chapman_dataset,
        batch_size=batch_size,
    )

    byol_model = deepcopy(model)
    byol = byol_chapman_utilities.BYOL(byol_model, image_size=(2500, 4))
    byol_trainer = pl.Trainer(
        max_epochs=10,
        accumulate_grad_batches=2048 // batch_size,
        weights_summary=None,
    )
    byol_trainer.fit(byol, train_loader, val_loader)

    # Recover the trained encoder in two ways: directly from the BYOL module and
    # by loading the trained weights into a fresh copy of the original encoder.
    byol_encoder = byol.encoder
    state_dict = byol_model.state_dict()
    new_model = deepcopy(encoder)
    new_model.load_state_dict(state_dict)

    # Sanity-check that both recovered encoders produce outputs of the expected shape.
    for data, label in val_loader:
        byol_encoded_data = byol_encoder(data.float())
        byol_new_model_data = new_model(data.float())
        print(f'byol encoder output shape: {byol_encoded_data.size()}')
        print(f'state dict model output shape: {byol_new_model_data.size()}')
        print(label)
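

# A lighter variant of the shape check above: inspect a single validation batch
# instead of looping over the whole loader. This helper is an illustrative
# sketch, not part of the original repository.
def check_single_batch(byol_encoder, new_model, val_loader):
    data, label = next(iter(val_loader))
    with torch.no_grad():
        encoder_out = byol_encoder(data.float())
        reloaded_out = new_model(data.float())
    print(f'byol encoder output: {encoder_out.size()}, '
          f'reloaded model output: {reloaded_out.size()}')
    return encoder_out, reloaded_out, label
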
Example #2

def autoencoder_main(testing_flag, batch_size, epoch_number, latent_dim,
                     projection_dim):
    # get trained autoencoder
    autoencoder, encoder, decoder = chapman_autoencoder.get_trained_autoencoder(
        testing_flag, latent_dim)
    # we will use the encoder as input into byol
    model = deepcopy(encoder)

    # get chapman datasets
    user_datasets, patient_to_rhythm_dict, test_train_split_dict, working_directory, path_to_embeddings = get_datasets_from_paths(
        testing_flag)

    # encode the 4 unique rhythms as integer labels
    unique_rhythms_words = set(patient_to_rhythm_dict.values())
    # e.g. {'AFIB': 0, 'SB': 1, 'SR': 2, 'GSVT': 3}
    rhythm_to_label_encoding = {
        rhythm: index
        for index, rhythm in enumerate(unique_rhythms_words)
    }

    # get train and test datasets and create dataloaders
    train_chapman_dataset = byol_chapman_utilities.ChapmanDataset(
        user_datasets, patient_to_rhythm_dict, test_train_split_dict, 'train')
    test_chapman_dataset = byol_chapman_utilities.ChapmanDataset(
        user_datasets, patient_to_rhythm_dict, test_train_split_dict, 'test')

    train_loader = DataLoader(train_chapman_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    val_loader = DataLoader(test_chapman_dataset, batch_size=batch_size)

    # byol training model
    byol_model = deepcopy(model)
    byol = byol_chapman_utilities.BYOL(byol_model,
                                       image_size=(2500, 4),
                                       projection_size=projection_dim)
    byol_trainer = pl.Trainer(max_epochs=epoch_number,
                              weights_summary=None,
                              logger=False)
    byol_trainer.fit(byol, train_loader, val_loader)

    # Load the BYOL-trained weights back into a fresh copy of the encoder.
    state_dict = byol_model.state_dict()
    byol_encoder = deepcopy(encoder)
    byol_encoder.load_state_dict(state_dict)

    return byol_encoder, test_chapman_dataset, train_chapman_dataset, working_directory, path_to_embeddings
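

# Sketch of how the returned encoder might be consumed downstream: materialise
# fixed embeddings for the test split. The hyperparameter values and the
# no_grad loop below are illustrative assumptions, not taken from the repository.
def embed_test_split(testing_flag=True, batch_size=128):
    byol_encoder, test_ds, _train_ds, _working_directory, _path_to_embeddings = autoencoder_main(
        testing_flag, batch_size, epoch_number=10, latent_dim=256,
        projection_dim=128)

    byol_encoder.eval()
    embeddings = []
    with torch.no_grad():
        for data, _label in DataLoader(test_ds, batch_size=batch_size):
            embeddings.append(byol_encoder(data.float()))
    embeddings = torch.cat(embeddings).cpu()
    print(f'test embeddings shape: {embeddings.size()}')
    return embeddings
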
Example #3
def resnet_main(testing_flag, batch_size):
    # get chapman datasets
    user_datasets, patient_to_rhythm_dict, test_train_split_dict, working_directory = get_datasets_from_paths(
        testing_flag)

    # encode the 4 unique rhythms as integer labels
    unique_rhythms_words = set(patient_to_rhythm_dict.values())
    # e.g. {'AFIB': 0, 'SB': 1, 'SR': 2, 'GSVT': 3}
    rhythm_to_label_encoding = {
        rhythm: index
        for index, rhythm in enumerate(unique_rhythms_words)
    }

    # get train and test datasets and create dataloaders
    train_chapman_dataset = byol_chapman_utilities.ChapmanDataset(
        user_datasets, patient_to_rhythm_dict, test_train_split_dict, 'train')
    test_chapman_dataset = byol_chapman_utilities.ChapmanDataset(
        user_datasets, patient_to_rhythm_dict, test_train_split_dict, 'test')

    train_loader = DataLoader(train_chapman_dataset,
                              batch_size=batch_size,
                              shuffle=True)
    val_loader = DataLoader(
        test_chapman_dataset,
        batch_size=batch_size,
    )
    # ResNet-18 backbone with the first convolution adapted to single-channel input.
    model = resnet18()
    model.conv1 = nn.Conv2d(1,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias=False)
    model_name = 'resnet18'

    # supervised learning before byol
    supervised_model = deepcopy(model)
    supervised = byol_chapman_utilities.SupervisedLightningModule(
        supervised_model)
    supervised_trainer = pl.Trainer(max_epochs=25, weights_summary=None)
    supervised_trainer.fit(supervised, train_loader, val_loader)
    supervised_accuracy = byol_chapman_utilities.accuracy_from_val_loader_and_model(
        val_loader, supervised_model)

    # byol training model
    byol_model = deepcopy(model)
    byol = byol_chapman_utilities.BYOL(byol_model, image_size=(2500, 4))
    byol_trainer = pl.Trainer(
        max_epochs=10,
        accumulate_grad_batches=2048 // batch_size,
        weights_summary=None,
    )
    byol_trainer.fit(byol, train_loader, val_loader)

    # supervised learning again after byol
    state_dict = byol_model.state_dict()
    post_byol_model = deepcopy(model)
    post_byol_model.load_state_dict(state_dict)
    post_byol_supervised = byol_chapman_utilities.SupervisedLightningModule(
        post_byol_model)
    post_byol_trainer = pl.Trainer(
        max_epochs=10,
        accumulate_grad_batches=2048 // batch_size,
        weights_summary=None,
    )
    post_byol_trainer.fit(post_byol_supervised, train_loader, val_loader)
    post_byol_accuracy = byol_chapman_utilities.accuracy_from_val_loader_and_model(
        val_loader, post_byol_model)

    # final results
    print(f'supervised accuracy - {supervised_accuracy}')
    print(f'post byol supervised accuracy - {post_byol_accuracy}')

    save_dict = {
        'supervised_acc': supervised_accuracy,
        'post_byol_acc': post_byol_accuracy
    }

    # save results
    start_time = datetime.datetime.now()
    start_time_str = start_time.strftime("%Y%m%d-%H%M%S")

    save_filename = f'{testing_flag}-{batch_size}-{model_name}-{start_time_str}-byol-chapman.pickle'
    save_path = os.path.join(working_directory, save_filename)

    with open(save_path, 'wb') as f:
        pickle.dump(save_dict, f)
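

# A possible command-line entry point for this example; the flag semantics and
# the default batch size below are assumptions, not taken from the repository.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='BYOL pre-training on the Chapman ECG dataset with a '
                    'ResNet-18 backbone')
    parser.add_argument('--testing_flag', action='store_true',
                        help='run on the small testing split')
    parser.add_argument('--batch_size', type=int, default=128)
    args = parser.parse_args()

    resnet_main(args.testing_flag, args.batch_size)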