Example #1
import numpy as np  # needed for np.ceil and np.round below


def train_network(basis,
                  model_name,
                  epochs,
                  label_norm,
                  batch_size,
                  n_train_spectra,
                  metabolites,
                  export_acquisitions,
                  export_datatype,
                  n_validation_spectra=None):
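    """Train a network on synthetic spectra generated from each basis.

    Builds one training and one validation Dataset per basis, loads the
    benchmark datasets, and hands everything to multi_dataset_train_loop.
    Dataset, SAVE_ROOT, load_benchmark_datasets and multi_dataset_train_loop
    are assumed to be defined in the surrounding module.
    """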

    if n_validation_spectra is None:
        # default to a 20% validation split
        n_validation_spectra = int(np.ceil(n_train_spectra * 0.2))

    save_root = SAVE_ROOT

    # spread the requested spectra evenly across the supplied basis sets
    n_train_lw_samples = int(np.round(float(n_train_spectra) / len(basis)))
    n_validation_lw_samples = int(
        np.round(float(n_validation_spectra) / len(basis)))

    train_datasets = []
    for basi in basis:
        train_dataset = Dataset()
        train_dataset._name = 'train'
        train_dataset.basis = basi
        train_dataset.export_datatype = export_datatype
        train_dataset.export_acquisitions = export_acquisitions
        train_dataset.conc_normalisation = label_norm
        train_dataset.export_nu = False
        train_dataset.export_dss_peak = False
        train_dataset.add_adc_noise = True
        train_dataset.add_nu_noise = False
        train_dataset.generate_dataset(metabolites, n_train_lw_samples)
        train_datasets.append(train_dataset)

    benchmark_basis, benchmark_basis_names = load_benchmark_datasets()
    benchmark_datasets = []
    for basi, name in zip(benchmark_basis, benchmark_basis_names):
        td = Dataset()
        td._name = 'benchmark_' + name
        td.basis = basi
        # train_dataset still refers to the last dataset built in the
        # training loop above
        td.copy_settings(train_dataset, test_dataset=True)
        td.copy_basis()
        benchmark_datasets.append([td])

    validation_datasets = []
    for basi in basis:
        validation_dataset = Dataset()
        validation_dataset.basis = basi
        validation_dataset.copy_settings(train_dataset)
        validation_dataset._name = 'validation_multi_lw'
        validation_dataset.generate_dataset(metabolites,
                                            n_validation_lw_samples)
        validation_datasets.append(validation_dataset)

    multi_dataset_train_loop(
        model_name,
        epochs,
        batch_size,
        label_norm,
        save_dir=save_root + 'models/',
        train_datasets=train_datasets,
        # each benchmark set is evaluated on its own; the per-basis
        # validation datasets are grouped into a single additional test set
        test_datasets=benchmark_datasets + [validation_datasets],
        dataset_normalisation=train_dataset.conc_normalisation)
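
A minimal sketch of how train_network might be invoked. Only Basis.load_dicom and train_network itself come from the source; every literal value below (paths, metabolite names, hyperparameters) is an illustrative placeholder, not taken from the original project.

# Hypothetical invocation -- all literal values are placeholders.
basis = [Basis.load_dicom('/data/mega_press/basis_%d/' % i) for i in range(3)]
train_network(basis,
              model_name='megapress_cnn',
              epochs=100,
              label_norm='max',
              batch_size=32,
              n_train_spectra=20000,
              metabolites=['NAA', 'Cr', 'GABA', 'Glx'],
              export_acquisitions=['edit_off', 'difference'],
              export_datatype='magnitude')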
Example #2
import numpy as np  # needed for np.array below


def quantify(ima_dir, model_dir, metabolites=None):
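    """Quantify metabolite concentrations in DICOM MEGA-PRESS spectra.

    Loads the spectra from ima_dir, runs them through the network saved at
    model_dir, and prints a table of predicted (and, where the ground truth
    is known, actual) concentrations.
    """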
    basis = Basis.load_dicom(ima_dir)
    model = load_network(model_dir)

    if metabolites:
        # check that the network can actually quantify the requested metabolites
        if not all([
                m_name.lower() in [x.lower() for x in model.output_labels]
                for m_name in metabolites
        ]):
            raise ValueError(
                'Network is unable to quantify one or more metabolites supplied.\n'
                'Network is able to quantify: ' + str(model.output_labels) +
                '\n'
                'Requested metabolites: ' + str(metabolites))
    else:
        metabolites = model.output_labels
    else:
        metabolites = model.output_labels

    # generate a dataset from the loaded dicoms
    dataset = Dataset()
    dataset._name = 'quantify'
    dataset.basis = basis
    dataset.copy_basis()
    dataset.export_datatype = model.export_datatype
    dataset.export_acquisitions = model.export_acquisitions
    dataset.conc_normalisation = model.output_normalisation
    dataset.export_nu = False
    dataset.export_dss_peak = False
    dataset.add_adc_noise = False
    dataset.add_nu_noise = False

    # export the dataset into the format the network expects
    t_data, t_labels, mo_labels = dataset.export_to_keras(
        model_labels=metabolites)
    t_data, input_shape = reshape_data(t_data)

    # run the model in test mode to display the loss and accuracy
    model.evaluate(x=t_data, y=t_labels)
    # quantify!
    predictions = model.predict(t_data)

    # trim the output data to match the metabolites of interest
    cols_index = [[x.lower()
                   for x in model.output_labels].index(m_name.lower())
                  for m_name in metabolites]
    predictions = predictions[:, cols_index]
    # renormalise the output labels
    predictions = normalise_labels(predictions, model.output_normalisation)

    # get the spectra in the order they were exported in dataset.export_to_keras
    # so we can match the predictions to the spectra
    spectra = np.array(dataset.group_spectra_by_id())

    # print the results table!
    print('\n\nQuantifying %d MEGA-PRESS Spectra' % (len(spectra)))
    print('This network can only quantify: ' + str(model.output_labels))
    print('\tNetwork path: ' + model_dir)
    print('\tDICOM path: ' + ima_dir + '\n\n')

    for spec, prediction in zip(spectra, predictions):
        # if the true concentrations are known, print them next to the predictions
        if sum(spec[0].concentrations):
            print('Spectra ID: ' + spec[0].id)
            print('\t%-*s %s %s' % (20, 'Metabolite', 'Predicted', 'Actual'))
            actual_concentrations = normalise_labels(
                spec[0].concentrations, model.output_normalisation)
            for p, a, m_name in zip(
                    prediction, actual_concentrations,
                    convert_mol_names(metabolites, mode='lengthen')):
                print('\t%-*s %.6f %.6f' % (20, m_name, p, a))
            print('\n')
        else:
            print('Spectra ID: ' + spec[0].id)
            print('\t%-*s %s' % (20, 'Metabolite', 'Predicted'))
            for p, m_name in zip(
                    prediction, convert_mol_names(metabolites,
                                                  mode='lengthen')):
                print('\t%-*s %.6f' % (20, m_name, p))
            print('\n')
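
A minimal sketch of a call to quantify. Both directory paths and the metabolite list are hypothetical placeholders; passing metabolites=None quantifies everything the network was trained on.

# Hypothetical call -- both paths are placeholders.
quantify(ima_dir='/data/scans/patient_01/',
         model_dir='/models/megapress_cnn/',
         metabolites=['NAA', 'GABA'])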