Example #1
def process(folder, censor_date):
    """ Post process data """
    print(folder)
    util.setup(folder)

    image_ids, survival_days = util.get_image_id_and_survival_days(
        study_id="GBM_survival_time", censor_date_str=censor_date)
    result = util.post_calculations(image_ids)
    print('Total: ' + str(len(result['all'])) + ' patients')
    util.avg_calculation(result['all'],
                         'tumor',
                         None,
                         True,
                         folder,
                         save_sum=True)
    util.avg_calculation(result['img'], 'volume', None, True, folder)
    util.mortality_rate_calculation(result['all'],
                                    '',
                                    survival_days,
                                    True,
                                    folder,
                                    default_value=-1)

    image_ids, survival_days = util.get_image_id_and_survival_days(
        study_id="GBM_survival_time",
        censor_date_str=censor_date,
        resection=True)
    result = util.post_calculations(image_ids)
    print('Resected: ' + str(len(result['all'])) + ' patients')
    util.avg_calculation(result['all'],
                         'tumor_resected',
                         None,
                         True,
                         folder,
                         save_sum=True)
    util.avg_calculation(result['img'], 'volume_resected', None, True, folder)
    util.mortality_rate_calculation(result['all'],
                                    '_resected',
                                    survival_days,
                                    True,
                                    folder,
                                    default_value=-1)

    survival_groups = [[0, 182], [183, 730], [731, float('Inf')]]

    for group in survival_groups:
        image_ids, survival_days = util.get_image_id_and_survival_days(
            study_id="GBM_survival_time",
            censor_date_str=censor_date,
            survival_group=group)
        result = util.post_calculations(image_ids)
        print('Group ' + str(group) + ': ' + str(len(result['all'])) +
              ' patients')
        label = 'tumor_' + str(group[0]) + '-' + str(group[1])
        util.avg_calculation(result['all'],
                             label,
                             None,
                             True,
                             folder,
                             save_sum=True)
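
# Hypothetical usage sketch for the process() function above (added for
# illustration); the output folder name and the censor-date string are
# placeholder values, not taken from the original project.
process("GBM_survival_RES/", "2019-01-01")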
Example #2
def process(folder, censor_date, survival_groups, pids_to_exclude=None):
    """ Post process data """
    print(folder)
    util.setup(folder)

    image_ids, survival_days = util.get_image_id_and_survival_days(study_id="GBM_survival_time", exclude_pid=pids_to_exclude, censor_date_str=censor_date)
    result = util.post_calculations(image_ids)
    print('Total: ' + str(len(result['all'])) + ' patients')
    util.avg_calculation(result['all'], 'tumor', None, True, folder, save_sum=True)
    util.mortality_rate_calculation(result['all'], '_all_year', survival_days, True, folder, default_value=-1, max_value=150, per_year=True)
    util.avg_calculation(result['img'], 'volume', None, True, folder)

    image_ids, survival_days = util.get_image_id_and_survival_days(study_id="GBM_survival_time", exclude_pid=pids_to_exclude, censor_date_str=censor_date, resection=True)
    result = util.post_calculations(image_ids)
    print('Resected: ' + str(len(result['all'])) + ' patients')
    util.avg_calculation(result['all'], 'tumor_resected', None, True, folder, save_sum=True)
    util.avg_calculation(result['img'], 'volume_resected', None, True, folder)
    util.mortality_rate_calculation(result['all'], '_resected_year', survival_days, True, folder, default_value=-1, max_value=150, per_year=True)

    for group in survival_groups:
        image_ids, survival_days = util.get_image_id_and_survival_days(study_id="GBM_survival_time", exclude_pid=pids_to_exclude, censor_date_str=censor_date, survival_group=group)
        result = util.post_calculations(image_ids)
        print('Group ' + str(group) + ': ' + str(len(result['all'])) + ' patients')
        label = 'tumor_' + str(group[0]) + '-' + str(group[1])
        util.avg_calculation(result['all'], label, None, True, folder, save_sum=True)
Example #3
def process3(folder):
    """ Post process data """
    print(folder)
    util.setup(folder)
    params = ['Mobility', 'Selfcare', 'Activity', 'Pain', 'Anxiety', 'karnofsky', 'Index_value']
    image_ids = find_images()
    result = util.post_calculations(image_ids)
    print(len(result['all']))
    # util.avg_calculation(result['all'], 'all_N=112', None, True, folder, save_sum=True)
    # util.avg_calculation(result['img'], 'img_N=112', None, True, folder)

    for qol_param in params:
        if qol_param == "Delta_qol2":
            (image_ids_with_qol, qol) = util.get_qol(image_ids, "Delta_qol")
            qol = [-1 if _temp <= -0.15 else 0 if _temp < 0.15 else 1 for _temp in qol]
        else:
            (image_ids_with_qol, qol) = util.get_qol(image_ids, qol_param)
        if qol_param not in ["karnofsky", "Delta_kps"]:
            qol = [_temp * 100 for _temp in qol]
        default_value = -100
        print(qol_param)
        print(len(qol))
        result = util.post_calculations(image_ids_with_qol)
        for label in result:
            if label == 'img':
                continue
            print(label)
            util.avg_calculation(result[label], label + '_' + qol_param + '_N=112', qol, True, folder, default_value=default_value)
            util.median_calculation(result[label], label + '_' + qol_param + '_N=112', qol, True, folder, default_value=default_value)
Example #4
def process2(folder):
    """ Post process data Delta"""
    print(folder)
    util.setup(folder)
    params = ['Delta_qol', 'Delta_qol2', 'Delta_mobility', 'Delta_selfcare', 'Delta_activity', 'Delta_pain', 'Delta_anixety', 'Delta_kps']
    image_ids = find_images()
    result = util.post_calculations(image_ids)
    print(len(result['all']))
    util.avg_calculation(result['all'], 'all_N=112', None, True, folder, save_sum=True)
    util.avg_calculation(result['img'], 'img_N=112', None, True, folder)
    print("\n\n\n\n\n")

    for qol_param in params:
        if qol_param == "Delta_qol2":
            (image_ids_with_qol, qol) = util.get_qol(image_ids, "Delta_qol")
            qol = [-1 if _temp <= -0.15 else 0 if _temp < 0.15 else 1 for _temp in qol]
        else:
            (image_ids_with_qol, qol) = util.get_qol(image_ids, qol_param)
        qol = [_temp * 100 for _temp in qol]
        default_value = -300
        print(qol_param, len(qol))
        result = util.post_calculations(image_ids_with_qol)
        for label in result:
            if label == 'img':
                continue
            print(label)
            # util.avg_calculation(result[label], label + '_' + qol_param, qol, True, folder, default_value=default_value)
            util.median_calculation(result[label], label + '_' + qol_param, qol, True, folder, default_value=default_value)
Example #5
def process(folder):
    """ Post process data distribution and baseline"""
    print(folder)
    util.setup(folder)
    params = ['Mobility', 'Selfcare', 'Activity', 'Pain', 'Anxiety', 'karnofsky', 'Index_value']
    image_ids = find_images_163()
    result = util.post_calculations(image_ids)
    print(len(result['all']))
    util.avg_calculation(result['all'], 'all_N=163', None, True, folder, save_sum=True)
    util.avg_calculation(result['img'], 'img_N=163', None, True, folder)

    image_ids = do_img_registration_GBM.find_images()
    result = util.post_calculations(image_ids)
    print(len(result['all']))
    util.avg_calculation(result['all'], 'all_N=170', None, True, folder, save_sum=True)
    util.avg_calculation(result['img'], 'img_N=170', None, True, folder)
    for qol_param in params:
        (image_ids_with_qol, qol) = util.get_qol(image_ids, qol_param)
        if qol_param not in ["karnofsky", "Delta_kps"]:
            qol = [_temp * 100 for _temp in qol]
        default_value = -100
        print(qol_param)
        print(len(qol))
        result = util.post_calculations(image_ids_with_qol)
        for label in result:
            if label == 'img':
                continue
            print(label)
            # util.avg_calculation(result[label], label + '_' + qol_param, qol, True, folder, default_value=default_value)
            util.median_calculation(result[label], label + '_' + qol_param, qol, True, folder, default_value=default_value)
def validate(folder):
    """ Post process data tumor volume"""
    print(folder)
    util.setup(folder, 'MolekylareMarkorer')
    conn = sqlite3.connect(util.DB_PATH, timeout=120)
    conn.text_factory = str
    cursor = conn.execute(
        '''SELECT pid from MolekylareMarkorer ORDER BY pid''')

    brain_mask = nib.load(util.TEMPLATE_MASK).get_data()

    max_val = 0
    max_pid = -1
    for pid in cursor:
        pid = pid[0]

        _id = conn.execute('''SELECT id from Images where pid = ?''',
                           (pid, )).fetchone()
        if not _id:
            print("---No data for ", pid)
            continue
        _id = _id[0]

        _filepath = conn.execute(
            "SELECT filepath_reg from Labels where image_id = ?",
            (_id, )).fetchone()[0]

        tumor_data = nib.load(util.DATA_FOLDER + _filepath).get_data()
        union_data = (1 - brain_mask) * tumor_data
        print(pid, np.sum(union_data[:]))
        if np.sum(union_data[:]) > max_val:
            max_val = np.sum(union_data[:])
            max_pid = pid
    print("---------- ", max_pid, max_val)
Example #7
def process(folder):
    """ Post process data """
    print(folder)
    util.setup(folder)

    for grade in [2, 3, 4]:
        image_ids, survival_days = util.get_image_id_and_survival_days(
            glioma_grades=[grade])
        result = util.post_calculations(image_ids)

        print(len(result['all']))
        util.avg_calculation(result['all'],
                             'all_',
                             None,
                             True,
                             folder,
                             save_sum=True)
        util.avg_calculation(result['img'], 'img_', None, True, folder)

        for label in result:
            if label == 'img':
                continue
            util.avg_calculation(result[label],
                                 'survival_time_grade_' + str(grade),
                                 survival_days,
                                 True,
                                 folder,
                                 default_value=-1)
def test_selectbox(select_name, select_option, url_param):
    '''Changes the selectbox value and ensures that it was applied to the search
    
    Parameters:
        select_name: The id of the select element (string)
        select_option: The text of the option to select (string)
        url_param: The URL query string that should be applied (string)
    '''

    # Make sure this test is from a clean state
    setup("Ensuring that the search applies the parameter {}".format(url_param))

    # Change the dropdown option
    change_dropdown_selection(select_name, select_option)

    # Allow time for search options to update in JS
    time.sleep(1)

    # Press search
    search_button = return_when_visible(By.XPATH, "//button[@type='submit']")
    search_button.click()

    # Slight delay to see search results screen
    time.sleep(1)

    # Check if the url query string matches the search options
    test_passed(assert_url_contains(url_param))
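
# Hypothetical usage of test_selectbox() above (added for illustration); the
# select id, option text and URL parameter are made-up example values.
test_selectbox("sort_by", "Most recent", "sort=recent")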
Example #9
def process_vlsm(folder, n_permutations):
    """ Post process vlsm data """
    print(folder)
    util.setup(folder)
    image_ids = find_images()
    params = [
        'Index_value', 'karnofsky', 'Mobility', 'Selfcare', 'Activity', 'Pain',
        'Anxiety'
    ]
    alternative = [
        'less', 'less', 'greater', 'greater', 'greater', 'greater', 'greater'
    ]
    stat_func = [
        util.brunner_munzel_test, util.mannwhitneyu_test,
        util.mannwhitneyu_test, util.mannwhitneyu_test, util.mannwhitneyu_test,
        util.mannwhitneyu_test, util.mannwhitneyu_test
    ]
    for (qol_param, stat_func_i, alternative_i) in zip(params, stat_func,
                                                       alternative):
        (image_ids_with_qol, qol) = util.get_qol(image_ids, qol_param)
        result = util.post_calculations(image_ids_with_qol)
        for label in result:
            print(label)
            if label == 'img':
                continue
            util.vlsm(result[label],
                      label + '_' + qol_param,
                      stat_func_i,
                      qol,
                      folder,
                      n_permutations=n_permutations,
                      alternative=alternative_i)
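
# Hypothetical usage sketch for process_vlsm() above (added for illustration);
# the output folder and the permutation count are placeholder values, not from
# the original analysis.
process_vlsm("VLSM_RES/", n_permutations=1000)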
Example #10
def process_labels2(folder):
    """ Post process data tumor volume"""
    print(folder)
    util.setup(folder)
    conn = sqlite3.connect(util.DB_PATH, timeout=120)
    conn.text_factory = str
    cursor = conn.execute('''SELECT pid from Patient where study_id = ?''', ("qol_grade3,4", ))

    atlas_path = "/home/dahoiv/disk/Dropbox/Jobb/gbm/Atlas/Hammers/Hammers_mith-n30r95-MaxProbMap-full-MNI152-SPM12.nii.gz"
    resample = slicer.registration.brainsresample.BRAINSResample(command=util.BRAINSResample_PATH,
                                                                 inputVolume=atlas_path,
                                                                 outputVolume=os.path.abspath(folder +
                                                                                              'Hammers_mith-n30r95-MaxProbMap-full'
                                                                                              '-MNI152-SPM12_resample.nii.gz'),
                                                                 referenceVolume=os.path.abspath(util.TEMPLATE_VOLUME))
    resample.run()

    img = nib.load(folder + 'Hammers_mith-n30r95-MaxProbMap-full-MNI152-SPM12_resample.nii.gz')
    lobes_brain = img.get_data()
    label_defs = util.get_label_defs_hammers_mith()
    res_lobes_brain = {}

    book = Workbook()
    sheet = book.active

    sheet.cell(row=1, column=1).value = 'PID'
    sheet.cell(row=1, column=2).value = 'Lobe'
    # sheet.cell(row=1, column=3).value = 'Center of mass'
    k = 2
    for pid in cursor:
        pid = pid[0]

        _id = conn.execute('''SELECT id from Images where pid = ?''', (pid, )).fetchone()
        if not _id:
            print("---No data for ", pid)
            continue
        _id = _id[0]

        _filepath = conn.execute("SELECT filepath_reg from Labels where image_id = ?",
                                 (_id, )).fetchone()[0]
        if _filepath is None:
            print("No filepath for ", pid)
            continue

        com, com_idx = util.get_center_of_mass(util.DATA_FOLDER + _filepath)

        lobe = label_defs.get(lobes_brain[com_idx[0], com_idx[1], com_idx[2]], 'other')
        res_lobes_brain[pid] = lobe

        sheet.cell(row=k, column=1).value = pid
        sheet.cell(row=k, column=2).value = lobe
        sheet.cell(row=k, column=4).value = str(com_idx[0]) + " " + str(com_idx[1]) + " " + str(com_idx[2])
        k += 1

    book.save("brain_lobes_Hammers_mith_n30r95.xlsx")

    print(res_lobes_brain, len(res_lobes_brain))
Example #11
def export_labels(folder, censor_date, survival_groups, pids_to_exclude):
    """ Post process data """
    print(folder)
    util.setup(folder)

    image_ids, survival_days = util.get_image_id_and_survival_days(study_id="GBM_survival_time", exclude_pid=pids_to_exclude, censor_date_str=censor_date)
    result = util.post_calculations(image_ids)
    print('Total: ' + str(len(result['all'])) + ' patients')
        
    util.export_labels_and_survival_groups(result['all'], 'tumor', survival_days, survival_groups, True, folder)
Example #12
def process_labels(folder):
    """ Post process data tumor volume"""
    print(folder)
    util.setup(folder)
    conn = sqlite3.connect(util.DB_PATH, timeout=120)
    conn.text_factory = str
    cursor = conn.execute('''SELECT pid from Patient where study_id = ?''', ("qol_grade3,4", ))

    img = nib.load("/home/dahoiv/disk/data/MolekylareMarkorer/lobes_brain.nii")
    lobes_brain = img.get_data()
    label_defs = util.get_bigger_label_defs()
    label_defs_r_l = util.get_right_left_label_defs()
    res_lobes_brain = {}

    book = Workbook()
    sheet = book.active

    sheet.cell(row=1, column=1).value = 'PID'
    sheet.cell(row=1, column=2).value = 'Lobe'
    sheet.cell(row=1, column=3).value = 'Right/Left'
    # sheet.cell(row=1, column=3).value = 'Center of mass'
    k = 2
    for pid in cursor:
        pid = pid[0]

        _id = conn.execute('''SELECT id from Images where pid = ?''', (pid, )).fetchone()
        if not _id:
            print("---No data for ", pid)
            continue
        _id = _id[0]

        _filepath = conn.execute("SELECT filepath_reg from Labels where image_id = ?",
                                 (_id, )).fetchone()[0]
        if _filepath is None:
            print("No filepath for ", pid)
            continue

        com, com_idx = util.get_center_of_mass(util.DATA_FOLDER + _filepath)

        lobe = label_defs.get(lobes_brain[com_idx[0], com_idx[1], com_idx[2]], 'other')
        res_lobes_brain[pid] = lobe

        sheet.cell(row=k, column=1).value = pid
        sheet.cell(row=k, column=2).value = lobe
        sheet.cell(row=k, column=3).value = 'left' if com_idx[0] < 99 else 'right'
        rl = label_defs_r_l.get(lobes_brain[com_idx[0], com_idx[1], com_idx[2]], 'other')
        if rl != 'unknown' and rl != sheet.cell(row=k, column=3).value:
            print("\n\n\n", pid)
        # sheet.cell(row=k, column=3).value = str(com[0]) + " " + str(com[1]) + " " + str(com[2])
        # sheet.cell(row=k, column=4).value = str(com_idx[0]) + " " + str(com_idx[1]) + " " + str(com_idx[2])
        k += 1

    book.save("brain_lobes.xlsx")

    print(res_lobes_brain, len(res_lobes_brain))
Example #13
def process4(folder):
    """ Post process data tumor volume"""
    print(folder)
    util.setup(folder)
    default_value = 0
    label = 'all'

    for image_ids in [do_img_registration_GBM.find_images(), find_images(), find_images_163()]:
        result = util.post_calculations(image_ids)
        (image_ids_with_qol, qol) = util.get_tumor_volume(image_ids)
        num = len(result['all'])
        print(num)
        util.median_calculation(result[label], 'tumor_volume_N=' + str(num), qol, True, folder, default_value=default_value)
Example #14
def process(folder, glioma_grades):
    """ Post process data """
    print(folder)
    util.setup(folder)
    params = [
        'Index_value', 'Global_index', 'Mobility', 'Selfcare', 'Activity',
        'Pain', 'Anxiety', 'karnofsky'
    ]
    (image_ids, qol) = util.get_image_id_and_qol(None,
                                                 exclude_pid,
                                                 glioma_grades=glioma_grades)
    print(len(image_ids))
    result = util.post_calculations(image_ids)
    print(len(result['all']))
    util.avg_calculation(result['all'],
                         'all',
                         None,
                         True,
                         folder,
                         save_sum=True)
    util.avg_calculation(result['img'], 'img', None, True, folder)

    # (image_ids, qol) = util.get_image_id_and_qol('Index_value', exclude_pid, glioma_grades=glioma_grades)
    # result = util.post_calculations(image_ids)
    # util.calculate_t_test(result['all'], 1)

    for qol_param in params:
        (image_ids, qol) = util.get_image_id_and_qol(qol_param, exclude_pid)
        if not qol_param == "karnofsky":
            qol = [_temp * 100 for _temp in qol]
        if not qol_param == "Index_value":
            default_value = -100
        else:
            default_value = 0
        print(image_ids)
        result = util.post_calculations(image_ids)
        for label in result:
            print(label)
            if label == 'img':
                continue
            util.avg_calculation(result[label],
                                 label + '_' + qol_param,
                                 qol,
                                 True,
                                 folder,
                                 default_value=default_value)
            util.std_calculation(result[label], label + '_' + qol_param, qol,
                                 True, folder)
Example #15
def process(folder, exclude):
    """ Post process data """
    print(folder)
    util.setup(folder, "meningiomer")

    image_ids = find_images(exclude=exclude)

    result = util.post_calculations(image_ids)
    print(len(result['all']))
    util.avg_calculation(result['all'],
                         'all',
                         None,
                         True,
                         folder,
                         save_sum=True,
                         save_pngs=True)
    util.avg_calculation(result['img'], 'img', None, True, folder)
Example #16
def find_bad_registration(folder):
    """ Return image ids whose registered label center of mass lies outside the template brain mask """
    util.setup(folder, "meningiomer")

    image_ids = find_images()
    template_mask = nib.load(util.TEMPLATE_MASK).get_data()
    conn = sqlite3.connect(util.DB_PATH, timeout=120)
    k = 0
    exclude = []
    for _id in image_ids:
        _filepath = conn.execute(
            "SELECT filepath_reg from Labels where image_id = ?",
            (_id, )).fetchone()[0]
        com, com_idx = util.get_center_of_mass(util.DATA_FOLDER + _filepath)
        if template_mask[com_idx[0], com_idx[1], com_idx[2]] == 0:
            print(_filepath)
            k += 1
            exclude.append(_id)
    print(k)
    return exclude
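
# Hypothetical combined usage (added for illustration), assuming this function
# lives alongside the process(folder, exclude) function from Example #15: first
# flag labels whose center of mass falls outside the template brain mask, then
# exclude them from the averaging step. The folder name is a placeholder value.
bad_ids = find_bad_registration("meningiomer_RES/")
process("meningiomer_RES/", bad_ids)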
Example #17
def main():
    config, device = util.setup()
    logger.setLevel(getattr(logging, config['log_level'].upper()))
    gnn = import_module('gnn' if config['mlp_arch'] else 'gnn_old')

    if config['method'] == 'reinforce':
        model = gnn.ReinforcePolicy
    elif config['method'] == 'reinforce_multi':
        model = gnn.ReinforcePolicy
    elif config['method'] == 'pg':
        model = gnn.PGPolicy
    elif config['method'] == 'a2c':
        model = gnn.A2CPolicy

    if config['model_path']:
        logger.info('Loading model parameters from {}'.format(
            config['model_path']))
        policy = torch.load(config['model_path']).to(device)

        if config['load_with_noise']:
            with torch.no_grad():
                # for p in policy.parameters():
                #     p.add_(torch.randn(p.size()) * 0.02)
                for p in policy.policy_readout.parameters():
                    p.add_(torch.randn(p.size()) * 0.1)
    else:
        if config['mlp_arch']:
            policy = model(3, config['gnn_hidden_size'],
                           config['readout_hidden_size'], config['mlp_arch'],
                           config['gnn_iter'], config['gnn_async']).to(device)
        else:
            policy = model(3, config['gnn_hidden_size'],
                           config['readout_hidden_size']).to(device)
    optimizer = getattr(optim, config['optimizer'])(policy.parameters(),
                                                    lr=config['lr'])
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=config['lr_milestones'],
        gamma=config['lr_decay'])
    ls = LocalSearch(policy, device, config)

    train_sets, eval_set = load_data(config['data_path'], config['train_sets'],
                                     config['eval_set'],
                                     config['data_shuffle'])

    for i in range(1, config['cycles'] + 1):
        logger.info(f'Cycle: {i}')
        for train_set in train_sets:
            logger.info('Train set: {}'.format(train_set['name']))
            train(ls, optimizer, scheduler, (train_set, eval_set), config)
Example #18
def process_vlsm(folder, glioma_grades):
    """ Post process vlsm data """
    print(folder)
    util.setup(folder)
    params = [
        'Index_value', 'Mobility', 'Selfcare', 'Activity', 'Pain', 'Anxiety',
        'karnofsky'
    ]
    # one statistical test per qol parameter in params
    stat_func = [util.brunner_munzel_test] + [util.mannwhitneyu_test] * 6
    for (qol_param, stat_func_i) in zip(params, stat_func):
        (image_ids, qol) = util.get_image_id_and_qol(qol_param, exclude_pid)
        print(image_ids)
        result = util.post_calculations(image_ids)
        for label in result:
            print(label)
            if label == 'img':
                continue
            util.vlsm(result[label],
                      label + '_' + qol_param,
                      stat_func_i,
                      qol,
                      folder,
                      n_permutations=100)
Example #19
def main():
    config, device = util.setup()
    logger.setLevel(getattr(logging, config['log_level'].upper()))

    train_data, dev_data = load_data(config['data_path'], config['no_dev'])
    if dev_data:
        dev_batches = list(
            create_batches(dev_data, config['eval_batch_size'], device))

    gnn = GraphClassifier(2, config['gnn_hidden_size'],
                          config['readout_hidden_size']).to(device)
    optimizer = optim.Adam(gnn.parameters(), lr=config['lr'])
    criterion = nn.BCELoss()

    batch_count = 0
    best_dev_acc = 0

    for epoch in range(1, config['epochs'] + 1):
        train_loss = train_correct = sample_count = 0

        for data in create_batches(train_data, config['batch_size'], device):
            gnn.train()
            z = gnn(data)
            loss = criterion(z, data.y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_count += 1
            sample_count += data.y.shape[0]
            train_loss += loss.item() * data.y.shape[0]
            train_correct += pred_correct(z, data.y)

            if batch_count % config['report_interval'] == 0:
                log(epoch, batch_count, train_loss / sample_count,
                    train_correct / sample_count)
                train_loss = train_correct = sample_count = 0

            if not config[
                    'no_dev'] and batch_count % config['eval_interval'] == 0:
                gnn.eval()
                dev_loss, dev_acc = eval(gnn, criterion, dev_batches)
                logger.info('(Dev)  Loss: {:.4f},  Acc: {:.4f}'.format(
                    dev_loss, dev_acc))
                if dev_acc > best_dev_acc:
                    best_dev_acc = dev_acc
                    torch.save(gnn, join(config['dir'], 'model.pth'))

    torch.save(gnn, join(config['dir'], 'model_final.pth'))
Example #20
def evaluate(path_to_config, path_to_model):
    """
    Evaluate the network on test data using the model stored in `path_to_model`.

    Args:
        path_to_config (str): Path to configuration file
        path_to_model (str): Path to the saved model

    Returns:
        object: the paths of the session
    """

    config, paths, session_id = setup(path_to_config, 1)
    assert isinstance(config, ExperimentConfig)
    logger = logging.getLogger("%s.main" % config.name)

    logger.info("Evaluating network on test data")

    network = Network(config, paths, session_id, 0)
    network.build()
    network.evaluate(DATA_TYPE_TEST, model_path=path_to_model)

    return paths
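
# Hypothetical invocation of evaluate() above (added for illustration); both
# paths are placeholder values, not taken from the original experiment setup.
paths = evaluate("configs/experiment_config.yaml", "out/session_1/model.ckpt")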
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 19 15:32:12 2016

@author: dahoiv
"""

import os
import sqlite3

import util

if __name__ == "__main__":
    os.nice(19)

    util.setup("temp/", "")

    all_filepaths = ['brainSegmentation.db']

    conn = sqlite3.connect(util.DB_PATH)
    conn.text_factory = str
    cursor = conn.execute('''SELECT filepath, transform, filepath_reg from Images''')
    for (filepath, transform, filepath_reg) in cursor:
        all_filepaths.extend(util.ensure_list(filepath))
        all_filepaths.extend(util.ensure_list(filepath_reg))
        if transform is None:
            continue
        for _transform in transform.split(","):
            all_filepaths.append(_transform.strip())

    cursor = conn.execute('''SELECT filepath, filepath_reg from Labels''')
Example #22
import datetime
import os
import sqlite3
import glob
import util
from img_data import img_data
from image_registration import move_vol

# pylint: disable= invalid-name
if __name__ == "__main__":  # if 'unity' in hostname or 'compute' in hostname:

    #    new_segmentations_folder = '/Volumes/Neuro/Segmentations/oppdaterte_filer/'
    new_segmentations_folder = '/media/leb/data/oppdaterte_filer/'

    temp_folder = "ADD_SEGMENTATIONS_" + "{:%Y%m%d_%H%M}".format(
        datetime.datetime.now()) + "/"
    util.setup(temp_folder, 'glioma')

    conn = sqlite3.connect(util.DB_PATH)
    cursor = conn.cursor()

    modified_patients = [
        subfolder_name
        for subfolder_name in os.listdir(new_segmentations_folder) if
        os.path.isdir(os.path.join(new_segmentations_folder, subfolder_name))
    ]

    for pid in modified_patients:
        #print('PATIENT ' + pid)
        cursor.execute("SELECT id FROM Images WHERE pid = ?", (int(pid), ))
        image_id = cursor.fetchone()[0]
        print(image_id)
from laplacian_loss import laplacian_loss

opt = get_opt()
print_opt()

# hyper parameters
n_epochs = opt.epochs
batch_size = opt.batch_size
z_dim = opt.z_dim
x_dim = opt.x_dim
sample_size = opt.sample_size
lr = opt.lr
log_step = 100
sample_step = 1000

sample_path, model_path = setup()

image_path = os.path.join(os.getcwd(), 'CelebA', '128_crop')

train_loader = get_loader(image_path=image_path,
                          image_size=opt.x_dim,
                          batch_size=opt.batch_size,
                          num_workers=2)

image_path = os.path.join(os.getcwd(), 'CelebA', '128_crop')

img_size = opt.x_dim

# choose generator type laplacian generator or common dcgan generator
generator = Generator2()
    util.generate_image(img.pre_processed_filepath, image_registration.TEMPLATE_VOLUME)


def test_be(moving_image_ids, reg):
    start_time = datetime.datetime.now()
    for moving_image_id in moving_image_ids:
        _test_be(moving_image_id, reg)
    bet_time = datetime.datetime.now() - start_time
    print("\n\n\n\n -- Run time BET: ")
    print(bet_time / len(moving_image_ids))

# pylint: disable= invalid-name
if __name__ == "__main__":
    os.nice(19)
    util.setup("GBM_test/", "GBM")
    moving_datasets_ids = []

    reg = ants.Registration()
    # reg.inputs.args = "--verbose 1"
    reg.inputs.collapse_output_transforms = True
    reg.inputs.moving_image = image_registration.TEMPLATE_VOLUME

    reg.inputs.num_threads = 8
    reg.inputs.initial_moving_transform_com = True

    reg.inputs.transforms = ['Rigid', 'Affine']
    reg.inputs.metric = ['MI', 'MI']
    reg.inputs.radius_or_number_of_bins = [32, 32]
    reg.inputs.metric_weight = [1, 1]
    reg.inputs.convergence_window_size = [5, 5]
def process(folder, pids_to_exclude=()):
    """ Post process data """
    print(folder)
    util.setup(folder, 'MolekylareMarkorer')
    conn = sqlite3.connect(util.DB_PATH, timeout=120)
    conn.text_factory = str
    cursor = conn.execute(
        '''SELECT pid from MolekylareMarkorer ORDER BY pid''')
    image_ids = []
    image_ids_1 = []
    image_ids_2 = []
    image_ids_3 = []
    tag_data_1 = []
    tag_data_2 = []
    tag_data_3 = []
    img = nib.load("/media/leb/data/Atlas/lobes_brain.nii")
    lobes_brain = img.get_data()
    label_defs = util.get_label_defs()
    res_right_left_brain = {}
    res_lobes_brain = {}
    patients = '\nPID  MM\n----------------\n'

    for pid in cursor:
        pid = pid[0]
        if pid in pids_to_exclude:
            continue

        _id = conn.execute('''SELECT id from Images where pid = ?''',
                           (pid, )).fetchone()
        if not _id:
            print("---No data for ", pid)
            continue
        _id = _id[0]

        _mm = conn.execute(
            "SELECT Subgroup from MolekylareMarkorer where pid = ?",
            (pid, )).fetchone()[0]
        if _mm is None:
            print("No mm data for ", pid)
            patients += str(pid) + ': ?\n'
            continue

        _desc = conn.execute(
            "SELECT comments from MolekylareMarkorer where pid = ?",
            (pid, )).fetchone()[0]
        if _desc is None:
            _desc = ""

        _filepath = conn.execute(
            "SELECT filepath_reg from Labels where image_id = ?",
            (_id, )).fetchone()[0]
        if _filepath is None:
            print("No filepath for ", pid)
            continue

        com, com_idx = util.get_center_of_mass(util.DATA_FOLDER + _filepath)
        val = {}
        val['Name'] = str(pid) + "_" + str(_mm)
        val['PositionGlobal'] = str(com[0]) + "," + str(com[1]) + "," + str(
            com[2])
        val['desc'] = str(_desc)

        lobe = label_defs[lobes_brain[com_idx[0], com_idx[1], com_idx[2]]]
        right_left = 'left' if com_idx[0] < 99 else 'right'
        res_lobes_brain[lobe] = res_lobes_brain.get(lobe, [0, 0, 0])
        res_right_left_brain[right_left] = res_right_left_brain.get(
            right_left, [0, 0, 0])
        print(right_left, lobe)
        if _mm == 1:
            res_lobes_brain[lobe][0] += 1
            res_right_left_brain[right_left][0] += 1
            image_ids_1.extend([_id])
        elif _mm == 2:
            res_lobes_brain[lobe][1] += 1
            res_right_left_brain[right_left][1] += 1
            image_ids_2.extend([_id])
        elif _mm == 3:
            res_lobes_brain[lobe][2] += 1
            res_right_left_brain[right_left][2] += 1
            image_ids_3.extend([_id])

        image_ids.extend([_id])
        print(pid, _mm)
        patients += str(pid) + ': ' + str(_mm) + '\n'
        if _mm == 1:
            tag_data_1.append(val)
        elif _mm == 2:
            tag_data_2.append(val)
        elif _mm == 3:
            tag_data_3.append(val)

    print(format_dict(res_lobes_brain))
    lobes_brain_file = open(folder + "lobes_brain.txt", 'w')
    lobes_brain_file.write(format_dict(res_lobes_brain))
    lobes_brain_file.close()
    lobes_brain_file = open(folder + "lobes_brain.txt", 'a')
    lobes_brain_file.write(format_dict(res_right_left_brain))
    lobes_brain_file.write(patients)
    lobes_brain_file.close()

    print(len(image_ids))

    result = util.post_calculations(image_ids_1)
    util.avg_calculation(result['all'],
                         'mm_1',
                         None,
                         True,
                         folder,
                         save_sum=True)

    result = util.post_calculations(image_ids_2)
    util.avg_calculation(result['all'],
                         'mm_2',
                         None,
                         True,
                         folder,
                         save_sum=True)

    result = util.post_calculations(image_ids_3)
    util.avg_calculation(result['all'],
                         'mm_3',
                         None,
                         True,
                         folder,
                         save_sum=True)

    result = util.post_calculations(image_ids)
    util.avg_calculation(result['all'],
                         'mm_1_2_3',
                         None,
                         True,
                         folder,
                         save_sum=True)

    return
    # NOTE: the code below is unreachable because of the early return above.
    tag_data = {
        "tag_data_1": tag_data_1,
        "tag_data_2": tag_data_2,
        "tag_data_3": tag_data_3
    }
    pickle.dump(tag_data, open("tag_data.pickle", "wb"))

    cursor.close()
    conn.close()
    util.write_fcsv("mm_1", folder, tag_data_1, '1 0 0', 13)
    util.write_fcsv("mm_2", folder, tag_data_2, '0 1 0', 5)
    util.write_fcsv("mm_3", folder, tag_data_3, '0 0 1', 6)
    result = util.post_calculations(image_ids)
    util.avg_calculation(result['all'],
                         'all',
                         None,
                         True,
                         folder,
                         save_sum=True)
    util.avg_calculation(result['img'], 'img', None, True, folder)
def process_labels(folder, pids_to_exclude=()):
    """ Post process data tumor volume"""
    print(folder)
    util.setup(folder, 'MolekylareMarkorer')
    conn = sqlite3.connect(util.DB_PATH, timeout=120)
    conn.text_factory = str
    cursor = conn.execute(
        '''SELECT pid from MolekylareMarkorer ORDER BY pid''')

    atlas_path = util.ATLAS_FOLDER_PATH + 'Hammers/Hammers_mith-n30r95-MaxProbMap-full-MNI152-SPM12.nii.gz'
    atlas_resampled_path = folder + 'Hammers_mith-n30r95-MaxProbMap-full-MNI152-SPM12_resample.nii.gz'
    resample = brainsresample.BRAINSResample(
        command=util.BRAINSResample_PATH,
        inputVolume=atlas_path,
        outputVolume=os.path.abspath(atlas_resampled_path),
        referenceVolume=os.path.abspath(util.TEMPLATE_VOLUME))
    resample.run()

    img = nib.load(atlas_resampled_path)
    lobes_brain = img.get_data()
    label_defs = util.get_label_defs_hammers_mith()
    res_lobes_brain = {}

    coordinates_svz = util.get_label_coordinates(util.ATLAS_FOLDER_PATH +
                                                 'SubventricularZone2.nii.gz')
    surface_dg = util.get_surface(util.ATLAS_FOLDER_PATH +
                                  'DentateGyrus.nii.gz')

    book = Workbook()
    sheet = book.active

    sheet.cell(row=1, column=1).value = 'PID'
    sheet.cell(row=1, column=2).value = 'MM'
    sheet.cell(row=1, column=3).value = 'Lobe, center of tumor'
    sheet.cell(row=1,
               column=4).value = 'Distance from SVZ to center of tumor (mm)'
    sheet.cell(row=1,
               column=5).value = 'Distance from SVZ to border of tumor (mm)'
    sheet.cell(row=1,
               column=6).value = 'Distance from DG to center of tumor (mm)'
    sheet.cell(row=1,
               column=7).value = 'Distance from DG to border of tumor (mm)'
    sheet.cell(row=1, column=8).value = 'Tumor volume (mm^3)'
    i = 8
    label_defs_to_column = {}
    for key in label_defs:
        i += 1
        sheet.cell(row=1, column=i).value = label_defs[key]
        label_defs_to_column[key] = i
    k = 2
    for pid in cursor:
        pid = pid[0]

        if pid in pids_to_exclude:
            continue

        _id = conn.execute('''SELECT id from Images where pid = ?''',
                           (pid, )).fetchone()
        if not _id:
            print("---No data for ", pid)
            continue
        _id = _id[0]

        _filepath = conn.execute(
            "SELECT filepath_reg from Labels where image_id = ?",
            (_id, )).fetchone()[0]
        if _filepath is None:
            print("No filepath for ", pid)
            continue

        com, com_idx = util.get_center_of_mass(util.DATA_FOLDER + _filepath)
        surface = util.get_surface(util.DATA_FOLDER + _filepath)

        print(pid, com_idx)

        dist_from_svz_to_com = distance.cdist(coordinates_svz, [com],
                                              'euclidean').min()
        dist_from_svz_to_border = distance.cdist(coordinates_svz,
                                                 surface['point_cloud'],
                                                 'euclidean').min()
        dist_from_dg_to_com = util.get_min_distance(surface_dg, [com])
        dist_from_dg_to_border = util.get_min_distance(surface_dg,
                                                       surface['point_cloud'])

        lobe = label_defs.get(lobes_brain[com_idx[0], com_idx[1], com_idx[2]],
                              'other')
        res_lobes_brain[pid] = lobe

        img = nib.load(util.DATA_FOLDER + _filepath)
        tumor_data = img.get_data()
        voxel_size = img.header.get_zooms()
        voxel_volume = np.prod(voxel_size[0:3])
        n_voxels = (tumor_data > 0).sum()
        tumor_volume = n_voxels * voxel_volume

        union_data = lobes_brain * tumor_data
        union_data = union_data.flatten()
        lobe_overlap = ''
        for column in range(1, 1 + max(label_defs_to_column.values())):
            sheet.cell(row=k, column=column).value = 0
        for _lobe in np.unique(union_data):
            column = label_defs_to_column.get(_lobe)
            if column is None:
                continue
            sheet.cell(row=k, column=column).value = 1
            lobe_overlap += label_defs.get(_lobe, '') + ', '

        _mm = conn.execute(
            "SELECT Subgroup from MolekylareMarkorer where pid = ?",
            (pid, )).fetchone()[0]

        sheet.cell(row=k, column=1).value = pid
        sheet.cell(row=k, column=2).value = str(_mm)
        sheet.cell(row=k, column=3).value = lobe
        sheet.cell(row=k, column=4).value = round(dist_from_svz_to_com, 2)
        sheet.cell(row=k, column=5).value = round(dist_from_svz_to_border, 2)
        sheet.cell(row=k, column=6).value = round(dist_from_dg_to_com, 2)
        sheet.cell(row=k, column=7).value = round(dist_from_dg_to_border, 2)
        sheet.cell(row=k, column=8).value = round(tumor_volume, 1)

        k += 1

    book.save(folder + "brain_lobes_Hammers_mith_n30r95.xlsx")

    print(res_lobes_brain, len(res_lobes_brain))
Example #27
File: wechat.py Project: jimlin95/hm
    def setUp(self):
        super(WechatTest, self).setUp()
        u.setup(d)
Example #28
File: video.py Project: jimlin95/hm
    def setUp(self):
        super(VideoTest, self).setUp()
        u.setup(d)
Example #29
    def setUp(self):
        super(AngrybirdTest, self).setUp()
        u.setup(d)
Example #30
File: baidu.py Project: jimlin95/hm
    def setUp(self):
        super(BaiduTest, self).setUp()
        u.setup(d)
Example #31
    def setUp(self):
        super(ContactTest, self).setUp()
        u.setup(d)
if __name__ == "__main__":
    #    if False:
    #        import do_img_registration_LGG_POST as do_img_registration
    #        util.setup("LGG_POST_RES/", "LGG")
    #    elif False:
    #        import do_img_registration_LGG_PRE as do_img_registration
    #        util.setup("LGG_PRE_RES/", "LGG")
    #    elif False:
    #        import do_img_registration_GBM as do_img_registration
    #        util.setup("GBM_RES2/", "GBM")

    params = ['Index_value', 'Global_index', 'Mobility', 'Selfcare', 'Activity', 'Pain', 'Anxiety']
    util.mkdir_p("LGG_GBM_RES")

    FOLDER = "LGG_GBM_RES/"  # "LGG_GBM_RES/GBM"
    util.setup(FOLDER)

    (image_ids, qol) = util.get_image_id_and_qol('Index_value')
    print(image_ids, len(image_ids))
    result = util.post_calculations(image_ids)
    util.calculate_t_test(result['all'], 0.85)

    for qol_param in params:
        (image_ids, qol) = util.get_image_id_and_qol(qol_param)
        print(image_ids)
        result = util.post_calculations(image_ids)
        for label in result:
            print(label)
            if label == 'img':
                continue
            util.avg_calculation(result[label], label + '_' + qol_param, qol, True, FOLDER)

def find_images():
    """ Find images for registration """
    conn = sqlite3.connect(util.DB_PATH)
    conn.text_factory = str
    cursor = conn.execute('''SELECT pid from Patient''')
    ids = []
    for row in cursor:
        cursor2 = conn.execute('''SELECT id from Images where pid = ? AND diag_pre_post = ?''',
                               (row[0], "pre"))
        for _id in cursor2:
            ids.append(_id[0])
        cursor2.close()

    cursor.close()
    conn.close()
    return ids


# pylint: disable= invalid-name
if __name__ == "__main__":
    os.nice(19)
    util.setup("LGG_PRE/")

    pre_images = find_images()

    data_transforms = image_registration.get_transforms(pre_images, image_registration.SYN)

#    image_registration.save_transform_to_database(data_transforms)
Example #34
        'limit': 100,
        'sr_detail': True
    }

    usernames = util.get_usernames()

    for user in usernames:
        print(user)
        continueWithUser = True

        while continueWithUser:
            r = reddit.api_get('user/' + user.strip() + '/submitted', params)
            if r != None:
                for thing in r['data']['children']:
                    try:
                        user_comments.insert_one(thing)
                    except pymongo.errors.DuplicateKeyError:
                        continue
                if r['data']['after'] is None:
                    continueWithUser = False
                    break
                else:
                    params['after'] = r['data']['after']
            else:
                continueWithUser = False


if __name__ == '__main__':
    util.setup()
    get_reddit_user_content()
Example #35
import util as util
import math
import time

#start_time = time.time()

# import/parse location and connection file contents
util.setup()

#Receive the input from the user for the starting and ending cities
temp = input("What is the starting city? ")
while(util.verifyInput(temp)!=True):
      print("\nPlease input a valid city")
      temp = input("What is the starting city? ")
start = temp

temp   = input("\nWhat is the ending city? ")
while(util.verifyInput(temp)!=True):
      print("\nPlease input a valid city")
      temp = input("What is the ending city? ")
end = temp

skip = []
temp = input("\nAre there any cities that you would not want to visit? Please enter \"Help\" if you would like to see a list of cities. Or refer to the \"locations.txt\" file.\n")
while(temp == "Help"):
      for i in util.locations:
            if(i != "END"):
                  print(i)
      temp = input("What city would you like to skip?\n")
      while(temp!= ""):
            if(temp in util.locations):
Example #36
File: gallery.py Project: jimlin95/hm
    def setUp(self):
        super(GalleryTest, self).setUp()
        u.setup(d)
Example #37
    util.debug_print('removing from excludes file the line: ' + remove_line)
    update_excludes = util.updateFile('excludes', remove_line, addLine = False)
        
    # remove that name from slaves file
    util.debug_print('removing from slaves file the line: ' + str(remove_line))
    update_slaves = util.updateFile('slaves', remove_line, addLine = False)
    
    # get vmid from slaveName
    vmid = util.get_vm_id_by_name(slaveName)
    
    # Now destroy the VM
    util.debug_print('Now we will be trying to destroy the machine with ID: ' + str(vmid))
    result = api.destroyVirtualMachine({'id': vmid})
    
    util.debug_print('waiting for the destroyed machine to be finished being destroyed')
    waitResult = util.waitForAsync(result.get('jobid'))
    
    # since we destroyed the vm, we can remove from master's /etc/hosts file
    hosts = util.get_file_content(config.DEFAULT_DESTINATION_HOSTS_FILENAME)
    checker = re.compile('.*' + slaveName + '\n')
    to_be_removed_hosts_line = [line for line in hosts if checker.match(line) is not None]
    util.debug_print('remove line:' + str(to_be_removed_hosts_line) + ' from /etc/hosts file')
    util.updateFile('hosts', to_be_removed_hosts_line[0], addLine = False)

    util.debug_print('Done destroying VM.')    
    return True
    
# basic global stuff
api, pp = util.setup()

        _img = img_data(img_id, db_path, util.TEMP_FOLDER_PATH)
        _img.load_db_transforms()
        print(_img.transform)
        if _img.transform is None:
            continue
        _img.processed_filepath = image_registration.move_vol(_img.img_filepath, _img.get_transforms())
        _img.image_id = ny_img_id
        data_transforms.append(_img)

    image_registration.save_transform_to_database(data_transforms)


if __name__ == "__main__":
    os.nice(19)

    util.setup("temp_convert/", "LGG")
    util.mkdir_p(util.TEMP_FOLDER_PATH)

    util.DATA_FOLDER = "/mnt/dokumneter/data/database/"

    if True:
        db_path = "/home/dahoiv/disk/data/database3/LGG/"
        util.DATA_FOLDER = util.DATA_FOLDER + "LGG" + "/"
        util.DB_PATH = util.DATA_FOLDER + "brainSegmentation.db"

        convert_table_inv = ConvertDataToDB.get_convert_table('/home/dahoiv/disk/data/Segmentations/NY_PID_LGG segmentert.xlsx')
        convert_table = {v: k for k, v in convert_table_inv.items()}
        print(convert_table)
        print(util.DB_PATH)
        conn = sqlite3.connect(util.DB_PATH)
        conn.text_factory = str
    conn.close()

    import datetime
    start_time = datetime.datetime.now()
    pre_img = img_data(pre_image_id, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)
    post_img = img_data(moving_image_id, util.DATA_FOLDER, util.TEMP_FOLDER_PATH)

    pre_img = image_registration.pre_process(pre_img, False)
    post_img = image_registration.pre_process(post_img, False)
    img = image_registration.registration(post_img, pre_img.pre_processed_filepath,
                                          image_registration.RIGID)
    print("\n\n\n\n -- Total run time: ")
    print(datetime.datetime.now() - start_time)

    img.fixed_image = pre_image_id

    return img


# pylint: disable= invalid-name
if __name__ == "__main__":
    os.nice(19)
    util.setup("LGG_POST/")

    post_images = find_images()
    data_transforms = image_registration.get_transforms(post_images,
                                                        process_dataset_func=process_dataset,
                                                        save_to_db=True)

#    image_registration.save_transform_to_database(data_transforms)
Example #40
    def setUp(self):
        super(MusicTest, self).setUp()
        u.setup(d)
Example #41
    def setUp(self):
        super(MessageTest, self).setUp()
        u.setup(d)
    cursor = conn.execute('''SELECT pid from Patient''')
    ids = []
    for row in cursor:
        pid = row[0]
        if pid not in pids_with_qol:
            continue
        cursor2 = conn.execute('''SELECT id from Images where pid = ? AND diag_pre_post = ?''',
                               (pid, "pre"))

        for _id in cursor2:
            ids.append(_id[0])
        cursor2.close()

    cursor.close()
    conn.close()
    return ids


# pylint: disable= invalid-name
if __name__ == "__main__":
    os.nice(17)
    util.setup("GBM_LGG_TEMP_" + "{:%m_%d_%Y}_BE2".format(datetime.datetime.now()) + "/")

    moving_datasets_ids = find_images()
    print(moving_datasets_ids, len(moving_datasets_ids))
    data_transforms = image_registration.get_transforms(moving_datasets_ids,
                                                        image_registration.RIGID,
                                                        save_to_db=True)

    # image_registration.save_transform_to_database(data_transforms)
                    help=('A boolean indicating whether running this sample '
                          'will make changes. No changes will occur if this '
                          'is set True.'))


def main(doubleclick_bid_manager, body):
    # Construct the request.
    request = doubleclick_bid_manager.lineitems().uploadlineitems(body=body)
    response = request.execute()

    if 'uploadStatus' in response and 'errors' in response['uploadStatus']:
        for error in response['uploadStatus']['errors']:
            print(error)
    else:
        print('Upload Successful.')


if __name__ == '__main__':
    args = util.get_arguments(sys.argv, __doc__, parents=[parser])

    file_path = args.file_path
    if not os.path.isabs(file_path):
        file_path = os.path.expanduser(file_path)

    with open(file_path, 'rb') as handle:
        line_items = handle.read().decode('utf-8')

    BODY = {'dryRun': args.dry_run, 'lineItems': line_items}

    main(util.setup(args), BODY)
Example #44
from sys import argv
from util import setup, init, select, test


def show_help():
    print("""Usage:
trainer.py [cmd]
cmd: setup or test""")


if __name__ == '__main__':
    init()

    if len(argv) >= 2:
        cmd = argv[1]

        if cmd == 'setup':
            setup()
        elif cmd == 'select':
            select(int(argv[2]))
        elif cmd == 'test':
            test()
        # elif cmd == 'add':
        #     repo = argv[2]
        else:
            show_help()
    else:
        show_help()
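
# Example invocations for the trainer script above, following the command names
# handled in its __main__ block (the numeric argument to 'select' is a made-up
# placeholder value):
#   python trainer.py setup
#   python trainer.py select 3
#   python trainer.py test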

def find_images():
    """ Find images for registration """
    conn = sqlite3.connect(util.DB_PATH)
    conn.text_factory = str
    cursor = conn.execute('''SELECT pid from Patient''')
    ids = []
    for row in cursor:
        cursor2 = conn.execute('''SELECT id from Images where pid = ?''', (row[0], ))
        for _id in cursor2:
            ids.append(_id[0])
        cursor2.close()

    cursor.close()
    conn.close()
    print(ids)
    return ids


# pylint: disable= invalid-name
if __name__ == "__main__":
    os.nice(19)
    util.setup("GBM/")

    moving_datasets_ids = find_images()

    data_transforms = image_registration.get_transforms(moving_datasets_ids, image_registration.SYN)

#    image_registration.save_transform_to_database(data_transforms)
Example #46
    update_slaves = util.updateFile('slaves', remove_line, addLine=False)

    # get vmid from slaveName
    vmid = util.get_vm_id_by_name(slaveName)

    # Now destroy the VM
    util.debug_print('Now we will be trying to destroy the machine with ID: ' +
                     str(vmid))
    result = api.destroyVirtualMachine({'id': vmid})

    util.debug_print(
        'waiting for the destroyed machine to be finished being destroyed')
    waitResult = util.waitForAsync(result.get('jobid'))

    # since we destroyed the vm, we can remove from master's /etc/hosts file
    hosts = util.get_file_content(config.DEFAULT_DESTINATION_HOSTS_FILENAME)
    checker = re.compile('.*' + slaveName + '\n')
    to_be_removed_hosts_line = [
        line for line in hosts if checker.match(line) is not None
    ]
    util.debug_print('remove line:' + str(to_be_removed_hosts_line) +
                     ' from /etc/hosts file')
    util.updateFile('hosts', to_be_removed_hosts_line[0], addLine=False)

    util.debug_print('Done destroying VM.')
    return True


# basic global stuff
api, pp = util.setup()
Example #47
    def setUp(self):
        super(PhoneTest, self).setUp()
        u.setup(d)
Example #48
    def setUp(self):
        super(CameraTest, self).setUp()
        u.setup(d)
Example #49
def train(path_to_config, verbose=False):
    """
    Train the network for multiple runs (can be specified in the configuration file with the "num_runs" option).
    After finishing the training for a run, the network is evaluated on the development data and the result is stored.
    After finishing all runs, the results are averaged.

    Args:
        path_to_config (str): Path to the configuration file.
        verbose (bool): Whether or not to display additional logging information
    """
    config, paths, session_id = setup(path_to_config)
    assert isinstance(config, ExperimentConfig)
    logger = logging.getLogger("%s.train" % config.name)

    results = []

    # Average results over `config.num_runs` runs
    for i in xrange(config.num_runs):
        logger.info("*" * 80)
        logger.info("* %d. run for experiment %s", (i + 1), config.name)
        logger.info("*" * 80)
        network = Network(config, paths, session_id, i)

        network.build()
        num_actual_epochs, stopped_early = network.train(
            verbose=verbose, log_results_on_dev=True)
        run_results = network.evaluate(data_type=DATA_TYPE_DEV)
        results.append(run_results)

        for task_name, result_list in run_results.items():
            assert isinstance(task_name, str)
            assert isinstance(result_list, ResultList)
            # Write a CSV file per task because each task may have different evaluation metrics
            csv_out_path = os.path.join(
                paths["session_out"],
                "session_results.task_%s.csv" % task_name)
            network.log_result_list_csv(
                task_name, result_list, csv_out_path, {
                    "# planned epochs": config.epochs,
                    "# actual epochs": num_actual_epochs,
                    "stopped early?": stopped_early,
                    "run": i + 1
                })

        logger.info("*" * 80)
        logger.info("")

        # Reset tensorflow variables
        tf.reset_default_graph()

    logger.info("")
    logger.info("Results after %d runs:", config.num_runs)

    timestamp = time.strftime("%Y-%m-%d %H:%M")

    for task in config.tasks:
        task_name = task.name
        task_results = [result[task_name] for result in results]
        # Write a CSV file per task because each task may have different evaluation metrics
        csv_file_path = os.path.join(paths["experiment_out"],
                                     "results.task_%s.csv" % task_name)
        logger.info(" - Task %s", task_name)

        headers = ["timestamp", "session_id", "num_runs", "task_name"]
        values = [timestamp, session_id, config.num_runs, task_name]

        for metric in set(config.eval_metrics + task.eval_metrics):
            metric_values_sum = 0

            for result in task_results:
                metric_values_sum += result.compute_metric_by_name(metric)

            logger.info("  - Average %s at task %s is %.3f", metric.title(),
                        task_name, metric_values_sum / float(config.num_runs))

            headers += ["AVG:%s" % metric.title()]
            values += [metric_values_sum / float(config.num_runs)]

        append_to_csv(csv_file_path, headers=headers, values=values)
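
# Hypothetical invocation of train() above (added for illustration); the
# configuration path is a placeholder value.
train("configs/experiment_config.yaml", verbose=True)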
Example #50
import datetime
import os
import sys

import image_registration
import util

# pylint: disable= invalid-name
if __name__ == "__main__":  # if 'unity' in hostname or 'compute' in hostname:
    HOSTNAME = os.uname()[1]
    if 'unity' in HOSTNAME or 'compute' in HOSTNAME:
        path = "/work/danieli/GBM_survival/"
    else:
        os.nice(19)
        path = "GBM_" + "{:%m_%d_%Y}".format(datetime.datetime.now()) + "/"

    util.setup(path)

    #image_ids, survival_days = util.get_image_id_and_survival_days(study_id="GBM_survival_time", registration_date_upper_lim="2018-10-29")
    #image_ids = [10]
    #image_ids = [10, 19, 35, 371, 71, 83,98, 103, 106, 116, 231, 392, 458]
    #image_ids = [10, 19, 71, 83, 98, 103, 106, 116, 231, 392, 458]
    #image_ids = range(454,465)
    #image_ids = [413, 386, 398, 406, 416, 420, 419, 392, 412, 408, 405, 407]
    image_ids = [75, 465, 125, 126, 183, 377, 220]
    #image_ids = [413]

    util.LOGGER.info(str(image_ids) + " " + str(len(image_ids)))
    image_registration.get_transforms(
        image_ids,
        #reg_type=image_registration.RIGID,
        #save_to_db=True,
Example #51
File: weibo.py Project: jimlin95/hm
    def setUp(self):
        super(WeiboTest, self).setUp()
        u.setup(d)
Example #52
File: browser.py Project: jimlin95/hm
    def setUp(self):
        super(BrowserTest, self).setUp()
        u.setup(d)