Example #1
def post_processing_segmentation(test_image_path):
    """
    post_processing_segmentation iterates through all the test results in
    the given directory and retouches all of them using binary morphological
    operators

    param: test_image_path
    return: void
    """

    file_list = test_image_path

    post_process_folder = 'postprocess'
    create_dir(post_process_folder)

    for iImage, file in enumerate(file_list):
        tic = time.time()

        path, filename = os.path.split(file)
        basename = os.path.splitext(filename)[0]
        savename = os.path.join(post_process_folder, basename + '.mat')

        if not os.path.isfile(savename):
            labels = retouch_segmentation(file)
            matlab.save(savename, {'mask': labels})

        duration = time.time() - tic
        print('process %d / %d images (%.2f sec)' %
              (iImage + 1, len(file_list), duration))
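# Usage sketch (illustrative): post_processing_segmentation expects a list of
# result file paths; the directory and extension below are placeholders, and
# create_dir / retouch_segmentation / matlab.save are assumed to come from the
# project's own helper modules.
import glob
import os

test_results = glob.glob(os.path.join('results', 'test', '*.mat'))

# writes one retouched mask per input file to ./postprocess/<basename>.mat
post_processing_segmentation(test_results)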
Example #2
def validation_loop(sess, define_graph_output, train_val_variables,
                    nValBatches, epoch, checkpoint_folder, flags):
    """
    validation_loop contains the definition for the loop that evaluates the
    model on the validation set; the weights are not updated here (no train
    op is run and keep_prob is fixed at 1.0)

    param: sess
    param: define_graph_output
    param: train_val_variables
    param: nValBatches
    param: epoch
    param: checkpoint_folder
    param: flags
    return: none
    """

    for step in xrange(nValBatches):
        start_time = time.time()

        # run session
        loss_value_val, precision, recall, f1score, TP, FP, FN, TN,\
            out_val, pred_val = \
            sess.run([define_graph_output['loss_val']['avg_cross_entropy_log_loss'],
                      define_graph_output['accuracy_val_output']['precision'],
                      define_graph_output['accuracy_val_output']['recall'],
                      define_graph_output['accuracy_val_output']['f1score'],
                      define_graph_output['accuracy_val_output']['TP'],
                      define_graph_output['accuracy_val_output']['FP'],
                      define_graph_output['accuracy_val_output']['FN'],
                      define_graph_output['accuracy_val_output']['TN'],
                      define_graph_output['out_content_val'],
                      define_graph_output['sigmoid_all_val']
                      ],
                     feed_dict={define_graph_output['keep_prob']: 1.0})

        duration = time.time() - start_time
        assert not np.isnan(loss_value_val), 'Model diverged with loss = NaN'

        if step % 100 == 0:
            matlab.save(os.path.join(checkpoint_folder, 'val_content.mat'),
                        {'out_val':out_val,'pred_val':pred_val})

        # evaluate
        if not np.isnan(loss_value_val):
            train_val_variables['avg_val_loss'].append(loss_value_val)
        for iclass in range(flags['n_classes']):
            if not np.isnan(precision[iclass]):
                train_val_variables['avg_val_precision'][iclass].append(precision[iclass])
            if not np.isnan(recall[iclass]):
                train_val_variables['avg_val_recall'][iclass].append(recall[iclass])
            if not np.isnan(f1score[iclass]):
                train_val_variables['avg_val_f1score'][iclass].append(f1score[iclass])

        # print
        format_str = ('%s: epoch %d, step %d/ %d (%.2f sec/step)')
        print(format_str % (datetime.now(), epoch, step + 1, nValBatches, duration))
        format_str = ('Validation Loss = %.2f, Precision = %.2f, Recall = %.2f, F1 = %.2f, ' +
                      'TP = %.2f, FP = %.2f, FN = %.2f, TN = %.2f')
        for iclass in range(flags['n_classes']):
            print(format_str % (loss_value_val, precision[iclass], recall[iclass],
                            f1score[iclass], TP[iclass], FP[iclass], FN[iclass], TN[iclass]))
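# Structure sketch (illustrative): the appends above assume train_val_variables
# keeps one running list per class for each validation metric. The real
# initialization lives elsewhere in the project; this dict literal only shows
# the shape the loop relies on (the 'avg_train_*' keys follow the same pattern).
n_classes = 4  # stands in for flags['n_classes']
train_val_variables = {
    'avg_val_loss': [],
    'avg_val_precision': [[] for _ in range(n_classes)],
    'avg_val_recall': [[] for _ in range(n_classes)],
    'avg_val_f1score': [[] for _ in range(n_classes)],
}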
Example #3
def training_loop(sess, define_graph_output, train_val_variables,
                  nTrainBatches, epoch, checkpoint_folder):
    """
    training_loop contains the definition for the loop where the weights
    are iteratively updated to minimize the loss function on the training set

    param: sess
    param: define_graph_output
    param: train_val_variables
    param: nTrainBatches
    param: epoch
    param: checkpoint_folder
    return: none
    """
    for step in xrange(nTrainBatches):

        start_time = time.time()

        # run session
        _, loss_value_train, precision, recall, f1score, TP, FP, FN, TN,\
            out_train, pred_train = \
            sess.run([define_graph_output['train_op'],
                      define_graph_output['loss_train']['avg_cross_entropy_log_loss'],
                      define_graph_output['accuracy_train_output']['precision'],
                      define_graph_output['accuracy_train_output']['recall'],
                      define_graph_output['accuracy_train_output']['f1score'],
                      define_graph_output['accuracy_train_output']['TP'],
                      define_graph_output['accuracy_train_output']['FP'],
                      define_graph_output['accuracy_train_output']['FN'],
                      define_graph_output['accuracy_train_output']['TN'],
                      define_graph_output['out_content_train'],
                      define_graph_output['sigmoid_all_train']
                      ],
                     feed_dict={define_graph_output['keep_prob']: 0.5})

        duration = time.time() - start_time
        assert not np.isnan(loss_value_train), 'Model diverged with loss = NaN'

        if step % 100 == 0:
            matlab.save(os.path.join(checkpoint_folder, 'train_content.mat'), {
                'out_train': out_train,
                'pred_train': pred_train
            })

        # evaluate
        if not np.isnan(loss_value_train):
            train_val_variables['avg_train_loss'].append(loss_value_train)
        if not np.isnan(precision):
            train_val_variables['avg_train_precision'].append(precision)
        if not np.isnan(recall):
            train_val_variables['avg_train_recall'].append(recall)
        if not np.isnan(f1score):
            train_val_variables['avg_train_f1score'].append(f1score)

        # print
        format_str = ('%s: epoch %d, step %d/ %d (%.2f sec/step)')
        print(format_str %
              (datetime.now(), epoch, step + 1, nTrainBatches, duration))
        format_str = (
            'Training Loss = %.2f, Precision = %.2f, Recall = %.2f, F1 = %.2f, '
            + 'TP = %.2f, FP = %.2f, FN = %.2f, TN = %.2f')
        print(format_str %
              (loss_value_train, precision, recall, f1score, TP, FP, FN, TN))
Example #4
def calculate_mean_variance_image(object_folder, flags):
    """
    calculate_mean_variance_image computes a running per-pixel mean and
    variance image for each image type listed in flags['dict_path'] and
    saves them to <object_folder>/checkpoint/network_stats.mat

    param: object_folder
    param: flags
    return: none
    """

    key_values = list(flags['dict_path'].keys())
    key_values.remove('group')

    # Setup
    network_stats_file_path = os.path.join(object_folder, 'checkpoint',
                                           'network_stats.mat')
    routine.create_dir(os.path.join(object_folder, 'checkpoint'))
    mean_dict = {}

    for key in key_values:
        image_folder = os.path.join(object_folder, 'train', key)
        list_images = glob.glob(
            os.path.join(image_folder, '*' + flags['dict_ext'][key]))

        image = KSimage.imread(list_images[0])
        #if np.random.randint(2, size=1) == 1:
        #    image = np.flipud(image)
        #if np.random.randint(2, size=1) == 1:
        #    image = np.fliplr(image)
        image = np.float32(image)

        mean_image = image
        variance_image = np.zeros(shape=image.shape, dtype=np.float32)

        for t, image_file in enumerate(list_images[1:]):
            image = KSimage.imread(image_file)

            # image = np.dstack((image[:, :, 2], image[:, :, 1], image[:, :, 0]))

            #if np.random.randint(2, size=1) == 1:
            #    image = np.flipud(image)
            #if np.random.randint(2, size=1) == 1:
            #    image = np.fliplr(image)
            image = np.float32(image)

            # update the running per-pixel mean and variance with the new image
            new_mean_image = (np.float32(t + 1) * mean_image +
                              image) / np.float32(t + 2)
            variance_image = np.float32(t + 1) / np.float32(t + 2) * variance_image \
                + np.float32(1) / np.float32(t + 1) * ((image - mean_image) ** 2)
            mean_image = new_mean_image

            print('calculate mean and variance: processing %d out of %d' %
                  (t + 2, len(list_images)))

        mean_dict[key + '_mean'] = mean_image
        mean_dict[key + '_var'] = variance_image

    matlab.save(network_stats_file_path, mean_dict)
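# Sanity-check sketch (illustrative, plain NumPy): the running mean update used
# above, mean <- ((t + 1) * mean + image) / (t + 2), reproduces the direct mean
# over a stack of images. Shapes and values here are arbitrary test data.
import numpy as np

rng = np.random.RandomState(0)
images = rng.rand(5, 4, 4, 3).astype(np.float32)  # five toy "images"

mean_image = images[0]
for t, image in enumerate(images[1:]):
    mean_image = (np.float32(t + 1) * mean_image + image) / np.float32(t + 2)

assert np.allclose(mean_image, images.mean(axis=0), atol=1e-5)
print('incremental mean matches direct mean')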
Example #5
def main(nth_fold, mode, flags, testdir):
    """
    main trains, tests, or executes the model on the provided
    data based on the specified preferences

    param: nth_fold
    param: mode
    param: experiment_folder
    param: image_ext
    param: test_model
    param: test_image_list
    return: saves segmentation results to appropriate file/directory
    """

    # check if cv or perm
    list_dir = os.listdir(os.path.join(flags['experiment_folder']))
    if ('cv' + str(nth_fold) in list_dir) and ('perm' + str(nth_fold)
                                               in list_dir):
        raise ValueError('Dangerous! You have both cv and perm on the path.')
    elif 'cv' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'cv' + str(nth_fold))
    elif 'perm' + str(nth_fold) in list_dir:
        object_folder = os.path.join(flags['experiment_folder'],
                                     'perm' + str(nth_fold))
    else:
        raise ValueError('No cv or perm folder!')

    # Train model
    if mode == 'train':
        checkpoint_folder = os.path.join(object_folder, 'checkpoint')
        network_stats_file_path = os.path.join(checkpoint_folder,
                                               'network_stats.mat')

        train_images_folder = os.path.join(object_folder, 'train', 'image')

        if not os.path.isfile(network_stats_file_path):
            list_images = glob.glob(
                os.path.join(train_images_folder, '*' + flags['image_ext']))
            print('calculating mean and variance image')
            mean_image, variance_image = utils.calculate_mean_variance_image(
                list_images)
            routine.create_dir(checkpoint_folder)
            matlab.save(network_stats_file_path, {
                'mean_image': mean_image,
                'variance_image': variance_image
            })

        tf_model_train.train(object_folder, flags)

    # Test model on validation set
    elif mode == 'test_model':
        checkpointlist = glob.glob(
            os.path.join(object_folder, 'checkpoint', 'model*meta'))
        checkpointlist = [
            file for file in checkpointlist if 'pretrain' not in file
        ]
        temp = []
        for filepath in checkpointlist:
            basename = os.path.basename(filepath)
            temp.append(int(float(basename.split('-')[-1].split('.')[0])))
        temp = np.sort(temp)

        model_path = os.path.join(
            object_folder, 'checkpoint',
            'model.ckpt-' + str(temp[flags['test_model']]))
        print('use epoch %d : model %s' % (flags['test_model'], 'model.ckpt-' +
                                           str(temp[flags['test_model']])))
        test_images_list = flags['test_image_list']
        filename_list = KScsv.read_csv(test_images_list)
        tf_model_test.test(object_folder, model_path, filename_list, flags)

    #Segment WSIs
    elif mode == 'test_WSI':
        checkpointlist = glob.glob(
            os.path.join(object_folder, 'checkpoint', 'model*meta'))
        checkpointlist = [
            file for file in checkpointlist if 'pretrain' not in file
        ]
        temp = []
        for filepath in checkpointlist:
            basename = os.path.basename(filepath)
            temp.append(int(float(basename.split('-')[-1].split('.')[0])))
        temp = np.sort(temp)

        model_path = os.path.join(
            object_folder, 'checkpoint',
            'model.ckpt-' + str(temp[flags['test_model']]))
        print('use epoch %d : model %s' % (flags['test_model'], 'model.ckpt-' +
                                           str(temp[flags['test_model']])))

        #should iterate over all subdirectories
        paths = get_immediate_subdirectories(testdir)
        paths.sort()  # sort WSIs into ascending numerical order
        #paths = paths[100:] #TODO: Enable based on which batch this code is running
        print("TEST DIR: " + str(testdir))

        for path in paths:
            print(os.path.join(testdir, path))
            if not os.path.isdir(
                    os.path.join(testdir, path + 'epiStromalSeg')
            ):  # skip WSIs whose output directory already exists
                tf_model_test.testWSI(object_folder, model_path,
                                      os.path.join(testdir, path), flags)

                # TODO: uncomment to process only controls
                # imageCSV = open(os.path.join('/data', 'avellal14', 'WSI_patches', 'BBD_NCC_Covariate_Outcome_KK_JH_modifiedWithPaths.csv'), 'rb')
                # reader = csv.reader(imageCSV)
                # csvList = list(reader)
                # patientId = path[:path.index('_')]
                # caseControlList = next(subl for subl in csvList if patientId in subl)
                # TODO: uncomment to process only cases
                # if caseControlList[1] == '1':  # only test the WSI if the image is indeed a case (1)
                #     tf_model_test.testWSI(object_folder, model_path, os.path.join(testdir, path), flags)

    #Segment WSIs at patient level using data from CSV
    elif mode == 'test_Case_Control':
        checkpointlist = glob.glob(
            os.path.join(object_folder, 'checkpoint', 'model*meta'))
        checkpointlist = [
            file for file in checkpointlist if 'pretrain' not in file
        ]
        temp = []
        for filepath in checkpointlist:
            basename = os.path.basename(filepath)
            temp.append(int(float(basename.split('-')[-1].split('.')[0])))
        temp = np.sort(temp)

        model_path = os.path.join(
            object_folder, 'checkpoint',
            'model.ckpt-' + str(temp[flags['test_model']]))
        print('use epoch %d : model %s' % (flags['test_model'], 'model.ckpt-' +
                                           str(temp[flags['test_model']])))

        with open(
                os.path.join('/home', 'avellal14', 'data', 'Adithya_BBD_NHS',
                             'NHS_BBD_CODE',
                             'casesAndMatchedControls224.csv')) as csvFile:
            csvReader = csv.DictReader(csvFile)
            for row in csvReader:
                if (row['path'] == 'BBD_NCC_extractedat20x'
                        or row['path'] == 'BBD_NCC_extractedat20x_round2'):
                    testdir = os.path.join('/home', 'avellal14', 'data',
                                           'Adithya_BBD_NHS', row['path'])
                    paths = get_subdirectories_by_patient(testdir, row['id'])

                    for path in paths:
                        print('CURRENT WSI BEING SEGMENTED',
                              os.path.join(testdir, path))
                        if not os.path.isdir(
                                os.path.join(testdir, path + '_cellSeg')
                        ):  # skip WSIs whose output directory already exists
                            tf_model_test.testWSI(object_folder, model_path,
                                                  os.path.join(testdir, path),
                                                  flags)
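# Driver sketch (illustrative): only the dictionary keys below are taken from
# what main() actually reads; every concrete path and value is a placeholder,
# and the fold/mode choices are arbitrary examples.
flags = {
    'experiment_folder': '/path/to/experiment',    # must contain cv1/ or perm1/
    'image_ext': '.png',
    'test_model': -1,                              # index into the sorted checkpoint list
    'test_image_list': '/path/to/test_images.csv',
}

main(nth_fold=1, mode='train', flags=flags, testdir=None)
main(nth_fold=1, mode='test_model', flags=flags, testdir=None)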
Example #6
def validation_loop(sess, define_graph_output, train_val_variables,
                    nValBatches, epoch, checkpoint_folder, flags):
    """
    validation_loop evaluates the model on the validation set; the weights
    are not updated (keep_prob is fixed at 1.0)

    param: sess
    param: define_graph_output
    param: train_val_variables
    param: nValBatches
    param: epoch
    param: checkpoint_folder
    param: flags
    return: none
    """

    for step in xrange(nValBatches):
        start_time = time.time()

        # # random brightness field
        # r0 = np.random.uniform(0, flags['size_input_patch'][0], 10)
        # r1 = np.random.uniform(0, flags['size_input_patch'][1], 10)
        #
        # r0 = np.unique(np.rint(np.append(0, np.append(r0, flags['size_input_patch'][0])))).astype(np.int)
        # r1 = np.unique(np.rint(np.append(0, np.append(r1, flags['size_input_patch'][1])))).astype(np.int)
        #
        # random_field_mat = np.zeros(shape=(flags['size_input_patch'][0], flags['size_input_patch'][1], 1),
        #                             dtype=np.float32)
        # for i in xrange(len(r0) - 1):
        #     for j in xrange(len(r1) - 1):
        #         random_field_mat[r0[i]:r0[i + 1], r1[j]:r1[j + 1], :] = np.random.uniform(0.5, 1.5, 1)

        # run session
        loss_value_val, precision, recall, f1score, TP, FP, FN, TN,\
            out_val, pred_val = \
            sess.run([define_graph_output['loss_val']['avg_cross_entropy_log_loss'],
                      define_graph_output['accuracy_val_output']['precision'],
                      define_graph_output['accuracy_val_output']['recall'],
                      define_graph_output['accuracy_val_output']['f1score'],
                      define_graph_output['accuracy_val_output']['TP'],
                      define_graph_output['accuracy_val_output']['FP'],
                      define_graph_output['accuracy_val_output']['FN'],
                      define_graph_output['accuracy_val_output']['TN'],
                      define_graph_output['out_content_val'],
                      define_graph_output['sigmoid_all_val']
                      ],
                     feed_dict={define_graph_output['keep_prob']: 1.0
                                # define_graph_output['random_field']: random_field_mat
                                })

        duration = time.time() - start_time
        assert not np.isnan(loss_value_val), 'Model diverged with loss = NaN'

        if step % 100 == 0:
            matlab.save(os.path.join(checkpoint_folder, 'val_content.mat'),
                        {'out_val':out_val,'pred_val':pred_val})

        # evaluate
        if not np.isnan(loss_value_val):
            train_val_variables['avg_val_loss'].append(loss_value_val)
        for iclass in range(flags['n_classes']):
            if not np.isnan(precision[iclass]):
                train_val_variables['avg_val_precision'][iclass].append(precision[iclass])
            if not np.isnan(recall[iclass]):
                train_val_variables['avg_val_recall'][iclass].append(recall[iclass])
            if not np.isnan(f1score[iclass]):
                train_val_variables['avg_val_f1score'][iclass].append(f1score[iclass])

        # print
        format_str = ('%s: epoch %d, step %d/ %d (%.2f sec/step)')
        print(format_str % (datetime.now(), epoch, step + 1, nValBatches, duration))
        format_str = ('Validation Loss = %.2f, Precision = %.2f, Recall = %.2f, F1 = %.2f, ' +
                      'TP = %.2f, FP = %.2f, FN = %.2f, TN = %.2f')
        for iclass in range(flags['n_classes']):
            print(format_str % (loss_value_val, precision[iclass], recall[iclass],
                            f1score[iclass], TP[iclass], FP[iclass], FN[iclass], TN[iclass]))
Example #7
def training_loop(sess, define_graph_output, train_val_variables,
                  nTrainBatches, epoch, checkpoint_folder, flags):
    """
    training_loop contains the definition for the loop where the weights
    are iteratively modified to optimize the loss function for the training set

    param: sess
    param: define_graph_output
    param: train_val_variables
    param: nTrainBatches
    param: epoch
    param: checkpoint_folder
    param: flags
    return: none
    """

    print("into training loop")
    for step in xrange(nTrainBatches):

        start_time = time.time()

        print("GOES INTO BLACK HOLE")
        _, loss_value_train, precision, recall, f1score, TP, FP, FN, TN,\
            out_train, pred_train,mean_image,variance_image = \
            sess.run([define_graph_output['train_op'],
                      define_graph_output['loss_train']['avg_cross_entropy_log_loss'],
                      define_graph_output['accuracy_train_output']['precision'],
                      define_graph_output['accuracy_train_output']['recall'],
                      define_graph_output['accuracy_train_output']['f1score'],
                      define_graph_output['accuracy_train_output']['TP'],
                      define_graph_output['accuracy_train_output']['FP'],
                      define_graph_output['accuracy_train_output']['FN'],
                      define_graph_output['accuracy_train_output']['TN'],
                      define_graph_output['out_content_train'],
                      define_graph_output['sigmoid_all_train'],
                      define_graph_output['mean_image'],
                      define_graph_output['variance_image']
                      ],
                     feed_dict={define_graph_output['keep_prob']: 0.5})

        print("COMES OUT OF BLACK HOLE")
        duration = time.time() - start_time
        assert not np.isnan(loss_value_train), 'Model diverged with loss = NaN'

        if step % 100 == 0:
            matlab.save(os.path.join(checkpoint_folder, 'train_content.mat'), {
                'out_train': out_train,
                'pred_train': pred_train
            })

        # evaluate
        if not np.isnan(loss_value_train):
            train_val_variables['avg_train_loss'].append(loss_value_train)
        for iclass in range(flags['n_classes']):
            if not np.isnan(precision[iclass]):
                train_val_variables['avg_train_precision'][iclass].append(
                    precision[iclass])
            if not np.isnan(recall[iclass]):
                train_val_variables['avg_train_recall'][iclass].append(
                    recall[iclass])
            if not np.isnan(f1score[iclass]):
                train_val_variables['avg_train_f1score'][iclass].append(
                    f1score[iclass])

        # print
        format_str = ('%s: epoch %d, step %d/ %d (%.2f sec/step)')
        print(format_str %
              (datetime.now(), epoch, step + 1, nTrainBatches, duration))
        format_str = (
            'Training Loss = %.2f, Precision = %.2f, Recall = %.2f, F1 = %.2f, '
            + 'TP = %.2f, FP = %.2f, FN = %.2f, TN = %.2f')
        for iclass in range(flags['n_classes']):
            #if(iclass ==3): iclass = 4
            print(format_str % (loss_value_train, precision[iclass],
                                recall[iclass], f1score[iclass], TP[iclass],
                                FP[iclass], FN[iclass], TN[iclass]))
        print("out of training loop")