import numpy as np
import xgboost as xgb
from sklearn.preprocessing import minmax_scale
from sklearn.feature_selection import VarianceThreshold
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split

# Load and save_var_mean are project-local helpers (defined elsewhere).


def run():
    fibro = 'data2/fibroblast.csv'
    myofi = 'data2/myofibroblast.csv'
    ## EDA
    ## Load.inspect(fibro)
    fibro_fe = Load.load(fibro)
    myofi_fe = Load.load(myofi)

    ## rescale each feature to [0, 1]: MinMaxScaler
    fibro_fe = minmax_scale(fibro_fe)
    myofi_fe = minmax_scale(myofi_fe)
    print(np.shape(fibro_fe))
    print(np.shape(myofi_fe))
    save_var_mean(fibro_fe, 'before selection')
    save_var_mean(myofi_fe, 'before selection')

    ## feature selection: variance threshold
    fibro_fe = VarianceThreshold(threshold=0.01).fit_transform(fibro_fe)
    myofi_fe = VarianceThreshold(threshold=0.03).fit_transform(myofi_fe)
    print(np.shape(fibro_fe))
    print(np.shape(myofi_fe))
    save_var_mean(fibro_fe, 'after selection')
    save_var_mean(myofi_fe, 'after selection')

    ## dimension reduction: PCA
    ## (note: each class is projected onto its own PCA basis here, so the
    ## resulting components are not directly comparable across classes)
    fibro_fe = PCA(n_components=100).fit_transform(fibro_fe)
    myofi_fe = PCA(n_components=100).fit_transform(myofi_fe)
    print(np.shape(fibro_fe))
    print(np.shape(myofi_fe))
    save_var_mean(fibro_fe, 'after decomposition')
    save_var_mean(myofi_fe, 'after decomposition')

    ## build union dataset of two classes
    union_fe = np.concatenate((fibro_fe, myofi_fe), axis=0)
    union_lb = np.concatenate(
        (np.ones(len(fibro_fe)), np.zeros(len(myofi_fe))), axis=0)
    union_ds = np.c_[union_fe, union_lb]
    print(np.shape(union_ds))
    # note: only union_ds is shuffled here; the split below operates on
    # union_fe/union_lb and relies on train_test_split(shuffle=True)
    np.random.shuffle(union_ds)
    print(np.shape(union_ds))

    ## split testset and trainset
    train_fe, test_fe, train_lb, test_lb = train_test_split(union_fe,
                                                            union_lb,
                                                            train_size=0.9,
                                                            shuffle=True)
    print('shape of train feature: \t{}'.format(np.shape(train_fe)))
    print('shape of train label:   \t{}'.format(np.shape(train_lb)))
    print('shape of test  feature: \t{}'.format(np.shape(test_fe)))
    print('shape of test  label:   \t{}'.format(np.shape(test_lb)))

    ## train classifier
    model = xgb.XGBClassifier(learning_rate=0.01)
    model.fit(train_fe, train_lb)
    print(model.score(test_fe, test_lb))
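For comparison, the scaling, variance-threshold, and PCA steps above can be
chained into a single scikit-learn Pipeline. A minimal sketch, not part of the
original module, reusing the fibroblast settings shown above:

from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler

# Sketch: one fitted pipeline per class replaces three fit_transform calls.
preprocess = make_pipeline(MinMaxScaler(),
                           VarianceThreshold(threshold=0.01),
                           PCA(n_components=100))
fibro_fe = preprocess.fit_transform(Load.load('data2/fibroblast.csv'))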
def run(filename, train_sample, train_label, test_sample, test_label, k):
    """k-nearest-neighbour classification with a plain majority vote."""
    train_sample, train_sample_size = Load.loadSample(train_sample)
    train_label, train_label_size = Load.loadLabel(train_label)
    assert train_sample_size == train_label_size, 'train_sample_size does not match train_label_size'

    test_sample, test_sample_size = Load.loadSample(test_sample)
    test_label, test_label_size = Load.loadLabel(test_label)
    assert test_sample_size == test_label_size, 'test_sample_size does not match test_label_size'

    train_sample = Preprocess.normalize(train_sample).values.tolist()  # list
    test_sample = Preprocess.normalize(test_sample).values.tolist()  # list

    label_to_index = {
        label: index
        for index, label in enumerate(set(train_label['x'].tolist()))
    }
    train_index = Preprocess.labelMap(train_label, label_to_index)  # list
    test_index = Preprocess.labelMap(test_label, label_to_index)  # list

    correct_count = 0

    for i, one in enumerate(test_sample):
        euclid_dist = np.linalg.norm(np.array(one) - np.array(train_sample),
                                     axis=1)
        nn_idx = euclid_dist.argsort()[:k]

        # labels are binary (0 or 1), so the majority vote is just a sum
        nn_vote = [train_index[idx] for idx in nn_idx]
        nn_decision = 1 if sum(nn_vote) > k / 2 else 0
        # map the winning index back to its label and compare
        if test_label.values.tolist()[i][0] == list(
                label_to_index.keys())[nn_decision]:
            correct_count += 1
    test_correct = correct_count / test_sample_size
    Log.log(filename, 'k: {}; correct rate: {}\n'.format(k, test_correct))
    return test_correct
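The loop above is k-nearest neighbours with Euclidean distance and a majority
vote. As a sanity check, a minimal scikit-learn equivalent (a sketch assuming
the same normalized samples and integer label indices):

from sklearn.neighbors import KNeighborsClassifier

# Sketch: the same k-NN decision rule via scikit-learn.
knn = KNeighborsClassifier(n_neighbors=k, metric='euclidean')
knn.fit(train_sample, train_index)
print(knn.score(test_sample, test_index))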
Example #3
from tkinter import Tk, filedialog


def select_files(b):
    """Generate an instance of tkinter.filedialog.

    Parameters
    ----------
    b : obj
        An instance of ipywidgets.widgets.Button
    """
    # Create Tk root
    root = Tk()
    # Hide the main window
    root.withdraw()
    # Raise the root to the top of all windows.
    root.call('wm', 'attributes', '.', '-topmost', True)
    # The list of selected files will be set to b.files
    try:
        b.files = filedialog.askopenfilename(multiple=True)
        b.image = Load.load_image(b.files[0])
        print("selected %s" % b.image.get_file_name())
        b.description = b.label + " Selected"
        b.icon = "check-square-o"
        b.style.button_color = "lightgreen"
    except Exception as e:
        print(e)
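A typical way to wire this callback up (a sketch; select_files reads b.label,
so that attribute is set by hand here):

from ipywidgets import Button

# Sketch (assumption): attaching select_files to an ipywidgets button.
button = Button(description="Select file")
button.label = "File"  # select_files rewrites the description from this
button.on_click(select_files)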
Example #4
def run(filename, train_sample, train_label, test_sample, test_label, title, M,
        thresh, CART_step):
    train_sample, train_sample_size = Load.loadSample(train_sample)
    train_label, train_label_size = Load.loadLabel(train_label)
    assert train_sample_size == train_label_size, 'train_sample_size does not match train_label_size'

    test_sample, test_sample_size = Load.loadSample(test_sample)
    test_label, test_label_size = Load.loadLabel(test_label)
    assert test_sample_size == test_label_size, 'test_sample_size does not match test_label_size'

    train_sample = Preprocess.normalize(train_sample,
                                        True).values.tolist()  # list
    test_sample = Preprocess.normalize(test_sample,
                                       True).values.tolist()  # list

    label_to_index = {
        label: index
        for index, label in enumerate(set(train_label['x'].tolist()))
    }
    train_index = Preprocess.labelMap(train_label, label_to_index)  # list
    test_index = Preprocess.labelMap(test_label, label_to_index)  # list

    input_size = len(train_sample[0])
    sample_size = len(train_sample)
    sample_weights = [1 / sample_size for _ in range(sample_size)]
    classifier_weights = []
    classifier_thresholds = []
    threshold_positions = []
    test_corrs = []
    test_times = [i + 1 for i in range(M)]

    for i in range(M):
        threshold, position, errors = Calc.CART(train_sample, train_index,
                                                sample_weights, thresh,
                                                CART_step)
        total_error = Calc.gentleError(np.array(sample_weights),
                                       np.array(errors))
        classifier_weights.append(round(Calc.classifierError(total_error), 3))
        classifier_thresholds.append(threshold)
        threshold_positions.append(position)
        sample_weights = Calc.updateVariableWeights(np.array(sample_weights),
                                                    total_error, errors)
        print('total_error: {}'.format(total_error))
        print('threshold_positions:   {}'.format(threshold_positions))
        print('classifier_thresholds: {}'.format(classifier_thresholds))
        print('classifier_weights:    {}'.format(classifier_weights))

        test_corr = 0
        test_size = len(test_sample)
        for sample, index in zip(test_sample, test_index):
            vote = 0
            for threshold, position, weight in zip(classifier_thresholds,
                                                   threshold_positions,
                                                   classifier_weights):
                if sample[position] >= threshold:
                    vote += weight
                else:
                    vote -= weight
            if vote >= 0 and index == 1:
                test_corr += 1
            elif vote < 0 and index == 0:
                test_corr += 1
        test_corrs.append(round(test_corr / test_size, 3))
        Log.log(filename, 'M: {}; correct rate: {}\n'.format(M, test_corrs[-1]))
        print(
            '-----------------thresh: {}; CART_step: {}; iter: {}-----------------'
            .format(thresh, CART_step, i + 1))

    Graph.draw(filename, test_times, test_corrs, test_times[-1], 1.0, title)
    return test_corrs
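The loop above performs AdaBoost-style boosting of one-dimensional threshold
stumps. For comparison, a minimal scikit-learn sketch (its default base
estimator is a depth-1 decision tree, i.e. the same kind of stump):

from sklearn.ensemble import AdaBoostClassifier

# Sketch (assumption): AdaBoost over decision stumps, M rounds as above.
ada = AdaBoostClassifier(n_estimators=M)
ada.fit(train_sample, train_index)
print(ada.score(test_sample, test_index))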
Example #5
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import

from convolution_network import Network, CLASSIFIER_FILE_TFL, LearnData
from utils import Load, TEST_PATH, NETWORK_RESULT_DIR

model_path = "model/"  # directory holding the trained model weights


def create_network(progress_length=100):
    network = Network(progress_length)
    network.create_model()
    network.load(model_path + CLASSIFIER_FILE_TFL)
    return network


if __name__ == '__main__':
    network = create_network()
    data = LearnData(TEST_PATH)
    data.load_all()
    for mask, org in zip(data.masks.images, data.original.images):
        reconstructed = network.mark(org.image, mask.image,
                                     org.get_file_name())
        Load.save(
            TEST_PATH + "/" + NETWORK_RESULT_DIR + "/" + org.get_file_name(),
            reconstructed)
Example #7
import random as rand

import numpy as np
from numpy import array

# MASK_SIZE, PHOTO_SAMPLES, Load, and shuffle come from the surrounding module


class LearnData:
    def __init__(self, root_dir) -> None:
        self.original = Load("images", root_dir)
        self.masks = Load("mask", root_dir)
        self.manual = Load("manual1", root_dir)

    def load_all(self):
        for loader in [self.original, self.manual, self.masks]:
            loader.load_all()
        self.masks.threshold()
        self.manual.threshold()

    def zip_data(self):
        return zip(self.original.get_data(), self.manual.get_data(),
                   self.masks.get_data())

    @staticmethod
    def get_possible_points(mask):
        """Top-left corners of MASK_SIZE patches centred on mask pixels."""

        def to_corner(v):
            return v - MASK_SIZE // 2 - 1

        indexes = np.where(mask > 0)
        all_x, all_y = indexes
        indexes = [[to_corner(all_x[i]),
                    to_corner(all_y[i])] for i in range(0, len(all_x))]
        # keep only corners from which a full patch fits inside the image
        max_x = mask.shape[0] - MASK_SIZE - 1
        max_y = mask.shape[1] - MASK_SIZE - 1
        return [i for i in indexes if 0 <= i[0] < max_x and 0 <= i[1] < max_y]

    @staticmethod
    def normalize(l):
        return list(array(l) / 255)

    def prepare_learn_data(self):
        X = []
        Y = []
        for image, manual, mask in self.zip_data():
            possible_points = LearnData.get_possible_points(mask)
            max_index = len(possible_points) - 1
            for i in range(0, PHOTO_SAMPLES):
                result, sample = self.get_sample(image, manual, max_index,
                                                 possible_points)
                X.append(sample)
                Y.append(result)
        X = LearnData.normalize(X)
        Y = LearnData.normalize(Y)
        return shuffle(X, Y)

    @staticmethod
    def get_sample(image, manual, max_index, possible_points):
        # Draw random patches until the class of the centre pixel in the
        # manual segmentation matches the randomly chosen target class
        # (vein_flag in {1, 2} -> vessel, 0 -> background).
        flag = -1
        vein_flag = rand.randint(0, 2)
        while flag != vein_flag:
            start_x, start_y = possible_points[rand.randint(0, max_index)]
            end_x = start_x + MASK_SIZE
            end_y = start_y + MASK_SIZE
            center_x = start_x + int(MASK_SIZE / 2)
            center_y = start_y + int(MASK_SIZE / 2)
            sample = image[start_x:end_x, start_y:end_y]
            sample = array(sample).reshape(MASK_SIZE, MASK_SIZE, 1)
            value_in_center = manual[center_x, center_y]
            if value_in_center == 255:
                result = [1, 0]  # one-hot: vessel
                flag = rand.randint(1, 2)
            else:
                result = [0, 1]  # one-hot: background
                flag = 0
        return result, sample
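Typical use of LearnData, mirroring the driver scripts above (TEST_PATH comes
from utils):

# Sketch: building a training set of patch samples with LearnData.
data = LearnData(TEST_PATH)
data.load_all()
X, Y = data.prepare_learn_data()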
Example #8
import cv2


def show_vessels(green):
    # CLAHE boosts local contrast; repeated open/close smoothing removes the
    # vessels, and subtracting the enhanced image leaves mostly the vessels.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(green)
    r = open_and_close(contrast_enhanced_green_fundus, 5)
    r = open_and_close(r, 13)
    r = open_and_close(r, 27)
    without_original = cv2.subtract(r, contrast_enhanced_green_fundus)
    increased = clahe.apply(without_original)
    return increased


def open_and_close(img, radius):
    # morphological opening followed by closing with an elliptical kernel
    o1 = cv2.morphologyEx(img,
                          cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (radius, radius)),
                          iterations=1)
    return cv2.morphologyEx(o1,
                            cv2.MORPH_CLOSE,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (radius, radius)),
                            iterations=1)
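show_vessels expects the green channel of a BGR fundus photograph; a minimal
usage sketch ('fundus.png' is a hypothetical path):

# Sketch (assumption): isolate vessels in the green channel of a fundus image.
img = cv2.imread('fundus.png')
green = img[:, :, 1]  # OpenCV loads images as BGR, so index 1 is green
cv2.imwrite('vessels.png', show_vessels(green))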


if __name__ == '__main__':
    data = LearnData(TEST_PATH)
    data.load_all()
    for img, mask in zip(data.original.images, data.masks.images):
        # process() is defined elsewhere in this module
        res = process(img.image, mask.image)
        Load.save(TEST_PATH + "/imgproc/" + img.get_file_name(), res)