    def __init__(self, method, **kwarg):
        super(HistNormalization, self).__init__(method, **kwarg)

        target_path = "{}/data/{}".format(get_project_root(),
                                          kwarg["hist_target"])
        hist_target = np.load(target_path).item()
        self.hist_target = hist_target

        self._history = []
        self.enable_update = True

        if kwarg["hist_source"] is not None:
            print("reading histogram file ...")
            source_path = "{}/data/{}".format(get_project_root(),
                                              kwarg["hist_source"])
            print("reading histogram file: ", source_path)
            hist_source = np.load(source_path).item()

            LUT = []
            LUT.append(
                self._estimate_cumulative_cdf(hist_source["L"],
                                              hist_target["L"],
                                              start=0,
                                              end=100))
            LUT.append(
                self._estimate_cumulative_cdf(hist_source["A"],
                                              hist_target["A"],
                                              start=-128,
                                              end=127))
            LUT.append(
                self._estimate_cumulative_cdf(hist_source["B"],
                                              hist_target["B"],
                                              start=-128,
                                              end=127))
            self.LUT = LUT
            self.hist_source = hist_source
            self.hist_target = hist_target
        else:
            # Will be initialized later by the Prepare step
            self.LUT = None
            self.hist_source = None
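A minimal sketch of the kind of histogram-matching lookup table built above. This is an assumption about what `_estimate_cumulative_cdf` computes (mapping each source channel value to the target value with the nearest cumulative frequency over a fixed range), not the project's actual implementation; the `_sketch` suffix marks it as hypothetical.

import numpy as np

def estimate_cumulative_cdf_sketch(hist_source, hist_target, start, end):
    # hist_source / hist_target: 1-D counts, one bin per integer value in [start, end].
    values = np.arange(start, end + 1)
    cdf_src = np.cumsum(hist_source) / np.sum(hist_source)
    cdf_tgt = np.cumsum(hist_target) / np.sum(hist_target)
    # For each source CDF level, pick the target value whose CDF first reaches it.
    idx = np.searchsorted(cdf_tgt, cdf_src, side="left").clip(0, len(values) - 1)
    return values[idx]  # LUT indexed by (source value - start)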
Example #2
    def __init__(self, method, **kwarg):
        super(ACDNormalization, self).__init__(method, **kwarg)
        self._pn = 100000
        self._bs = 2000
        self._step_per_epoch = int(self._pn / self._bs)
        self._epoch = int(300 / self._step_per_epoch)
        # self._pn = 100000
        # self._bs = 500
        # self._step_per_epoch = 20
        # self._epoch = 15
        self.dc_txt = "{}/data/{}".format(get_project_root(), kwarg["dc_txt"])
        self.w_txt = "{}/data/{}".format(get_project_root(), kwarg["w_txt"])
        self.template_path = "{}/data/{}".format(get_project_root(),
                                                 kwarg["template_path"])
        self._template_dc_mat = None
        self._template_w_mat = None
        # if(not os.path.exists(self.dc_txt) or not os.path.exists(self.w_txt)):
        #     self.generate()
        self.generate()
        self._template_dc_mat = np.loadtxt(self.dc_txt)
        self._template_w_mat = np.loadtxt(self.w_txt)
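A hedged usage sketch, assuming the class is exported as `ACDNormalization` and that `dc_txt`, `w_txt`, and `template_path` are paths relative to `<project_root>/data/`; the method string and file names below are placeholders, not files known to exist in the project.

normalizer = ACDNormalization("acd",
                              dc_txt="acd/template_dc.txt",
                              w_txt="acd/template_w.txt",
                              template_path="acd/template_images")
# generate() has already run inside __init__, so the template matrices are loaded:
print(normalizer._template_dc_mat.shape, normalizer._template_w_mat.shape)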
Example #3
    def __init__(self, **kwarg):
        target_path = "{}/data/{}".format(get_project_root(), kwarg["hist_target"])
        hist_target = np.load(target_path).item()
        self.hist_target = hist_target
        self.opcode = 10

        if kwarg["hist_source"] is not None:
            print("reading histogram file ...")
            source_path = "{}/data/{}".format(get_project_root(), kwarg["hist_source"])
            print("reading histogram file: ", source_path)
            hist_source = np.load(source_path).item()

            LUT = []
            LUT.append(self._estimate_cumulative_cdf(hist_source["L"], hist_target["L"], start=0, end=100))
            LUT.append(self._estimate_cumulative_cdf(hist_source["A"], hist_target["A"], start=-128, end=127))
            LUT.append(self._estimate_cumulative_cdf(hist_source["B"], hist_target["B"], start=-128, end=127))
            self.LUT = LUT
            self.hist_source = hist_source
            self.hist_target = hist_target
        else:
            # Will be initialized later by the Prepare step
            self.LUT = None
            self.hist_source = None
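A minimal sketch of applying the three lookup tables to an image in LAB space, under the assumption that `LUT[0]` covers L values 0..100 and `LUT[1]`/`LUT[2]` cover A/B values -128..127 (so A and B indices are shifted by +128); this helper is hypothetical and not part of the original class.

import numpy as np

def apply_lab_luts_sketch(lab_image, lut_l, lut_a, lut_b):
    # lab_image: array of shape (H, W, 3) holding L, A, B channels.
    l = np.clip(np.rint(lab_image[..., 0]), 0, 100).astype(int)
    a = np.clip(np.rint(lab_image[..., 1]), -128, 127).astype(int)
    b = np.clip(np.rint(lab_image[..., 2]), -128, 127).astype(int)
    # Shift A/B by +128 so they index from 0 into 256-entry tables.
    return np.stack([lut_l[l], lut_a[a + 128], lut_b[b + 128]], axis=-1)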
Example #4
    def load_param(self, GLOBAL_SCALE, SLICES_ROOT_PATH, PATCHS_DICT,
                   NUM_WORKERS):
        '''
        Load parameters.
        :param GLOBAL_SCALE: magnification used for the global field of view; x1.25 = Level 5, x20 = Level 1, x40 = Level 0
        :param SLICES_ROOT_PATH: directory containing the WSI slides
        :param PATCHS_DICT: directories containing the training patch sets
        :param NUM_WORKERS: number of CPU worker threads
        :return:
        '''
        self.GLOBAL_SCALE = GLOBAL_SCALE
        self.SLICES_ROOT_PATH = SLICES_ROOT_PATH
        self.PROJECT_ROOT = get_project_root()
        self.PATCHS_ROOT_PATH = PATCHS_DICT
        self.NUM_WORKERS = NUM_WORKERS
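A hedged call example, assuming `cfg` is an instance of the configuration class this method belongs to; the scale, paths, and worker count are illustrative values borrowed from the defaults shown elsewhere in these examples.

# Hypothetical values; adjust to the local environment.
cfg.load_param(GLOBAL_SCALE=1.25,
               SLICES_ROOT_PATH="D:/Study/breast/3Plus",
               PATCHS_DICT={"P0404": "D:/Data/Patches/P0404"},
               NUM_WORKERS=4)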
Example #5
def load_data(params):
    '''
    Convenience function: reads from disk, downloads, or generates the data specified in params
    '''
    ProjectDir = get_project_root()
    # __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    if params['dset'] == 'reuters':
        with h5py.File('../../data/reuters/reutersidf_total.h5', 'r') as f:
            x = np.asarray(f.get('data'), dtype='float32')
            y = np.asarray(f.get('labels'), dtype='float32')

            n_train = int(0.9 * len(x))
            x_train, x_test = x[:n_train], x[n_train:]
            y_train, y_test = y[:n_train], y[n_train:]
    elif params['dset'] == 'mnist':
        x_train, x_test, y_train, y_test = get_mnist()
    elif params['dset'] == 'cc':
        x_train, x_test, y_train, y_test = generate_cc(
            params.get('n'), params.get('noise_sig'),
            params.get('train_set_fraction'))
        x_train, x_test = pre_process(x_train, x_test,
                                      params.get('standardize'))
    elif params['dset'] == 'Agg788':
        x_train = np.genfromtxt(os.path.join(ProjectDir,
                                             'Agg788_Instances.csv'),
                                delimiter=",")
        x_test = np.ndarray(shape=(0, x_train.shape[1]), dtype=x_train.dtype)
        y_train = np.genfromtxt(os.path.join(ProjectDir, 'Agg788_labels.csv'),
                                delimiter=",")
        y_test = np.ndarray(shape=(0, ), dtype=y_train.dtype)
    elif params['dset'] == 'Comp399':
        x_train = np.genfromtxt(os.path.join(ProjectDir,
                                             'Comp399_Instances.csv'),
                                delimiter=",")
        x_test = np.ndarray(shape=(0, x_train.shape[1]), dtype=x_train.dtype)
        y_train = np.genfromtxt(os.path.join(ProjectDir, 'Comp399_labels.csv'),
                                delimiter=",")
        y_test = np.ndarray(shape=(0, ), dtype=y_train.dtype)
    else:
        raise ValueError('Dataset provided ({}) is invalid!'.format(
            params['dset']))

    return x_train, x_test, y_train, y_test
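A hedged usage sketch: only the 'dset' key is read by every branch above, and 'mnist' exercises the simplest one (assuming the get_mnist helper can fetch its data locally).

params = {'dset': 'mnist'}  # hypothetical minimal parameter dict
x_train, x_test, y_train, y_test = load_data(params)
print(x_train.shape, x_test.shape)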
Example #6
    def load_config_file(self, filename):
        '''
        Read the configuration file.
        :param filename: name of the configuration file
        :return:
        '''
        # Read the data
        with open(filename, 'r') as f:
            data = json.load(f)
        self.GLOBAL_SCALE = data[0]['GLOBAL_SCALE']

        self.KFB_SDK_PATH = data[1]['KFB_SDK_PATH']
        self.SLICES_ROOT_PATH = data[1]['SLICES_ROOT_PATH']

        self.PROJECT_ROOT = get_project_root()

        self.PATCHS_ROOT_PATH = dict(data[2])

        self.NUM_WORKERS = data[3]['NUM_WORKERS']

        return
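A hedged usage sketch, assuming `cfg` is an instance of the same configuration class and that a four-element JSON file like the one written by save_default_value (Example #7 below) already exists; the file name is a placeholder.

cfg.load_config_file("{}/config/default.json".format(get_project_root()))
print(cfg.GLOBAL_SCALE, cfg.SLICES_ROOT_PATH, cfg.NUM_WORKERS)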
Example #7
    def save_default_value(self, filename):
        '''
        Generate a default configuration file that can then be edited.
        :param filename: file name to save the defaults to
        :return:
        '''
        filePath = os.path.join(get_project_root(), "config", filename)
        data = ({
            'GLOBAL_SCALE': 1.25,
        }, {
            'KFB_SDK_PATH': 'D:/CloudSpace/WorkSpace/lib/KFB_SDK',
            'SLICES_ROOT_PATH': 'D:/Study/breast/3Plus',
        }, {
            "P0404": "D:/Data/Patches/P0404",
            "P0327": "D:/Data/Patches/P0327"
        }, {
            "NUM_WORKERS": 1,
        })

        # Write the JSON data
        with open(filePath, 'w') as f:
            json.dump(data, f)

        return
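A hedged round-trip sketch combining this writer with the loader from Example #6; `cfg` and `my_config.json` are placeholders.

cfg.save_default_value("my_config.json")
cfg.load_config_file(os.path.join(get_project_root(), "config", "my_config.json"))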
Example #8
def run_net(data, params):
    #
    # UNPACK DATA
    #

    x_train, y_train, x_val, y_val, x_test, y_test = data['spectral'][
        'train_and_test']
    x_train_unlabeled, y_train_unlabeled, x_train_labeled, y_train_labeled = data[
        'spectral']['train_unlabeled_and_labeled']
    x_val_unlabeled, y_val_unlabeled, x_val_labeled, y_val_labeled = data[
        'spectral']['val_unlabeled_and_labeled']

    if 'siamese' in params['affinity']:
        pairs_train, dist_train, pairs_val, dist_val = data['siamese'][
            'train_and_test']

        # pair counts exist only when a siamese affinity is used, so report them here
        print("Training pairs = ", pairs_train.shape[0])
        print("Validation pairs = ", pairs_val.shape[0])
        print("Total pairs = ", pairs_train.shape[0] + pairs_val.shape[0])
        ProjectDir = get_project_root()
        with open(os.path.join(ProjectDir, 'Results.txt'), 'a') as my_file:
            my_file.write("\n")
            my_file.write("Training pairs = " + str(pairs_train.shape[0]))
            my_file.write("\n")
            my_file.write("Validation pairs = " + str(pairs_val.shape[0]))
            my_file.write("\n")
            my_file.write("Total pairs = " +
                          str(pairs_train.shape[0] + pairs_val.shape[0]))
            my_file.write("\n")

    x = np.concatenate((x_train, x_val, x_test), axis=0)
    y = np.concatenate((y_train, y_val, y_test), axis=0)

    if len(x_train_labeled):
        y_train_labeled_onehot = OneHotEncoder().fit_transform(
            y_train_labeled.reshape(-1, 1)).toarray()
    else:
        y_train_labeled_onehot = np.empty((0, len(np.unique(y))))

    #
    # SET UP INPUTS
    #

    # create true y placeholder (not used in unsupervised training)
    y_true = tf.placeholder(tf.float32,
                            shape=(None, params['n_clusters']),
                            name='y_true')

    batch_sizes = {
        'Unlabeled': params['batch_size'],
        'Labeled': params['batch_size'],
        'Orthonorm': params.get('batch_size_orthonorm', params['batch_size']),
    }

    input_shape = x.shape[1:]

    # spectralnet has three inputs -- they are defined here
    inputs = {
        'Unlabeled': Input(shape=input_shape, name='UnlabeledInput'),
        'Labeled': Input(shape=input_shape, name='LabeledInput'),
        'Orthonorm': Input(shape=input_shape, name='OrthonormInput'),
    }

    #
    # DEFINE AND TRAIN SIAMESE NET
    #

    # run only if we are using a siamese network
    if params['affinity'] == 'siamese':
        siamese_net = networks.SiameseNet(inputs, params['arch'],
                                          params.get('siam_reg'), y_true)

        history = siamese_net.train(pairs_train, dist_train, pairs_val,
                                    dist_val, params['siam_lr'],
                                    params['siam_drop'],
                                    params['siam_patience'], params['siam_ne'],
                                    params['siam_batch_size'])

    else:
        siamese_net = None

    #
    # DEFINE AND TRAIN SPECTRALNET
    #

    spectral_net = networks.SpectralNet(inputs, params['arch'],
                                        params.get('spec_reg'), y_true,
                                        y_train_labeled_onehot,
                                        params['n_clusters'],
                                        params['affinity'],
                                        params['scale_nbr'], params['n_nbrs'],
                                        batch_sizes, siamese_net, x_train,
                                        len(x_train_labeled))

    spectral_net.train(x_train_unlabeled, x_train_labeled, x_val_unlabeled,
                       params['spec_lr'], params['spec_drop'],
                       params['spec_patience'], params['spec_ne'])

    print("finished training")

    #
    # EVALUATE
    #

    # get final embeddings
    x_spectralnet = spectral_net.predict(x)

    # get accuracy and nmi
    kmeans_assignments, km = get_cluster_sols(x_spectralnet,
                                              ClusterClass=KMeans,
                                              n_clusters=params['n_clusters'],
                                              init_args={'n_init': 10})
    y_spectralnet, _ = get_y_preds(kmeans_assignments, y, params['n_clusters'])
    print_accuracy(kmeans_assignments, y, params['n_clusters'])
    # from sklearn.metrics import normalized_mutual_info_score as nmi
    # nmi_score = nmi(kmeans_assignments, y)
    # print('NMI: ' + str(np.round(nmi_score, 3)))

    # with open('C:/Users/mals6571/Desktop/SpectralNet-master/src/applications/Results.txt','a') as my_file:
    #     my_file.write('NMI: ' + str(np.round(nmi_score, 3)))
    #     my_file.write("\n")
    #     my_file.write('==============================================================')

    if params['generalization_metrics']:
        x_spectralnet_train = spectral_net.predict(x_train_unlabeled)
        x_spectralnet_test = spectral_net.predict(x_test)
        km_train = KMeans(
            n_clusters=params['n_clusters']).fit(x_spectralnet_train)
        from scipy.spatial.distance import cdist
        dist_mat = cdist(x_spectralnet_test, km_train.cluster_centers_)
        closest_cluster = np.argmin(dist_mat, axis=1)
        print_accuracy(closest_cluster, y_test, params['n_clusters'],
                       ' generalization')
        nmi_score = nmi(closest_cluster, y_test)
        print('generalization NMI: ' + str(np.round(nmi_score, 3)))

    return x_spectralnet, y_spectralnet
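A hedged sketch of the `params` dictionary that run_net reads. The key names are taken from the accesses above; every value is an illustrative assumption, and the 'arch' format depends on networks.SiameseNet / networks.SpectralNet, which are not shown here.

params = {
    'affinity': 'siamese',        # anything without 'siamese' skips the siamese branch
    'n_clusters': 10,
    'batch_size': 1024,
    # 'batch_size_orthonorm': 1024,  # optional, defaults to batch_size
    'arch': [],                   # architecture spec consumed by the network classes
    'scale_nbr': 2,
    'n_nbrs': 3,
    'siam_reg': None, 'siam_lr': 1e-3, 'siam_drop': 0.1,
    'siam_patience': 10, 'siam_ne': 100, 'siam_batch_size': 128,
    'spec_reg': None, 'spec_lr': 1e-3, 'spec_drop': 0.1,
    'spec_patience': 10, 'spec_ne': 100,
    'generalization_metrics': False,
}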
Example #9
def process(x_spectralnet, y_spectralnet, data, params):
    # UNPACK DATA
    x_train, y_train, x_val, y_val, x_test, y_test = data['spectral']['train_and_test']

    # concatenate
    x = np.concatenate([x_train, x_val, x_test], axis=0)
    y = np.concatenate([y_train, y_val, y_test], axis=0)

    # PERFORM SPECTRAL CLUSTERING ON DATA

    # # get eigenvalues and eigenvectors
    # scale = get_scale(x, params['batch_size'], params['scale_nbr'])
    # values, vectors = spectral_clustering(x, scale, params['n_nbrs'], params['affinity'])

    # # sort, then store the top n_clusters=2
    # values_idx = np.argsort(values)
    # x_spectral_clustering = vectors[:, values_idx[:params['n_clusters']]]

    # # do kmeans clustering in this subspace
    # y_spectral_clustering = KMeans(n_clusters=params['n_clusters']).fit_predict(vectors[:, values_idx[:params['n_clusters']]])

    # PLOT RESULTS

    # plot spectral net clustering
    # fig2 = plt.figure()
    # if x.shape[1] == 2:
    #     ax1 = fig2.add_subplot(311)
    #     ax1.scatter(x[:, 0], x[:, 1],
    #                 alpha=0.5, s=20, cmap='rainbow', c=y_spectralnet, lw=0)
    
    if x.shape[1] == 2:
        plt.scatter(x[:, 0], x[:, 1], alpha=0.9, s=20, cmap='rainbow',
                    c=y_spectralnet, lw=0)
        plt.axis("off")
        plotID = "%i" % randint(10, 99)
        # plt.savefig('Result' + plotID + '.png', dpi=300)
        ProjectDir = get_project_root()
        plt.savefig(os.path.join(ProjectDir, 'Result' + plotID + '.png'), dpi=300)
    
    # ax1.set_title("x colored by net prediction")

    # # plot spectral clustering clusters
    # if x.shape[1] == 2:
    #     ax2 = fig2.add_subplot(313)
    #     ax2.scatter(x[:, 0], x[:, 1],
    #                 alpha=0.5, s=20, cmap='rainbow', c=y_spectral_clustering, lw=0)
    # ax2.set_title("x colored by spectral clustering")

    # # plot histogram of eigenvectors
    # fig3 = plt.figure()
    # ax1 = fig3.add_subplot(212)
    # ax1.hist(x_spectral_clustering)
    # ax1.set_title("histogram of true eigenvectors")
    # ax2 = fig3.add_subplot(211)
    # ax2.hist(x_spectralnet)
    # ax2.set_title("histogram of net outputs")

    # # plot eigenvectors
    # y_idx = np.argsort(y)
    # fig4 = plt.figure()
    # ax1 = fig4.add_subplot(212)
    # ax1.plot(x_spectral_clustering[y_idx])
    # ax1.set_title("plot of true eigenvectors")
    # ax2 = fig4.add_subplot(211)
    # ax2.plot(x_spectralnet[y_idx])
    # ax2.set_title("plot of net outputs")

    plt.draw()
    plt.show()
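A hedged end-to-end sketch, assuming `data` and `params` were prepared the same way as for run_net in Example #8.

x_spectralnet, y_spectralnet = run_net(data, params)
process(x_spectralnet, y_spectralnet, data, params)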
Example #10
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 12 22:52:41 2020

@author: mals6571
"""
import os
from core.util import get_project_root

ProjectDir = get_project_root()
for r in range(10):
    exec(open(os.path.join(ProjectDir, 'applications/run.py')).read())
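An alternative sketch using the standard-library runpy module instead of exec; each iteration runs applications/run.py as a fresh top-level script, which matches the loop above as long as run.py does not rely on names defined in this file.

import runpy

for r in range(10):
    # Execute run.py in its own namespace with __name__ == '__main__'.
    runpy.run_path(os.path.join(ProjectDir, 'applications/run.py'),
                   run_name='__main__')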
Example #11
    def test_get_project_root(self):
        print(util.get_project_root())