def loss(self, model, preprocess=None, task=None):
        """Run Loss Statistics

        Parameters
        ----------
        model : class with predict method
            Model to test
        preprocess : np.array -> np.array
            Preprocessing method to run; if None, no preprocessing is performed
        task : Task
            Task to register the test under; if None, a new Task is created
        """

        # BUG FIX: the guard was inverted ("is not None"), so passing
        # task=None left task as None and task.start() raised
        # AttributeError. Every sibling helper in this file uses the
        # "if task is None: task = Task()" pattern.
        if task is None:
            task = Task()
        task.start(name='Tester', desc=self.desc)

        # Predict, applying the optional per-sample preprocess first
        if preprocess is not None:
            predicted = model.predict(
                np.array([preprocess(x) for x in self.data]))
        else:
            predicted = model.predict(self.data)

        # Compute error statistics against the ground-truth labels
        self.error = self.__error(predicted, self.labels)

        # Print one line per statistic, using the human-readable LABELS map
        for key, value in self.error.items():
            task.print("{label}: {val}".format(label=LABELS[key], val=value))

        task.done(desc='Done running tests.')
        task.print('Time per test: {t}s'.format(t=task.runtime() /
                                                self.error['total']))
# ----- 示例#2 (Example #2) -----
    def __new(self, task):
        """Generate fresh W and b for this feature.

        Parameters
        ----------
        task : Task or None
            Task to register progress under; a new Task is created if None.
        """

        task = Task() if task is None else task
        task.start(name='Random Fourier Feature', desc=self.__str__())

        # Actual W/b construction is delegated to create()
        self.create()

        task.done(
            self.W, self.b,
            desc="{desc} created".format(desc=self.__str__()))
# ----- 示例#3 (Example #3) -----
    def __new(self, task, cores):
        """Generate delta and mu for this feature, in parallel.

        Parameters
        ----------
        task : Task or None
            Task to register progress under; a new Task is created if None.
        cores : int or None
            Number of worker processes handed to task.pool.
        """

        task = Task() if task is None else task
        task.start(name='Random Binning Feature', desc=self.__str__())

        # Draw one (delta, mu) pair per output dimension D, fanned out
        # across processes.
        pairs = task.pool(get_p_set, [self.d for _ in range(self.D)],
                          cores=cores,
                          process=True,
                          name='Random Binning Feature')

        # Split the pairs into two float32 arrays
        self.delta = np.array([pair[0] for pair in pairs], dtype=np.float32)
        self.mu = np.array([pair[1] for pair in pairs], dtype=np.float32)

        task.done(
            self.delta, self.mu,
            desc="{desc} created".format(desc=self.__str__()))
    def __init__(self,
                 patients,
                 transform=None,
                 feature=None,
                 cores=None,
                 p=1,
                 task=None,
                 process=False):
        """Load the IDC image dataset for the given patients.

        Parameters
        ----------
        patients : list
            Patient identifiers to load.
        transform : [callable or None, callable or None] or None
            Pair of transforms; None is normalized to [None, None].
            BUG FIX: the default was the mutable literal ``[None, None]``,
            shared across all calls; ``None`` plus the existing
            normalization below is behavior-identical and safe.
        feature : callable or None
            Optional feature map applied to loaded samples.
        cores : int or None
            Worker count handed to task.pool.
        p : float
            Proportion of samples to load per patient.
        task : Task or None
            Task to register progress under; a new Task is created if None.
        process : bool
            If True, load with a process pool; otherwise with threads.
        """

        if task is None:
            task = Task()
        task.start(name='IDC Dataset', desc='Loading Images...')

        # Normalize the optional transform pair
        if transform is None:
            transform = [None, None]

        if process:
            self.data, self.classes = task.pool(
                proc_wrapper,
                patients,
                process=True,
                shared_args=[transform[0], transform[1], p, feature],
                shared_init=pinit,
                name='Loader',
                recursive=True,
                reducer=proc_reducer,
                cores=cores)

        else:
            self.data, self.classes = task.pool(load_patient,
                                                patients,
                                                process=False,
                                                shared_kwargs={
                                                    'transform': transform,
                                                    'feature': feature,
                                                    'p': p
                                                },
                                                reducer=reducer,
                                                name='Loader',
                                                recursive=False,
                                                threads=cores)

        task.done(self.data,
                  self.classes,
                  desc="{n} images ({p}%) sampled from {k} patients".format(
                      n=self.classes.shape[0], k=len(patients), p=p * 100))
    def __init__(self, n, dataset, task=None):
        """Fit a color K-Means model on randomly sampled pixels.

        Parameters
        ----------
        n : int
            Number of clusters.
        dataset : dataset with .data attribute
            Source images; each entry reshapes to (50, 50, 3).
        task : Task or None
            Task to register progress under; a new Task is created if None.
        """

        task = Task() if task is None else task
        task.start(name='CKM', desc="Color K Means Clustering")

        self.n = n

        # Sample 10 random pixels from each 50x50 RGB image
        pixels = []
        for img in dataset.data:
            img = img.reshape(50, 50, 3)
            for _ in range(10):
                row = random.randint(0, 49)
                col = random.randint(0, 49)
                pixels.append(img[row, col])

        # Keep only the fitted cluster centers
        self.kmeans = KMeans(n_clusters=n).fit(pixels).cluster_centers_

        task.done(desc="Generated K Means model (n={n})".format(n=self.n))
# ----- 示例#6 (Example #6) -----
def run(ptrain=0.01, ptest=0.1, ntrain=-25, ntest=25):
    """Train and evaluate the kernel SVM baseline.

    Parameters
    ----------
    ptrain : float
        Proportion of training samples to load.
    ptest : float
        Proportion of test samples to load.
    ntrain, ntest : int
        Train/test split sizes (passed through by the caller; unused here).
    """

    import os
    putil.LOG_FILE = os.path.join(LOG_FILE_DIR, 'ksvm.txt')

    main = Task(name="Kernel SVM",
                desc='Kernel Support Vector Machine Baseline').start()

    dataset, validation_tester = make_trainset(cores=None,
                                               main=main,
                                               ptrain=float(ptrain))
    testset, tester = make_testset(cores=None, main=main, ptest=float(ptest))

    ksvm = train(dataset, main.subtask(name="Training KSVM"))

    # Evaluate on both the held-out test set and the validation split
    tester.loss(ksvm, task=main)
    validation_tester.loss(ksvm, task=main)

    # CONSISTENCY FIX: pass the message as desc= like every other
    # done() call in this codebase; positional args to done() are
    # treated as data payloads, not the description.
    main.done(desc="Program finished.")
def load_patient(patient, p=1, transform=None, feature=None, task=None):
    """Load all class-0 and class-1 images for one patient.

    Parameters
    ----------
    patient : str
        Patient directory name under BASE_PATH to load.
    p : float
        Proportion of samples to load (forwarded to load_images).
    transform : callable or None
        Image transform (forwarded to load_images).
    feature : callable or None
        Feature generator (forwarded to load_images).
    task : Task or None
        Task to register progress under; a new Task is created if None.

    Returns
    -------
    (np.ndarray, np.ndarray) or (None, None)
        (data, classes) where classes is int8 with 0 for class-0 images
        and 1 for class-1 images; (None, None) if loading failed.
    """

    # Set up task
    if task is None:
        task = Task()
    task.start(name='Loader', desc='Loading Patient {p}'.format(p=patient))

    # Config: pass in p, feature, transform
    load_args = {'p': p, 'feature': feature, 'transform': transform}

    try:
        # Each patient directory has a '0' (negative) and '1' (positive)
        # subdirectory of images.
        class_0 = load_images(os.path.join(BASE_PATH, patient, '0'),
                              **load_args)
        class_1 = load_images(os.path.join(BASE_PATH, patient, '1'),
                              **load_args)

        # Label vector: zeros for class_0 samples, ones for class_1
        classes = np.concatenate([
            np.zeros(class_0.shape[0], dtype=np.int8),
            np.ones(class_1.shape[0], dtype=np.int8)
        ])
        data = np.concatenate([class_0, class_1])

        task.done(data, classes, desc="loaded patient {p}".format(p=patient))
        return (data, classes)

    except Exception as e:
        # Best-effort loader: log the failure and signal it with (None, None)
        # rather than aborting the whole batch.
        task.error("error loading patient {p}: {e}".format(p=patient, e=e))
        task.done(desc="could not load patient {p}".format(p=patient))
        return (None, None)
# ----- 示例#8 (Example #8) -----
        main=main,
        **args.subdict('cores', 'ntrain', 'ptrain'))

    # Train model
    rfsvm = train(dataset, main.subtask())

    # Tester
    test_dataset, tester = make_testset(
        transform=rand_ft,
        feature=ckm.map if args.get('knn') else None,
        main=main,
        **args.subdict('cores', 'ntest', 'ptest'))

    # Run testers
    tester.loss(rfsvm, task=main)
    validation_tester.loss(rfsvm, task=main)

    # Save as JSON
    if os.path.exists(LOG_FILE_PATH + '.json'):
        main.acc_join()
        main.warn("Specified log file path already exists. "
                  "\"_1\" will be appended to end of file.")
        main.acc_join()
        if input('Overwrite? (Y/N) ') == 'Y':
            main.save(LOG_FILE_PATH + '.json')
    else:
        main.save(LOG_FILE_PATH + '.json')

    main.acc_join()
    main.done(desc="Program finished.")