Example #1
def profile(profiler):
    # Run one light and one heavy task under the profiler, wait for
    # both, then look their records up in the collected stats.
    with profiler:
        c1 = spawn(light)
        c2 = spawn(heavy)
        for c in [c1, c2]:
            join(c)
    stat1 = find_stats(profiler.stats, 'light')
    stat2 = find_stats(profiler.stats, 'heavy')
    return (stat1, stat2)
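All of these examples lean on a find_stats helper that pulls one function's record out of the profiler's statistics tree. A minimal sketch of what such a helper could look like, assuming each stats node exposes a name attribute and yields its children when iterated (light and heavy are likewise placeholder workload functions not shown on this page):

def find_stats(stats, name):
    # Depth-first search over the stats tree.  The `name` attribute and
    # child iteration are assumptions about the fixtures, not the
    # library's actual helper.
    for child in stats:
        if child.name == name:
            return child
        found = find_stats(child, name)
        if found is not None:
            return found
    return None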
Example #2
def _test_sampling_profiler(sampler):
    profiler = SamplingProfiler(top_frame=sys._getframe(), sampler=sampler)
    with profiler:
        spin_100ms()
        spin_500ms()
    stat1 = find_stats(profiler.stats, 'spin_100ms')
    stat2 = find_stats(profiler.stats, 'spin_500ms')
    ratio = stat1.deep_hits / stat2.deep_hits
    # 1:5 expected, but tolerate (0.8~1.2):5
    assert 0.8 <= ratio * 5 <= 1.2
Example #3
def _test_sampling_profiler(sampler):
    profiler = SamplingProfiler(top_frames=[sys._getframe()], sampler=sampler)
    with profiler:
        spin_100ms()
        spin_500ms()
    stat1 = find_stats(profiler.stats, 'spin_100ms')
    stat2 = find_stats(profiler.stats, 'spin_500ms')
    ratio = stat1.deep_hits / stat2.deep_hits
    # 1:5 expected, but tolerate (0.8~1.2):5
    assert 0.8 <= ratio * 5 <= 1.2
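The asserted 1:5 hit ratio only makes sense if spin_100ms and spin_500ms actually burn CPU for roughly 100 ms and 500 ms. A minimal sketch of such busy-wait fixtures, with the behavior assumed from the names rather than taken from the real test module:

import time

def spin(seconds):
    # Busy-wait so the work registers as CPU samples for the sampler.
    deadline = time.perf_counter() + seconds
    while time.perf_counter() < deadline:
        pass

def spin_100ms():
    spin(0.1)

def spin_500ms():
    spin(0.5)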
Example #4
def test_profile():
    profiler = TracingProfiler()
    frame = foo()  # the frame captured inside baz(), via foo() -> bar() -> baz()
    profiler._profile(frame, 'call', None)
    profiler._profile(frame, 'return', None)
    assert len(profiler.stats) == 1
    stats1 = find_stats(profiler.stats, 'foo')
    stats2 = find_stats(profiler.stats, 'bar')
    stats3 = find_stats(profiler.stats, 'baz')
    assert stats1.own_hits == 0
    assert stats2.own_hits == 0
    assert stats3.own_hits == 1
    assert stats1.deep_hits == 1
    assert stats2.deep_hits == 1
    assert stats3.deep_hits == 1
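The assertions only fit if foo() returns the innermost frame of a foo -> bar -> baz call chain: replaying 'call'/'return' on that frame credits one own hit to baz and one deep hit to every frame on the stack. A sketch of fixtures with that shape, inferred from the assertions rather than from the actual test module:

import sys

def baz():
    # Capture and return the currently executing frame.
    return sys._getframe()

def bar():
    return baz()

def foo():
    return bar()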
Example #5
def test_profiler():
    profiler = TracingProfiler(top_frames=[sys._getframe()])
    assert isinstance(profiler.stats, RecordingStatistics)
    stats, cpu_time, wall_time = profiler.result()
    assert len(stats) == 0
    with profiler:
        factorial(1000)
        factorial(10000)
    stats1 = find_stats(profiler.stats, 'factorial')
    stats2 = find_stats(profiler.stats, '__enter__')
    stats3 = find_stats(profiler.stats, '__exit__')
    assert stats1.deep_time != 0
    assert stats1.deep_time == stats1.own_time
    assert stats1.own_time > stats2.own_time
    assert stats1.own_time > stats3.own_time
    assert stats1.own_hits == 2
    assert stats2.own_hits == 0  # entering __enter__() wasn't profiled.
    assert stats3.own_hits == 1
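The check stats1.deep_time == stats1.own_time implies this factorial makes no profiled sub-calls, i.e. it is iterative rather than recursive. A plausible sketch, assuming only that property:

def factorial(n):
    # Iterative on purpose: all time stays in this frame, which is why
    # the profiler's deep_time and own_time for it coincide.
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result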
Example #6
def kmeans_test():
    '''Write summary statistics and the contents of each k-means
    cluster to a text file for manual inspection.
    '''
    clusters_request = get_kmeans_clusters()
    if clusters_request["error"] is not None:
        raise clusters_request["error"]
    clusters = clusters_request["data"]
    cluster_distances = get_all_kmeans_cluster_distances_dictionary()
    cluster_lengths = [len(cluster.ingredients) for cluster in clusters]
    kmeans_stats = find_stats(cluster_lengths)
    with open(os.path.join(os.getcwd(), 'app/test/kmeans_clusters.txt'),
              'w') as textfile:
        textfile.write("Size stats\n")
        textfile.write("-------------------------\n")
        textfile.write("Mean: %s\n" % kmeans_stats["mean"])
        textfile.write("Median: %s\n" % kmeans_stats["median"])
        textfile.write("St Dev: %s\n" % kmeans_stats["stdev"])
        textfile.write("Quartile 1: %s\n" % kmeans_stats["qt1"])
        textfile.write("Quartile 2: %s\n" % kmeans_stats["qt2"])
        textfile.write("-------------------------\n")
        textfile.write("\n")
        for x, cluster in enumerate(clusters):
            textfile.write("Cluster %d\n" % x)
            textfile.write("-------------------------\n")
            textfile.write("Cluster size: %d\n" % len(cluster.ingredients))
            textfile.write(",".join(cluster.get_ingredient_strings()) + "\n")
            textfile.write("-------------------------\n")
            textfile.write("\n")
        textfile.write("\n")
        for i, entry in enumerate(cluster_distances):
            textfile.write("Distances from Cluster " + str(i) + "\n")
            textfile.write("-------------------------\n")
            for key, value in sorted(entry.items(), key=lambda item: item[1]):
                textfile.write("%s: %s \n" % (key, value))
            textfile.write("-------------------------\n")
            textfile.write("\n")
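Here find_stats is a different helper from the profiler examples above: it summarizes a list of cluster sizes into a dict. A hedged sketch matching the keys the writes use, assuming qt1 and qt2 are the lower and upper quartiles (the real implementation is not shown on this page):

import statistics

def find_stats(values):
    # Summary statistics over a list of numbers.  The key names match
    # the call sites above; the quartile method (median of each half)
    # is an assumption.
    values = sorted(values)
    half = len(values) // 2
    return {
        "mean": statistics.mean(values),
        "median": statistics.median(values),
        "stdev": statistics.stdev(values),
        "qt1": statistics.median(values[:half]),
        "qt2": statistics.median(values[-half:]),
    }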
Example #7
    def transform(self, rand=False, mean=0, sd=1):
        # Destination directory for the train/test split.
        dst = self.test_train_dst

        # Note: the mean/sd arguments are overwritten here by the
        # dataset-wide statistics.
        mean, sd = find_stats(self.img_path)

        train_l, test_l = split_im(src_img=self.img_path,
                                   src_label=self.label_path,
                                   dst=dst)

        count = -1

        # Pre-allocate the test tensors with zeros, matching the train
        # branch below (torch.Tensor(...) would leave them uninitialized).
        img_test = torch.zeros(test_l, 1, 64, 64)
        label_test = torch.zeros(test_l, 4096)

        for i in os.listdir(dst + '/' + 'test_img'):
            count += 1

            temp_im = plt.imread(dst + '/test_img/' + i)
            temp_im -= mean
            temp_im /= sd

            img_test[count, :, :, :] = torch.Tensor(
                scipy.misc.imresize(temp_im, (64, 64)) / 256.0)

            # Min-max normalize unless the image is constant.
            lo = img_test[count, 0, :, :].min()
            hi = img_test[count, 0, :, :].max()
            if hi != lo:
                img_test[count, 0, :, :] = (img_test[count, 0, :, :] - lo) / (hi - lo)

            temp_im = scipy.misc.imresize(
                plt.imread(dst + '/' + 'test_label' + '/' + i), (64, 64))
            temp_im[temp_im > 0] = 1.0
            label_test[count, :] = torch.Tensor(
                temp_im.astype(float)).view(4096)

        dsets1 = {'Test': torch.utils.data.TensorDataset(img_test, label_test)}

        dset_loaders1 = {
            'Test':
            torch.utils.data.DataLoader(dsets1['Test'],
                                        batch_size=test_l,
                                        shuffle=False,
                                        num_workers=4)
        }
        dset_sizes1 = {'Test': len(dsets1['Test'])}

        with open(dst + '/test_loader_st.p', 'wb') as f:
            pickle.dump(dset_loaders1, f)

        with open(dst + '/test_size_st.p', 'wb') as f:
            pickle.dump(dset_sizes1, f)

        label_train = torch.zeros(train_l, 4096)
        img_train = torch.zeros(train_l, 1, 64, 64)

        count = -1

        for i in os.listdir(dst + '/' + 'train_img'):
            count += 1

            temp_im = plt.imread(dst + '/train_img/' + i)
            temp_im -= mean
            temp_im /= sd

            img_train[count, :, :, :] = torch.Tensor(
                scipy.misc.imresize(temp_im, (64, 64)) / 256.0)
            # Min-max normalize unless the image is constant.
            lo = img_train[count, 0, :, :].min()
            hi = img_train[count, 0, :, :].max()
            if hi != lo:
                img_train[count, 0, :, :] = (img_train[count, 0, :, :] - lo) / (hi - lo)

            temp_im = scipy.misc.imresize(
                plt.imread(dst + '/' + 'train_label' + '/' + i), (64, 64))
            temp_im[temp_im > 0] = 1.0
            label_train[count, :] = torch.Tensor(
                temp_im.astype(float)).view(4096)

        dsets = {
            'Training':
            torch.utils.data.TensorDataset(img_train, label_train)
        }

        # Both branches built the same dataset; they differed only in
        # batch size (the whole training set at once when use_all is set).
        batch_size = train_l if self.use_all else self.b_size
        dset_loaders = {
            'Training':
            torch.utils.data.DataLoader(dsets['Training'],
                                        batch_size=batch_size,
                                        shuffle=False,
                                        num_workers=4)
        }

        dset_sizes = {'Training': len(dsets['Training'])}

        # Bug fix: the original pickled dset_loaders1/dset_sizes1 (the
        # test objects) into the train cache files.
        with open(dst + '/train_loader_st.p', 'wb') as f:
            pickle.dump(dset_loaders, f)

        with open(dst + '/train_size_st.p', 'wb') as f:
            pickle.dump(dset_sizes, f)

        ### TODO maybe use os.listdir to get the made folders ?? or make the folders on the fly based on test_only input
        return dset_loaders, dset_sizes
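Because the loaders and sizes are cached with pickle, a later run can skip the preprocessing and read them back. A hedged usage sketch, assuming the same destination directory as above; note that pickled DataLoader objects are tied to the environment that created them, so re-running transform() is the safer path when anything changes:

import pickle

dst = '...'  # hypothetical: the same test_train_dst directory as above
with open(dst + '/test_loader_st.p', 'rb') as f:
    cached_loaders = pickle.load(f)

for images, labels in cached_loaders['Test']:
    # One full-size batch: (test_l, 1, 64, 64) images, (test_l, 4096) labels.
    print(images.shape, labels.shape)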
Example #8
    def transform(self, rand=False, patch_path='./', mean=0, sd=1):

        if self.test_fraction == 0:
            train_series, val_series = get_series(self.img_path, 0)
        else:
            train_series, val_series, test_series = get_series(
                self.img_path, self.test_fraction)

        temp_im = plt.imread(self.img_path + '/' + train_series[0] + '.png')

        label_train = torch.zeros(len(train_series), self.n_out * self.n_out)
        img_train = torch.zeros(len(train_series), 1, self.n_in, self.n_in)

        label_val = torch.zeros(len(val_series), self.n_out * self.n_out)
        img_val = torch.zeros(len(val_series), 1, self.n_in, self.n_in)

        dim = temp_im.shape
        count = -1

        mean, sd = find_stats(self.img_path)

        ### VAL SET

        for i in val_series:
            count += 1

            if dim[0] != self.n_in:
                temp_im2 = plt.imread(self.img_path + '/' + i + '.png')
                #temp_im2 -= mean
                #temp_im2 /= sd
                #print(temp_im.max())
                temp_im = scipy.misc.imresize(
                    temp_im2, (self.n_in, self.n_in)).astype(float)
                #print(temp_im.max())
                temp_im = temp_im.reshape(1, self.n_in, self.n_in)

                img_val[count, :, :, :] = torch.Tensor(
                    temp_im.astype(float)) / 255.0
            else:
                #temp_im-=mean
                #temp_im /=sd
                #print('here')

                temp_im = plt.imread(self.img_path + '/' + i +
                                     '.png').astype(float)
                #print(temp_im.max())
                temp_im = temp_im.reshape(1, self.n_in, self.n_in)
                #print(temp_im.max())
                img_val[count, :, :, :] = torch.Tensor(temp_im)

            # Min-max normalize unless the image is constant.
            lo = img_val[count, 0, :, :].min()
            hi = img_val[count, 0, :, :].max()
            if hi != lo:
                img_val[count, 0, :, :] = (img_val[count, 0, :, :] - lo) / (hi - lo)

            #img_train[count,:,:,:].sub(mean).div(sd)

            if self.label_path != 0:
                label_val[count, :] = torch.Tensor(
                    scipy.misc.imresize(
                        plt.imread(self.label_path + '/' + i + '.png')))

        count = -1
        ### TRAIN SET
        for i in train_series:
            count += 1

            if dim[0] != self.n_in:
                temp_im2 = plt.imread(self.img_path + '/' + i + '.png')
                #temp_im2 -= mean
                #temp_im2 /= sd
                #print(temp_im.max())
                temp_im = scipy.misc.imresize(
                    temp_im2, (self.n_in, self.n_in)).astype(float)
                #print(temp_im.max())
                temp_im = temp_im.reshape(1, self.n_in, self.n_in)

                img_train[count, :, :, :] = torch.Tensor(
                    temp_im.astype(float)) / 255.0
            else:
                #temp_im-=mean
                #temp_im /=sd
                #print('here')

                temp_im = plt.imread(self.img_path + '/' + i +
                                     '.png').astype(float)
                #print(temp_im.max())
                temp_im = temp_im.reshape(1, self.n_in, self.n_in)
                #print(temp_im.max())
                img_train[count, :, :, :] = torch.Tensor(temp_im)

            # Min-max normalize unless the image is constant.
            lo = img_train[count, 0, :, :].min()
            hi = img_train[count, 0, :, :].max()
            if hi != lo:
                img_train[count, 0, :, :] = (img_train[count, 0, :, :] - lo) / (hi - lo)

            #img_train[count,:,:,:].sub(mean).div(sd)

            if self.label_path != 0:
                label_train[count, :] = torch.Tensor(
                    scipy.misc.imresize(
                        plt.imread(self.label_path + '/' + i + '.png')))

        if patch_path == './':
            patch_path = self.img_path

        # Clean up stray notebook checkpoint directories under img_path.
        for dirpath, dirnames, filenames in os.walk(self.img_path):
            if '.ipynb_checkpoints' in dirpath:
                shutil.rmtree(dirpath)

        dsets = {
            'Training': torch.utils.data.TensorDataset(img_train, label_train),
            'Val': torch.utils.data.TensorDataset(img_val, label_val)
        }

        # The branches differed only in batch size: one batch per full
        # split when use_all is set, else self.b_size for both loaders.
        train_bs = len(train_series) if self.use_all else self.b_size
        val_bs = len(val_series) if self.use_all else self.b_size
        dset_loaders = {
            'Training':
            torch.utils.data.DataLoader(dsets['Training'],
                                        batch_size=train_bs,
                                        shuffle=False,
                                        num_workers=4),
            'Val':
            torch.utils.data.DataLoader(dsets['Val'],
                                        batch_size=val_bs,
                                        shuffle=False,
                                        num_workers=4),
        }

        dset_sizes = {
            'Training': len(dsets['Training']),
            'Val': len(dsets['Val'])
        }
        ### TODO maybe use os.listdir to get the made folders ?? or make the folders on the fly based on test_only input
        return dset_loaders, dset_sizes
Example #9
    def transform(self, rand=False, patch_path='./', mean=0, sd=1, test_on=0):
        if self.test_fraction == 0:
            train_series = get_series(self.label_path, self.test_fraction)
            # Keep the test structures defined (empty) so the test loop
            # and the pickling below still work when there is no split;
            # the original left test_series/img_test/label_test unbound
            # in this branch.
            test_series = []
            label_test = torch.zeros(0, 1024)
            img_test = torch.zeros(0, 1, 64, 64)
        else:
            train_series, test_series = get_series(self.label_path,
                                                   self.test_fraction)
            label_test = torch.zeros(len(test_series), 1024)
            img_test = torch.zeros(len(test_series), 1, 64, 64)

        dim = plt.imread(self.img_path + '/' + train_series[0] + '.png').shape

        label_train = torch.zeros(len(train_series), 1024)

        img_train = torch.zeros(len(train_series), 1, 64, 64)

        count = -1

        mean, sd = find_stats(self.img_path)
        # Note: this overrides any configured batch size; the whole
        # training set is served as a single batch.
        self.b_size = len(train_series)

        for i in test_series:
            count += 1

            temp_im = plt.imread(self.img_path + '/' + i + '.png')
            #print(i)
            #print(temp_im.min())
            temp_im -= mean
            temp_im /= sd
            #plt.imshow(scipy.misc.imresize(temp_im,(64,64))),plt.show()
            temp_im = scipy.misc.imresize(temp_im, (64, 64))
            temp_im = temp_im.reshape(1, 64, 64).astype('float')

            temp_im = torch.Tensor(temp_im)
            #print(temp_im.size())
            img_test[count, :, :, :] = temp_im

            if self.label_path != 0:
                temp_im2 = scipy.misc.imresize(
                    plt.imread(self.label_path + '/' + i + '.png'),
                    (32, 32)).astype(float)
                #plt.imshow(temp_im2),plt.show()

                temp_im2[temp_im2 != 0] = 1.0

                #plt.imshow(temp_im2),plt.show()
                temp_im2 = torch.Tensor(temp_im2)
                label_test[count, :] = (temp_im2).view(1024)

        count = -1
        for i in train_series:
            count += 1

            temp_im = plt.imread(self.img_path + '/' + i + '.png')

            #print(temp_im.min())
            temp_im -= mean
            temp_im /= sd
            #plt.imshow(scipy.misc.imresize(temp_im,(64,64))),plt.show()
            temp_im = scipy.misc.imresize(temp_im, (64, 64))
            temp_im = temp_im.reshape(1, 64, 64).astype('float')

            #print(temp_im.shape)
            #print(type(temp_im))
            temp_im = torch.Tensor(temp_im)
            #print(temp_im.size())
            img_train[count, :, :, :] = temp_im

            if self.label_path != 0:
                temp_im2 = scipy.misc.imresize(
                    plt.imread(self.label_path + '/' + i + '.png'),
                    (32, 32)).astype(float)
                #plt.imshow(temp_im2),plt.show()
                #print(temp_im2.min())
                #print(temp_im2.max())
                temp_im2[temp_im2 != 0] = 1

                #plt.imshow(temp_im2),plt.show()
                temp_im2 = torch.Tensor(temp_im2)
                label_train[count, :] = (temp_im2).view(1024)

        dsets = {
            'Training': torch.utils.data.TensorDataset(img_train, label_train)
        }

        dset_loaders = {
            'Training':
            torch.utils.data.DataLoader(dsets['Training'],
                                        batch_size=self.b_size,
                                        shuffle=False,
                                        num_workers=4)
        }

        dset_sizes = {'Training': len(dsets['Training'])}

        dsets1 = {'Test': torch.utils.data.TensorDataset(img_test, label_test)}
        dset_loaders1 = {
            'Test':
            torch.utils.data.DataLoader(dsets1['Test'],
                                        batch_size=self.b_size,
                                        shuffle=False,
                                        num_workers=4)
        }
        dset_sizes1 = {'Test': len(dsets1['Test'])}

        with open('/home/gam2018/cached_data/test.p', 'wb') as f:
            pickle.dump(dset_loaders1, f)

        with open('/home/gam2018/cached_data/test_size.p', 'wb') as f:
            pickle.dump(dset_sizes1, f)

        return dset_loaders, dset_sizes
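All three transform variants call find_stats(self.img_path) to get a dataset-wide mean and standard deviation for normalization. A minimal sketch of such a helper, assuming it pools the pixels of every image in the folder (the signature matches the call sites; the body is an assumption, not the project's actual code):

import os

import matplotlib.pyplot as plt
import numpy as np

def find_stats(img_path):
    # Pool every pixel of every image in the folder and return the
    # global mean and standard deviation.
    pixels = np.concatenate([
        plt.imread(os.path.join(img_path, name)).ravel()
        for name in os.listdir(img_path)
    ])
    return pixels.mean(), pixels.std()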