Example #1
def test_cluster_map_get_small_and_large_clusters():
    rng = np.random.RandomState(42)
    nb_clusters = 11
    cluster_map = ClusterMap()

    # Randomly generate small clusters
    indices = [rng.randint(0, 10, size=i) for i in range(1, nb_clusters + 1)]
    small_clusters = [Cluster(indices=indices[i]) for i in range(nb_clusters)]
    cluster_map.add_cluster(*small_clusters)

    # Randomly generate large clusters
    indices = [
        rng.randint(0, 10, size=i)
        for i in range(nb_clusters + 1, 2 * nb_clusters + 1)
    ]
    large_clusters = [Cluster(indices=indices[i]) for i in range(nb_clusters)]
    cluster_map.add_cluster(*large_clusters)

    assert_equal(len(cluster_map), 2 * nb_clusters)
    assert_equal(len(cluster_map.get_small_clusters(nb_clusters)),
                 len(small_clusters))
    assert_arrays_equal(cluster_map.get_small_clusters(nb_clusters),
                        small_clusters)
    assert_equal(len(cluster_map.get_large_clusters(nb_clusters + 1)),
                 len(large_clusters))
    assert_arrays_equal(cluster_map.get_large_clusters(nb_clusters + 1),
                        large_clusters)
Example #2
def test_cluster_map_centroid_iter():
    rng = np.random.RandomState(42)
    nb_clusters = 11

    cluster_map = ClusterMapCentroid()
    clusters = []
    for i in range(nb_clusters):
        new_centroid = np.zeros_like(features)
        new_cluster = ClusterCentroid(new_centroid,
                                      indices=rng.randint(0,
                                                          len(data),
                                                          size=10))
        cluster_map.add_cluster(new_cluster)
        clusters.append(new_cluster)

    assert_true(
        all([c1 is c2 for c1, c2 in zip(cluster_map.clusters, clusters)]))
    assert_array_equal(cluster_map, clusters)
    assert_array_equal(cluster_map.clusters, clusters)
    assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])

    # Set refdata
    cluster_map.refdata = data
    for c1, c2 in zip(cluster_map, clusters):
        assert_arrays_equal(c1, [data[i] for i in c2.indices])
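
Note: these tests rely on module-level fixtures (features, features_shape, data, and sometimes dtype) defined at the top of DIPY's clustering test file and not shown in the excerpts above. A representative stand-in, with values chosen here purely for illustration rather than copied from DIPY, would be:

import numpy as np

dtype = "float32"
features_shape = (1, 10)
features = np.ones(features_shape, dtype=dtype)

# A handful of streamline-like arrays of varying length to act as refdata.
data = [np.arange(3 * nb, dtype=dtype).reshape((-1, 3))
        for nb in (5, 10, 15, 17, 20)]
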
Example #3
def test_cluster_map_get_small_and_large_clusters():
    rng = np.random.RandomState(42)
    nb_clusters = 11
    cluster_map = ClusterMap()

    # Randomly generate small clusters
    indices = [rng.randint(0, 10, size=i) for i in range(1, nb_clusters+1)]
    small_clusters = [Cluster(indices=indices[i]) for i in range(nb_clusters)]
    cluster_map.add_cluster(*small_clusters)

    # Randomly generate large clusters
    indices = [rng.randint(0, 10, size=i)
               for i in range(nb_clusters+1, 2*nb_clusters+1)]
    large_clusters = [Cluster(indices=indices[i]) for i in range(nb_clusters)]
    cluster_map.add_cluster(*large_clusters)

    assert_equal(len(cluster_map), 2*nb_clusters)
    assert_equal(len(cluster_map.get_small_clusters(nb_clusters)),
                 len(small_clusters))
    assert_arrays_equal(cluster_map.get_small_clusters(nb_clusters),
                        small_clusters)
    assert_equal(len(cluster_map.get_large_clusters(nb_clusters+1)),
                 len(large_clusters))
    assert_arrays_equal(cluster_map.get_large_clusters(nb_clusters+1),
                        large_clusters)
Example #4
def test_cluster_map_iter():
    rng = np.random.RandomState(42)
    nb_clusters = 11

    # Test without specifying refdata in ClusterMap
    cluster_map = ClusterMap()
    clusters = []
    for i in range(nb_clusters):
        new_cluster = Cluster(indices=rng.randint(0, len(data), size=10))
        cluster_map.add_cluster(new_cluster)
        clusters.append(new_cluster)

    assert_true(all([c1 is c2 for c1, c2 in zip(cluster_map.clusters,
                                                clusters)]))
    assert_array_equal(cluster_map, clusters)
    assert_array_equal(cluster_map.clusters, clusters)
    assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])

    # Set refdata
    cluster_map.refdata = data
    for c1, c2 in zip(cluster_map, clusters):
        assert_arrays_equal(c1, [data[i] for i in c2.indices])

    # Remove refdata, i.e. back to indices
    cluster_map.refdata = None
    assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])
Example #5
def test_cluster_map_iter():
    rng = np.random.RandomState(42)
    nb_clusters = 11

    # Test without specifying refdata in ClusterMap
    cluster_map = ClusterMap()
    clusters = []
    for i in range(nb_clusters):
        new_cluster = Cluster(indices=rng.randint(0, len(data), size=10))
        cluster_map.add_cluster(new_cluster)
        clusters.append(new_cluster)

    assert_true(
        all([c1 is c2 for c1, c2 in zip(cluster_map.clusters, clusters)]))
    assert_array_equal(cluster_map, clusters)
    assert_array_equal(cluster_map.clusters, clusters)
    assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])

    # Set refdata
    cluster_map.refdata = data
    for c1, c2 in zip(cluster_map, clusters):
        assert_arrays_equal(c1, [data[i] for i in c2.indices])

    # Remove refdata, i.e. back to indices
    cluster_map.refdata = None
    assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])
Example #6
def bench_quickbundles():
    dtype = "float32"
    repeat = 10
    nb_points = 18

    streams, hdr = nib.trackvis.read(get_data('fornix'))
    fornix = [s[0].astype(dtype) for s in streams]
    fornix = streamline_utils.set_number_of_points(fornix, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [s + np.array([100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, -100], dtype) for s in fornix]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    qb = QB_Old(streamlines, threshold, pts=None)
    qb1_time = measure("QB_Old(streamlines, threshold, nb_points)", repeat)
    print("QuickBundles time: {0:.4}sec".format(qb1_time))
    assert_equal(qb.total_clusters, expected_nb_clusters)
    sizes1 = [qb.partitions()[i]['N'] for i in range(qb.total_clusters)]
    indices1 = [
        qb.partitions()[i]['indices'] for i in range(qb.total_clusters)
    ]

    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)
    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)
    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
Example #7
def bench_quickbundles():
    dtype = "float32"
    repeat = 10
    nb_points = 12

    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    fornix = [s[0].astype(dtype) for s in streams]
    fornix = streamline_utils.set_number_of_points(fornix, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [s + np.array([100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, -100], dtype) for s in fornix]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    qb = QB_Old(streamlines, threshold, pts=None)
    qb1_time = measure("QB_Old(streamlines, threshold, nb_points)", repeat)
    print("QuickBundles time: {0:.4}sec".format(qb1_time))
    assert_equal(qb.total_clusters, expected_nb_clusters)
    sizes1 = [qb.partitions()[i]['N'] for i in range(qb.total_clusters)]
    indices1 = [qb.partitions()[i]['indices']
                for i in range(qb.total_clusters)]

    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)
    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)
    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
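
Note: the eight list comprehensions in the benchmarks above spell out every (+/-100, +/-100, +/-100) offset by hand. An equivalent way to build the same eight octant copies is sketched below with a hypothetical helper (make_octant_copies is not part of DIPY); the streamline order can differ from the explicit version, which only matters if cluster indices are compared afterwards.

import itertools

import numpy as np


def make_octant_copies(bundle, dtype="float32"):
    # One translated copy of the bundle per octant, with the offsets
    # enumerated by itertools.product instead of written out by hand.
    streamlines = []
    for offset in itertools.product([100, -100], repeat=3):
        shift = np.array(offset, dtype=dtype)
        streamlines += [s + shift for s in bundle]
    return streamlines


# Usage with the resampled fornix from the benchmark:
# streamlines = make_octant_copies(fornix)
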
Example #8
def test_cluster_iter():
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering

    # Test without specifying refdata
    cluster = Cluster()
    cluster.assign(*indices)
    assert_array_equal(cluster.indices, indices)
    assert_array_equal(list(cluster), indices)

    # Test with specifying refdata in ClusterMap
    cluster.refdata = data
    assert_arrays_equal(list(cluster), [data[i] for i in indices])
Example #10
def test_cluster_map_getitem():
    nb_clusters = 11
    indices = list(range(nb_clusters))
    np.random.shuffle(indices)  # Non-trivial ordering
    advanced_indices = indices + [0, 1, 2, -1, -2, -3]

    cluster_map = ClusterMap()
    clusters = []
    for i in range(nb_clusters):
        new_cluster = Cluster(indices=range(i))
        cluster_map.add_cluster(new_cluster)
        clusters.append(new_cluster)

    # Test indexing
    for i in advanced_indices:
        assert_true(cluster_map[i] == clusters[i])

    # Test advanced indexing
    assert_arrays_equal(cluster_map[advanced_indices],
                        [clusters[i] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster_map.__getitem__, len(clusters))
    assert_raises(IndexError, cluster_map.__getitem__, -len(clusters) - 1)

    # Test slicing and negative indexing
    assert_equal(cluster_map[-1], clusters[-1])
    assert_array_equal(np.array(cluster_map[::2], dtype=object),
                       np.array(clusters[::2], dtype=object))
    assert_arrays_equal(cluster_map[::-1], clusters[::-1])
    assert_arrays_equal(cluster_map[:-1], clusters[:-1])
    assert_arrays_equal(cluster_map[1:], clusters[1:])
Example #11
def test_cluster_map_centroid_getitem():
    nb_clusters = 11
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
    advanced_indices = indices + [0, 1, 2, -1, -2, -3]

    cluster_map = ClusterMapCentroid()
    clusters = []
    for i in range(nb_clusters):
        centroid = np.zeros_like(features)
        cluster = ClusterCentroid(centroid)
        cluster.id = cluster_map.add_cluster(cluster)
        clusters.append(cluster)

    # Test indexing
    for i in advanced_indices:
        assert_equal(cluster_map[i], clusters[i])

    # Test advanced indexing
    assert_arrays_equal(cluster_map[advanced_indices],
                        [clusters[i] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster_map.__getitem__, len(clusters))
    assert_raises(IndexError, cluster_map.__getitem__, -len(clusters)-1)

    # Test slicing and negative indexing
    assert_equal(cluster_map[-1], clusters[-1])
    assert_array_equal(cluster_map[::2], clusters[::2])
    assert_arrays_equal(cluster_map[::-1], clusters[::-1])
    assert_arrays_equal(cluster_map[:-1], clusters[:-1])
    assert_arrays_equal(cluster_map[1:], clusters[1:])
Example #12
def test_cluster_map_centroid_getitem():
    nb_clusters = 11
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
    advanced_indices = indices + [0, 1, 2, -1, -2, -3]

    cluster_map = ClusterMapCentroid()
    clusters = []
    for i in range(nb_clusters):
        centroid = np.zeros_like(features)
        cluster = ClusterCentroid(centroid)
        cluster.id = cluster_map.add_cluster(cluster)
        clusters.append(cluster)

    # Test indexing
    for i in advanced_indices:
        assert_equal(cluster_map[i], clusters[i])

    # Test advanced indexing
    assert_arrays_equal(cluster_map[advanced_indices],
                        [clusters[i] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster_map.__getitem__, len(clusters))
    assert_raises(IndexError, cluster_map.__getitem__, -len(clusters) - 1)

    # Test slicing and negative indexing
    assert_equal(cluster_map[-1], clusters[-1])
    assert_array_equal(cluster_map[::2], clusters[::2])
    assert_arrays_equal(cluster_map[::-1], clusters[::-1])
    assert_arrays_equal(cluster_map[:-1], clusters[:-1])
    assert_arrays_equal(cluster_map[1:], clusters[1:])
Example #13
def test_cluster_centroid_iter():
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering

    # Test without specifying refdata in ClusterCentroid
    centroid = np.zeros(features_shape)
    cluster = ClusterCentroid(centroid)
    for idx in indices:
        cluster.assign(idx, (idx + 1) * features)

    assert_array_equal(cluster.indices, indices)
    assert_array_equal(list(cluster), indices)

    # Test with specifying refdata in ClusterCentroid
    cluster.refdata = data
    assert_arrays_equal(list(cluster), [data[i] for i in indices])
Example #14
def test_cluster_centroid_iter():
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering

    # Test without specifying refdata in ClusterCentroid
    centroid = np.zeros(features_shape)
    cluster = ClusterCentroid(centroid)
    for idx in indices:
        cluster.assign(idx, (idx+1)*features)

    assert_array_equal(cluster.indices, indices)
    assert_array_equal(list(cluster), indices)

    # Test with specifying refdata in ClusterCentroid
    cluster.refdata = data
    assert_arrays_equal(list(cluster), [data[i] for i in indices])
Example #15
def test_quickbundles_shape_uncompatibility():
    # QuickBundles' old default metric (AveragePointwiseEuclideanMetric,
    #  aka MDF) requires that all streamlines have the same number of points.
    metric = dipymetric.AveragePointwiseEuclideanMetric()
    qb = QuickBundles(threshold=20., metric=metric)
    assert_raises(ValueError, qb.cluster, data)

    # QuickBundles' new default metric (AveragePointwiseEuclideanMetric,
    # aka MDF combined with ResampleFeature) will automatically resample
    # streamlines so they all have 18 points.
    qb = QuickBundles(threshold=20.)
    clusters1 = qb.cluster(data)

    feature = dipymetric.ResampleFeature(nb_points=18)
    metric = dipymetric.AveragePointwiseEuclideanMetric(feature)
    qb = QuickBundles(threshold=20., metric=metric)
    clusters2 = qb.cluster(data)

    assert_arrays_equal(list(itertools.chain(*clusters1)),
                        list(itertools.chain(*clusters2)))
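
Note: as the comments in the test above explain, wrapping the MDF metric around a ResampleFeature removes the requirement that all streamlines share the same number of points. A minimal standalone sketch of that behaviour, assuming the same DIPY modules the tests import (dipy.segment.clustering and dipy.segment.metric) and toy data invented for this example, is:

import numpy as np

from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import (AveragePointwiseEuclideanMetric,
                                 ResampleFeature)

# Toy streamlines with *different* numbers of points.
streamlines = [np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]], dtype=np.float32),
               np.array([[0, 1, 0], [2, 1, 0]], dtype=np.float32),
               np.array([[0, 10, 0], [1, 10, 0], [2, 10, 0], [3, 10, 0]],
                        dtype=np.float32)]

# Resampling every streamline to 18 points before computing MDF makes the
# ragged input acceptable; with the plain metric this would raise ValueError.
feature = ResampleFeature(nb_points=18)
metric = AveragePointwiseEuclideanMetric(feature)
qb = QuickBundles(threshold=5., metric=metric)
clusters = qb.cluster(streamlines)
print(len(clusters), [list(c.indices) for c in clusters])
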
Example #17
def test_cluster_map_centroid_iter():
    rng = np.random.RandomState(42)
    nb_clusters = 11

    cluster_map = ClusterMapCentroid()
    clusters = []
    for i in range(nb_clusters):
        new_centroid = np.zeros_like(features)
        new_cluster = ClusterCentroid(new_centroid,
                                      indices=rng.randint(0, len(data),
                                                          size=10))
        cluster_map.add_cluster(new_cluster)
        clusters.append(new_cluster)

    assert_true(all([c1 is c2 for c1, c2 in
                     zip(cluster_map.clusters, clusters)]))
    assert_array_equal(cluster_map, clusters)
    assert_array_equal(cluster_map.clusters, clusters)
    assert_array_equal(cluster_map, [cluster.indices for cluster in clusters])

    # Set refdata
    cluster_map.refdata = data
    for c1, c2 in zip(cluster_map, clusters):
        assert_arrays_equal(c1, [data[i] for i in c2.indices])
Example #18
def test_ui_file_menu_2d(interactive=False):
    filename = "test_ui_file_menu_2d"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".pkl")

    # Create temporary directory and files
    os.mkdir(os.path.join(os.getcwd(), "testdir"))
    os.chdir("testdir")
    os.mkdir(os.path.join(os.getcwd(), "tempdir"))
    for i in range(10):
        open(os.path.join(os.getcwd(), "tempdir", "test" + str(i) + ".txt"),
             'wt').close()
    open("testfile.txt", 'wt').close()

    filemenu = ui.FileMenu2D(size=(500, 500), extensions=["txt"],
                             directory_path=os.getcwd())

    # We will collect the sequence of files that have been selected.
    selected_files = []

    def _on_change():
        selected_files.append(list(filemenu.listbox.selected))

    # Set up a callback when selection changes.
    filemenu.listbox.on_change = _on_change

    # Assign the counter callback to every possible event.
    event_counter = EventCounter()
    event_counter.monitor(filemenu)

    # Create a show manager and record/play events.
    show_manager = window.ShowManager(size=(600, 600),
                                      title="DIPY FileMenu")
    show_manager.ren.add(filemenu)

    # Recorded events:
    #  1. Click on 'testfile.txt'
    #  2. Click on 'tempdir/'
    #  3. Click on 'test0.txt'.
    #  4. Shift + Click on 'test6.txt'.
    #  5. Click on '../'.
    #  6. Click on 'testfile.txt'.
    show_manager.play_events_from_file(recording_filename)
    expected = EventCounter.load(expected_events_counts_filename)
    event_counter.check_counts(expected)

    # Check if the right files were selected.
    expected = [["testfile.txt"], ["tempdir"], ["test0.txt"],
                ["test0.txt", "test1.txt", "test2.txt", "test3.txt",
                 "test4.txt", "test5.txt", "test6.txt"],
                ["../"], ["testfile.txt"]]
    assert len(selected_files) == len(expected)
    assert_arrays_equal(selected_files, expected)

    # Remove temporary directory and files
    os.remove("testfile.txt")
    for i in range(10):
        os.remove(os.path.join(os.getcwd(), "tempdir",
                               "test" + str(i) + ".txt"))
    os.rmdir(os.path.join(os.getcwd(), "tempdir"))
    os.chdir("..")
    os.rmdir("testdir")

    if interactive:
        filemenu = ui.FileMenu2D(size=(500, 500), directory_path=os.getcwd())
        show_manager = window.ShowManager(size=(600, 600),
                                          title="DIPY FileMenu")
        show_manager.ren.add(filemenu)
        show_manager.start()
Example #19
def test_select_by_rois():
    streamlines = [np.array([[0, 0., 0.9],
                             [1.9, 0., 0.]]),
                   np.array([[0.1, 0., 0],
                             [0, 1., 1.],
                             [0, 2., 2.]]),
                   np.array([[2, 2, 2],
                             [3, 3, 3]])]

    # Make two ROIs:
    mask1 = np.zeros((4, 4, 4), dtype=bool)
    mask2 = np.zeros_like(mask1)
    mask1[0, 0, 0] = True
    mask2[1, 0, 0] = True

    selection = select_by_rois(streamlines, [mask1], [True],
                               tol=1)

    assert_arrays_equal(list(selection), [streamlines[0],
                        streamlines[1]])

    selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
                               tol=1)

    assert_arrays_equal(list(selection), [streamlines[0],
                        streamlines[1]])

    selection = select_by_rois(streamlines, [mask1, mask2], [True, False])

    assert_arrays_equal(list(selection), [streamlines[1]])

    # Setting tolerance too low gets overridden:
    selection = select_by_rois(streamlines, [mask1, mask2], [True, False],
                               tol=0.1)
    assert_arrays_equal(list(selection), [streamlines[1]])

    selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
                               tol=0.87)

    assert_arrays_equal(list(selection), [streamlines[1]])

    mask3 = np.zeros_like(mask1)
    mask3[0, 2, 2] = 1
    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False], tol=1.0)

    assert_arrays_equal(list(selection), [streamlines[0]])

    # Select using only one ROI
    selection = select_by_rois(streamlines, [mask1], [True], tol=0.87)
    assert_arrays_equal(list(selection), [streamlines[1]])

    selection = select_by_rois(streamlines, [mask1], [True], tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0],
                        streamlines[1]])

    # Use different modes:
    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="all",
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0]])

    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="either_end",
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0]])

    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="both_end",
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0]])

    mask2[0, 2, 2] = True
    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="both_end",
                               tol=1.0)

    assert_arrays_equal(list(selection), [streamlines[0],
                                          streamlines[1]])

    # Test with generator input:
    selection = select_by_rois(generate_sl(streamlines), [mask1], [True],
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0],
                        streamlines[1]])
Example #20
def test_cluster_centroid_getitem():
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
    advanced_indices = indices + [0, 1, 2, -1, -2, -3]

    # Test without specifying refdata in ClusterCentroid
    centroid = np.zeros(features_shape)
    cluster = ClusterCentroid(centroid)
    for idx in indices:
        cluster.assign(idx, (idx + 1) * features)

    # Test indexing
    for i in advanced_indices:
        assert_equal(cluster[i], indices[i])

    # Test advanced indexing
    assert_array_equal(cluster[advanced_indices],
                       [indices[i] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster.__getitem__, len(cluster))
    assert_raises(IndexError, cluster.__getitem__, -len(cluster) - 1)

    # Test slicing and negative indexing
    assert_equal(cluster[-1], indices[-1])
    assert_array_equal(cluster[::2], indices[::2])
    assert_arrays_equal(cluster[::-1], indices[::-1])
    assert_arrays_equal(cluster[:-1], indices[:-1])
    assert_arrays_equal(cluster[1:], indices[1:])

    # Test with specifying refdata in ClusterCentroid
    cluster.refdata = data

    # Test indexing
    for i in advanced_indices:
        assert_array_equal(cluster[i], data[indices[i]])

    # Test advanced indexing
    assert_arrays_equal(cluster[advanced_indices],
                        [data[indices[i]] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster.__getitem__, len(cluster))
    assert_raises(IndexError, cluster.__getitem__, -len(cluster) - 1)

    # Test slicing and negative indexing
    assert_array_equal(cluster[-1], data[indices[-1]])
    assert_arrays_equal(cluster[::2], [data[i] for i in indices[::2]])
    assert_arrays_equal(cluster[::-1], [data[i] for i in indices[::-1]])
    assert_arrays_equal(cluster[:-1], [data[i] for i in indices[:-1]])
    assert_arrays_equal(cluster[1:], [data[i] for i in indices[1:]])
Example #21
def test_select_by_rois():
    streamlines = [
        np.array([[0, 0., 0.9], [1.9, 0., 0.]]),
        np.array([[0.1, 0., 0], [0, 1., 1.], [0, 2., 2.]]),
        np.array([[2, 2, 2], [3, 3, 3]])
    ]

    # Make two ROIs:
    mask1 = np.zeros((4, 4, 4), dtype=bool)
    mask2 = np.zeros_like(mask1)
    mask1[0, 0, 0] = True
    mask2[1, 0, 0] = True

    selection = select_by_rois(streamlines, [mask1], [True], tol=1)

    assert_arrays_equal(list(selection), [streamlines[0], streamlines[1]])

    selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
                               tol=1)

    assert_arrays_equal(list(selection), [streamlines[0], streamlines[1]])

    selection = select_by_rois(streamlines, [mask1, mask2], [True, False])

    assert_arrays_equal(list(selection), [streamlines[1]])

    # Setting tolerance too low gets overridden:
    selection = select_by_rois(streamlines, [mask1, mask2], [True, False],
                               tol=0.1)
    assert_arrays_equal(list(selection), [streamlines[1]])

    selection = select_by_rois(streamlines, [mask1, mask2], [True, True],
                               tol=0.87)

    assert_arrays_equal(list(selection), [streamlines[1]])

    mask3 = np.zeros_like(mask1)
    mask3[0, 2, 2] = 1
    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               tol=1.0)

    assert_arrays_equal(list(selection), [streamlines[0]])

    # Select using only one ROI
    selection = select_by_rois(streamlines, [mask1], [True], tol=0.87)
    assert_arrays_equal(list(selection), [streamlines[1]])

    selection = select_by_rois(streamlines, [mask1], [True], tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0], streamlines[1]])

    # Use different modes:
    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="all",
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0]])

    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="either_end",
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0]])

    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="both_end",
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0]])

    mask2[0, 2, 2] = True
    selection = select_by_rois(streamlines, [mask1, mask2, mask3],
                               [True, True, False],
                               mode="both_end",
                               tol=1.0)

    assert_arrays_equal(list(selection), [streamlines[0], streamlines[1]])

    # Test with generator input:
    selection = select_by_rois(generate_sl(streamlines), [mask1], [True],
                               tol=1.0)
    assert_arrays_equal(list(selection), [streamlines[0], streamlines[1]])
Example #22
def bench_quickbundles():
    dtype = "float32"
    repeat = 10
    nb_points = 12

    fname = get_fnames('fornix')

    fornix = load_tractogram(fname, 'same',
                             bbox_valid_check=False).streamlines

    fornix_streamlines = Streamlines(fornix)
    fornix_streamlines = set_number_of_points(fornix_streamlines, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [
        s + np.array([100, 100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([100, -100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([100, 100, -100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([100, -100, -100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, 100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, -100, 100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, 100, -100], dtype) for s in fornix_streamlines
    ]
    streamlines += [
        s + np.array([-100, -100, -100], dtype) for s in fornix_streamlines
    ]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)
    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)
    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
Example #23
def test_cluster_getitem():
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
    advanced_indices = indices + [0, 1, 2, -1, -2, -3]

    # Test without specifying refdata in ClusterMap
    cluster = Cluster()
    cluster.assign(*indices)

    # Test indexing
    for i in advanced_indices:
        assert_equal(cluster[i], indices[i])

    # Test advanced indexing
    assert_array_equal(cluster[advanced_indices],
                       [indices[i] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster.__getitem__, len(cluster))
    assert_raises(IndexError, cluster.__getitem__, -len(cluster)-1)

    # Test slicing and negative indexing
    assert_equal(cluster[-1], indices[-1])
    assert_array_equal(cluster[::2], indices[::2])
    assert_arrays_equal(cluster[::-1], indices[::-1])
    assert_arrays_equal(cluster[:-1], indices[:-1])
    assert_arrays_equal(cluster[1:], indices[1:])

    # Test with specifying refdata in ClusterMap
    cluster.refdata = data

    # Test indexing
    for i in advanced_indices:
        assert_array_equal(cluster[i], data[indices[i]])

    # Test advanced indexing
    assert_array_equal(cluster[advanced_indices],
                       [data[indices[i]] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster.__getitem__, len(cluster))
    assert_raises(IndexError, cluster.__getitem__, -len(cluster)-1)

    # Test slicing and negative indexing
    assert_array_equal(cluster[-1], data[indices[-1]])
    assert_arrays_equal(cluster[::2], [data[i] for i in indices[::2]])
    assert_arrays_equal(cluster[::-1], [data[i] for i in indices[::-1]])
    assert_arrays_equal(cluster[:-1], [data[i] for i in indices[:-1]])
    assert_arrays_equal(cluster[1:], [data[i] for i in indices[1:]])
Example #24
def test_ui_listbox_2d(interactive=False):

    filename = "test_ui_listbox_2d"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".pkl")

    # Values that will be displayed by the listbox.
    values = list(range(1, 42 + 1))

    if interactive:
        listbox = ui.ListBox2D(values=values,
                               size=(500, 500),
                               multiselection=True,
                               reverse_scrolling=False)
        listbox.center = (300, 300)

        show_manager = window.ShowManager(size=(600, 600),
                                          title="DIPY ListBox")
        show_manager.ren.add(listbox)
        show_manager.start()

    # Recorded events:
    #  1. Click on 1
    #  2. Ctrl + click on 2,
    #  3. Ctrl + click on 2.
    #  4. Use scroll bar to scroll to the bottom.
    #  5. Click on 42.
    #  6. Use scroll bar to scroll to the top.
    #  7. Click on 1
    #  8. Use mouse wheel to scroll down.
    #  9. Shift + click on 42.
    # 10. Use mouse wheel to scroll back up.

    listbox = ui.ListBox2D(values=values,
                           size=(500, 500),
                           multiselection=True,
                           reverse_scrolling=False)
    listbox.center = (300, 300)

    # We will collect the sequence of values that have been selected.
    selected_values = []

    def _on_change():
        selected_values.append(list(listbox.selected))

    # Set up a callback when selection changes.
    listbox.on_change = _on_change

    # Assign the counter callback to every possible event.
    event_counter = EventCounter()
    event_counter.monitor(listbox)

    show_manager = window.ShowManager(size=(600, 600),
                                      title="DIPY ListBox")
    show_manager.ren.add(listbox)
    show_manager.play_events_from_file(recording_filename)
    expected = EventCounter.load(expected_events_counts_filename)
    event_counter.check_counts(expected)

    # Check if the right values were selected.
    expected = [[1], [1, 2], [1], [42], [1], values]
    assert len(selected_values) == len(expected)
    assert_arrays_equal(selected_values, expected)

    # Test without multiselection enabled.
    listbox.multiselection = False
    del selected_values[:]  # Clear the list.
    show_manager.play_events_from_file(recording_filename)

    # Check if the right values were selected.
    expected = [[1], [2], [2], [42], [1], [42]]
    assert len(selected_values) == len(expected)
    assert_arrays_equal(selected_values, expected)
Example #25
def test_ui_listbox_2d(recording=False):
    filename = "test_ui_listbox_2d"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".pkl")

    # Values that will be displayed by the listbox.
    values = list(range(1, 42 + 1))
    listbox = ui.ListBox2D(values=values,
                           size=(500, 500),
                           multiselection=True,
                           reverse_scrolling=False)
    listbox.center = (300, 300)

    # We will collect the sequence of values that have been selected.
    selected_values = []

    def _on_change():
        selected_values.append(list(listbox.selected))

    # Set up a callback when selection changes.
    listbox.on_change = _on_change

    # Assign the counter callback to every possible event.
    event_counter = EventCounter()
    event_counter.monitor(listbox)

    # Create a show manager and record/play events.
    show_manager = window.ShowManager(size=(600, 600), title="DIPY ListBox")
    show_manager.ren.add(listbox)

    if recording:
        # Record the following events:
        #  1. Click on 1
        #  2. Ctrl + click on 2,
        #  3. Ctrl + click on 2.
        #  4. Click on down arrow (4 times).
        #  5. Click on 21.
        #  6. Click on up arrow (5 times).
        #  7. Click on 1
        #  8. Use mouse wheel to scroll down.
        #  9. Shift + click on 42.
        # 10. Use mouse wheel to scroll back up.
        show_manager.record_events_to_file(recording_filename)
        print(list(event_counter.events_counts.items()))
        event_counter.save(expected_events_counts_filename)

    else:
        show_manager.play_events_from_file(recording_filename)
        expected = EventCounter.load(expected_events_counts_filename)
        event_counter.check_counts(expected)

    # Check if the right values were selected.
    expected = [[1], [1, 2], [1], [21], [1], values]
    assert len(selected_values) == len(expected)
    assert_arrays_equal(selected_values, expected)

    # Test without multiselection enabled.
    listbox.multiselection = False
    del selected_values[:]  # Clear the list.
    show_manager.play_events_from_file(recording_filename)

    # Check if the right values were selected.
    expected = [[1], [2], [2], [21], [1], [42]]
    assert len(selected_values) == len(expected)
    assert_arrays_equal(selected_values, expected)
Example #26
def test_ui_radio_button(interactive=False):
    filename = "test_ui_radio_button"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".pkl")

    radio_button_test = ui.RadioButton(
        labels=["option 1", "option 2\nOption 2", "option 3", "option 4"],
        position=(10, 10))

    old_positions = []
    for option in radio_button_test.options:
        old_positions.append(option.position)
    old_positions = np.asarray(old_positions)
    radio_button_test.position = (100, 100)
    new_positions = []
    for option in radio_button_test.options:
        new_positions.append(option.position)
    new_positions = np.asarray(new_positions)
    npt.assert_allclose(new_positions - old_positions, 90 * np.ones((4, 2)))

    selected_option = []

    def _on_change(radio_button):
        selected_option.append(radio_button.checked)

    # Set up a callback when selection changes
    radio_button_test.on_change = _on_change

    event_counter = EventCounter()
    event_counter.monitor(radio_button_test)

    # Create a show manager and record/play events.
    show_manager = window.ShowManager(size=(600, 600), title="DIPY Checkbox")
    show_manager.ren.add(radio_button_test)

    # Recorded events:
    #  1. Click on button of option 1.
    #  2. Click on button of option 2.
    #  3. Click on button of option 2.
    #  4. Click on text of option 2.
    #  5. Click on button of option 1.
    #  6. Click on text of option 3.
    #  7. Click on button of option 4.
    #  8. Click on text of option 4.
    show_manager.play_events_from_file(recording_filename)
    expected = EventCounter.load(expected_events_counts_filename)
    event_counter.check_counts(expected)

    # Check if the right options were selected.
    expected = [['option 1'], ['option 2\nOption 2'], ['option 2\nOption 2'],
                ['option 2\nOption 2'], ['option 1'], ['option 3'],
                ['option 4'], ['option 4']]
    assert len(selected_option) == len(expected)
    assert_arrays_equal(selected_option, expected)
    del show_manager

    if interactive:
        radio_button_test = ui.RadioButton(
            labels=["option 1", "option 2\nOption 2", "option 3", "option 4"],
            position=(100, 100))
        showm = window.ShowManager(size=(600, 600))
        showm.ren.add(radio_button_test)
        showm.start()
Example #27
def test_ui_file_menu_2d(interactive=False):
    filename = "test_ui_file_menu_2d"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".pkl")

    # Create temporary directory and files
    os.mkdir(os.path.join(os.getcwd(), "testdir"))
    os.chdir("testdir")
    os.mkdir(os.path.join(os.getcwd(), "tempdir"))
    for i in range(10):
        open(os.path.join(os.getcwd(), "tempdir", "test" + str(i) + ".txt"),
             'wt').close()
    open("testfile.txt", 'wt').close()

    filemenu = ui.FileMenu2D(size=(500, 500),
                             extensions=["txt"],
                             directory_path=os.getcwd())

    # We will collect the sequence of files that have been selected.
    selected_files = []

    def _on_change():
        selected_files.append(list(filemenu.listbox.selected))

    # Set up a callback when selection changes.
    filemenu.listbox.on_change = _on_change

    # Assign the counter callback to every possible event.
    event_counter = EventCounter()
    event_counter.monitor(filemenu)

    # Create a show manager and record/play events.
    show_manager = window.ShowManager(size=(600, 600), title="DIPY FileMenu")
    show_manager.ren.add(filemenu)

    # Recorded events:
    #  1. Click on 'testfile.txt'
    #  2. Click on 'tempdir/'
    #  3. Click on 'test0.txt'.
    #  4. Shift + Click on 'test6.txt'.
    #  5. Click on '../'.
    #  6. Click on 'testfile.txt'.
    show_manager.play_events_from_file(recording_filename)
    expected = EventCounter.load(expected_events_counts_filename)
    event_counter.check_counts(expected)

    # Check if the right files were selected.
    expected = [["testfile.txt"], ["tempdir"], ["test0.txt"],
                [
                    "test0.txt", "test1.txt", "test2.txt", "test3.txt",
                    "test4.txt", "test5.txt", "test6.txt"
                ], ["../"], ["testfile.txt"]]
    assert len(selected_files) == len(expected)
    assert_arrays_equal(selected_files, expected)

    # Remove temporary directory and files
    os.remove("testfile.txt")
    for i in range(10):
        os.remove(
            os.path.join(os.getcwd(), "tempdir", "test" + str(i) + ".txt"))
    os.rmdir(os.path.join(os.getcwd(), "tempdir"))
    os.chdir("..")
    os.rmdir("testdir")

    if interactive:
        filemenu = ui.FileMenu2D(size=(500, 500), directory_path=os.getcwd())
        show_manager = window.ShowManager(size=(600, 600),
                                          title="DIPY FileMenu")
        show_manager.ren.add(filemenu)
        show_manager.start()
Example #28
def test_cluster_getitem():
    indices = list(range(len(data)))
    np.random.shuffle(indices)  # Non-trivial ordering
    advanced_indices = indices + [0, 1, 2, -1, -2, -3]

    # Test without specifying refdata in ClusterMap
    cluster = Cluster()
    cluster.assign(*indices)

    # Test indexing
    for i in advanced_indices:
        assert_equal(cluster[i], indices[i])

    # Test advanced indexing
    assert_array_equal(cluster[advanced_indices],
                       [indices[i] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster.__getitem__, len(cluster))
    assert_raises(IndexError, cluster.__getitem__, -len(cluster) - 1)

    # Test slicing and negative indexing
    assert_equal(cluster[-1], indices[-1])
    assert_array_equal(cluster[::2], indices[::2])
    assert_arrays_equal(cluster[::-1], indices[::-1])
    assert_arrays_equal(cluster[:-1], indices[:-1])
    assert_arrays_equal(cluster[1:], indices[1:])

    # Test with wrong indexing object
    assert_raises(TypeError, cluster.__getitem__, "wrong")

    # Test with specifying refdata in ClusterMap
    cluster.refdata = data

    # Test indexing
    for i in advanced_indices:
        assert_array_equal(cluster[i], data[indices[i]])

    # Test advanced indexing
    assert_arrays_equal(cluster[advanced_indices],
                        [data[indices[i]] for i in advanced_indices])

    # Test index out of bounds
    assert_raises(IndexError, cluster.__getitem__, len(cluster))
    assert_raises(IndexError, cluster.__getitem__, -len(cluster) - 1)

    # Test slicing and negative indexing
    assert_array_equal(cluster[-1], data[indices[-1]])
    assert_arrays_equal(cluster[::2], [data[i] for i in indices[::2]])
    assert_arrays_equal(cluster[::-1], [data[i] for i in indices[::-1]])
    assert_arrays_equal(cluster[:-1], [data[i] for i in indices[:-1]])
    assert_arrays_equal(cluster[1:], [data[i] for i in indices[1:]])

    # Test with wrong indexing object
    assert_raises(TypeError, cluster.__getitem__, "wrong")
Example #29
def test_ui_radio_button(interactive=False):
    filename = "test_ui_radio_button"
    recording_filename = pjoin(DATA_DIR, filename + ".log.gz")
    expected_events_counts_filename = pjoin(DATA_DIR, filename + ".pkl")

    radio_button_test = ui.RadioButton(
        labels=["option 1", "option 2\nOption 2", "option 3", "option 4"],
        position=(10, 10))

    old_positions = []
    for option in radio_button_test.options:
        old_positions.append(option.position)
    old_positions = np.asarray(old_positions)
    radio_button_test.position = (100, 100)
    new_positions = []
    for option in radio_button_test.options:
        new_positions.append(option.position)
    new_positions = np.asarray(new_positions)
    npt.assert_allclose(new_positions - old_positions,
                        90 * np.ones((4, 2)))

    selected_option = []

    def _on_change(radio_button):
        selected_option.append(radio_button.checked)

    # Set up a callback when selection changes
    radio_button_test.on_change = _on_change

    event_counter = EventCounter()
    event_counter.monitor(radio_button_test)

    # Create a show manager and record/play events.
    show_manager = window.ShowManager(size=(600, 600),
                                      title="DIPY Checkbox")
    show_manager.ren.add(radio_button_test)

    # Recorded events:
    #  1. Click on button of option 1.
    #  2. Click on button of option 2.
    #  3. Click on button of option 2.
    #  4. Click on text of option 2.
    #  5. Click on button of option 1.
    #  6. Click on text of option 3.
    #  7. Click on button of option 4.
    #  8. Click on text of option 4.
    show_manager.play_events_from_file(recording_filename)
    expected = EventCounter.load(expected_events_counts_filename)
    event_counter.check_counts(expected)

    # Check if the right options were selected.
    expected = [['option 1'], ['option 2\nOption 2'], ['option 2\nOption 2'],
                ['option 2\nOption 2'], ['option 1'], ['option 3'],
                ['option 4'], ['option 4']]
    assert len(selected_option) == len(expected)
    assert_arrays_equal(selected_option, expected)
    del show_manager

    if interactive:
        radio_button_test = ui.RadioButton(
            labels=["option 1", "option 2\nOption 2", "option 3", "option 4"],
            position=(100, 100))
        showm = window.ShowManager(size=(600, 600))
        showm.ren.add(radio_button_test)
        showm.start()