Example #1
    def test_square_matrices_1(self):
        op4 = OP4()
        # matrices = op4.read_op4(os.path.join(op4Path, fname))
        form1 = 1
        form2 = 2
        form3 = 2
        from numpy import matrix, ones, reshape, arange

        A1 = matrix(ones((3, 3), dtype="float64"))
        A2 = reshape(arange(9, dtype="float64"), (3, 3))
        A3 = matrix(ones((1, 1), dtype="float32"))
        matrices = {"A1": (form1, A1), "A2": (form2, A2), "A3": (form3, A3)}

        for (is_binary, fname) in [(False, "small_ascii.op4"), (True, "small_binary.op4")]:
            op4_filename = os.path.join(op4Path, fname)
            op4.write_op4(op4_filename, matrices, name_order=None, precision="default", is_binary=is_binary)
            matrices2 = op4.read_op4(op4_filename, precision="default")
            (form1b, A1b) = matrices2["A1"]
            (form2b, A2b) = matrices2["A2"]
            (form3b, A3b) = matrices2["A3"]
            self.assertEqual(form1, form1b)
            self.assertEqual(form2, form2b)
            self.assertEqual(form3, form3b)

            self.assertTrue(array_equal(A1, A1b))
            self.assertTrue(array_equal(A2, A2b))
            self.assertTrue(array_equal(A3, A3b))
            del A1b, A2b, A3b
            del form1b, form2b, form3b
Example #2
def test_intersect_time():
    image = np.random.rand(5, 3600)
    spec = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([8, 6, 4, 2, 0]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        900,
        0.25
    )
    image = np.random.rand(5, 3600)
    spec2 = LinearTimeSpectrogram(image,
        np.linspace(0, 0.25 * (image.shape[1] - 1), image.shape[1]),
        np.array([9, 7, 5, 3, 1]),
        datetime(2010, 1, 1, 0, 15),
        datetime(2010, 1, 1, 0, 30),
        901,
        0.25
    )

    one, other = LinearTimeSpectrogram.intersect_time(
        [spec, spec2]
    )

    assert one.shape[1] == other.shape[1]
    assert one.shape[1] == 3596
    assert np.array_equal(one.data, spec.data[:, 4:])
    assert np.array_equal(other.data, spec2.data[:, :-4])

    assert np.array_equal(one.time_axis, other.time_axis)
    assert one.t_init == other.t_init
    assert is_linear(one.time_axis)
    assert is_linear(other.time_axis)
Example #3
  def testReadWrite(self):
    original = self._encoder(self.n, name=self.name)
    originalValue = original.encode([1,0,1,0,1,0,1,0,1])

    proto1 = PassThroughEncoderProto.new_message()
    original.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = PassThroughEncoderProto.read(f)

    encoder = PassThroughEncoder.read(proto2)

    self.assertIsInstance(encoder, PassThroughEncoder)
    self.assertEqual(encoder.name, original.name)
    self.assertEqual(encoder.verbosity, original.verbosity)
    self.assertEqual(encoder.w, original.w)
    self.assertEqual(encoder.n, original.n)
    self.assertEqual(encoder.description, original.description)
    self.assertTrue(numpy.array_equal(encoder.encode([1,0,1,0,1,0,1,0,1]),
                                      originalValue))
    self.assertEqual(original.decode(encoder.encode([1,0,1,0,1,0,1,0,1])),
                     encoder.decode(original.encode([1,0,1,0,1,0,1,0,1])))

    # Feed in a new value and ensure the encodings match
    result1 = original.encode([0,1,0,1,0,1,0,1,0])
    result2 = encoder.encode([0,1,0,1,0,1,0,1,0])
    self.assertTrue(numpy.array_equal(result1, result2))
Example #4
def test_rescale(arr, ndv, dst_dtype):
    if dst_dtype == np.uint16:
        assert np.array_equal(
                _rescale(arr, ndv, dst_dtype),
                np.concatenate(
                    [
                        arr.astype(dst_dtype),
                        _simple_mask(
                            arr.astype(dst_dtype),
                            (ndv, ndv, ndv)
                        ).reshape(1, arr.shape[1], arr.shape[2])
                    ]
                )
            )
    else:
        assert np.array_equal(
                _rescale(arr, ndv, dst_dtype),
                np.concatenate(
                    [
                        (arr / 257.0).astype(dst_dtype),
                        _simple_mask(
                            arr.astype(dst_dtype),
                            (ndv, ndv, ndv)
                        ).reshape(1, arr.shape[1], arr.shape[2])
                    ]
                )
            )
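The _rescale helper under test is not shown in the source; a minimal sketch consistent with the assertions above (16-bit output keeps the raw values, 8-bit output divides by 257, and a nodata mask built with the test's own _simple_mask is appended as an extra band) could look like this. The name _rescale_sketch and its body are assumptions, not the library's implementation:

import numpy as np

def _rescale_sketch(arr, ndv, dst_dtype):
    # Hypothetical reimplementation mirroring the expectations in test_rescale.
    if np.dtype(dst_dtype) == np.uint16:
        rescaled = arr.astype(dst_dtype)            # keep 16-bit values as-is
    else:
        rescaled = (arr / 257.0).astype(dst_dtype)  # scale 16-bit range down to 8-bit
    mask = _simple_mask(arr.astype(dst_dtype), (ndv, ndv, ndv))
    return np.concatenate([rescaled, mask.reshape(1, arr.shape[1], arr.shape[2])])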
Example #5
  def testNegativeDelta(self):
    self.assertTrue(
        np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
    self.assertTrue(
        np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
    self.assertTrue(
        np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))
Example #6
    def test_train(self):
        # y = x + 0
        x1 = np.array([2, 4])
        y1 = np.array([2, 4])
        m1 = LinearRegression(1)
        m1.train(x1, y1, n_iter=1, lr=0.1)

        # expected W and b after 1 iteration with lr 0.1
        exp_W1 = np.array([1.0])
        exp_b1 = 0.3
        self.assertTrue(np.array_equal(m1.W, exp_W1))
        self.assertAlmostEqual(m1.b[0], exp_b1)

        # y = x1 + x2 + 0
        x2 = np.array([[2, 2],
                       [4, 4]])
        y2 = np.array([4, 8])
        m2 = LinearRegression(2)
        m2.train(x2, y2, n_iter=1, lr=0.1)

        # expected W and b after 1 iteration with lr 0.1
        exp_W2 = np.array([2.0, 2.0])
        exp_b2 = 0.6
        self.assertTrue(np.array_equal(m2.W, exp_W2))
        self.assertAlmostEqual(m2.b[0], exp_b2)
Example #7
def _test_column_grouping(m=10, n=5000, num_repeat=5, verbose=False):
    print('\nTesting column_grouping ...')
    A = np.array([[True, False, False, False, False],
                  [True, True, False, True, True]])
    grps1 = _column_group_loop(A)
    grps2 = _column_group_recursive(A)
    grps3 = [np.array([0]),
             np.array([1, 3, 4]),
             np.array([2])]
    print('OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps2)]) else 'Fail')
    print('OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps3)]) else 'Fail')

    for i in range(0, num_repeat):
        A = np.random.rand(m, n)
        B = A > 0.5
        start = time.time()
        grps1 = _column_group_loop(B)
        elapsed_loop = time.time() - start
        start = time.time()
        grps2 = _column_group_recursive(B)
        elapsed_recursive = time.time() - start
        if verbose:
            print('Loop     :', elapsed_loop)
            print('Recursive:', elapsed_recursive)
        print('OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps2)]) else 'Fail')
    # sorted_idx = np.concatenate(grps)
    # print B
    # print sorted_idx
    # print B[:,sorted_idx]
    return
Example #8
def Color_Features_Extract(img_folder):
    print "Color_Features_Extract Start"
    starttime = datetime.datetime.now()

    back = np.array([255,128,128])

    image_num = len(os.listdir(img_folder))

    Color_Features = []

    for index, image_name in enumerate(os.listdir(img_folder)):
        image_path = os.path.join(img_folder, image_name)
        image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2LAB)
        rows, columns, lab = image.shape

        # Make densely-sampling color features
        pixel_index = 0

        for x in range(rows):
            for y in range(columns):
                if pixel_index % 9 == 0 and not np.array_equal(image[x][y], back):
                    Color_Features.append(image[x][y].tolist())
                pixel_index += 1

    # Get CodeBook of Color_Features
    Color_Features = np.float32(Color_Features)

    # Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

    # Set flags (Just to avoid line break in the code)
    flags = cv2.KMEANS_RANDOM_CENTERS

    # Apply KMeans
    compactness,labels,centers = cv2.kmeans(Color_Features,800,None,criteria,10,flags)

    Image_Color_Features = [[0 for x in range(800)] for y in range(image_num)]

    color_index = 0

    for image_index, image_name in enumerate(os.listdir(img_folder)):
        image_path = os.path.join(img_folder, image_name)
        image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2LAB)
        rows, columns, lab = image.shape

        pixel_index = 0

        for x in range(rows):
            for y in range(columns):
                if pixel_index % 9 == 0 and not np.array_equal(image[x][y], back):
                    Image_Color_Features[image_index][labels[color_index]] += 1
                    color_index += 1
                pixel_index += 1
        print(image_name)

    endtime = datetime.datetime.now()
    print "Time: " + str((endtime - starttime).seconds) + "s"
    print "Color_Features_Extract End"

    return Image_Color_Features
Example #9
    def test_distance(self):
        a = np.array([1, 2])
        b = np.array([2, 1])
        self.assertEqual(np.sqrt(2), distance(a, b))
        self.assertAlmostEqual(0.2117,
                               distance(a, b, func=sigmod), places=3)
        self.assertAlmostEqual(0.26,
                               distance(a, b, func=gaussian), places=1)

        a = np.array([[1, 2], [2,1]])
        b = np.array([[2, 1], [1,2]])
        self.assertTrue(np.array_equal(np.array([np.sqrt(2), np.sqrt(2)]),
                            distance(a,b)))

        a = np.array([[1, 2], [2,1]])
        b = np.array([2, 1])
        self.assertTrue(np.array_equal(np.array([np.sqrt(2), np.sqrt(0)]),
                            distance(a,b)))

        a = np.array([[1, 2], [2,1]])
        b = np.array([[2, 1], [1,2]])
        self.assertTrue(np.array_equal(np.array([2, 2]),
                            distance(a,b, mode='manhatton')))

        a = np.array([[2, 1], [2,1]])
        b = np.array([[2, 1], [1,2]])
        print(distance(a, b, mode='pearson'))
Example #10
def _testFloodFill(SegmentationHelper):
    filledMask = SegmentationHelper._floodFill(
        testImage, (1, 1), 5, connectivity=8)
    assert numpy.array_equal(
        filledMask,
        numpy.array([
            [255, 255, 0, 0, 0, 0],
            [255, 255, 255, 255, 0, 0],
            [0, 255, 0, 255, 255, 255],
            [0, 255, 255, 255, 0, 0],
            [0, 0, 255, 0, 0, 0],
            [0, 0, 0, 255, 0, 0]
        ], dtype=numpy.uint8))
    assert numpy.array_equal(testImage, originalTestImage)

    # Now, with connectivity=4
    filledMask = SegmentationHelper._floodFill(
        testImage, (1, 1), 5, connectivity=4)
    assert numpy.array_equal(
        filledMask,
        numpy.array([
            [255, 255, 0, 0, 0, 0],
            [255, 255, 255, 255, 0, 0],
            [0, 255, 0, 255, 255, 255],
            [0, 255, 255, 255, 0, 0],
            [0, 0, 255, 0, 0, 0],
            [0, 0, 0, 0, 0, 0]
        ], dtype=numpy.uint8))
    assert numpy.array_equal(testImage, originalTestImage)
Example #11
def test_rasterize_supported_dtype(basic_geometry):
    """ Supported data types should return valid results """

    with Env():
        supported_types = (
            ('int16', -32768),
            ('int32', -2147483648),
            ('uint8', 255),
            ('uint16', 65535),
            ('uint32', 4294967295),
            ('float32', 1.434532),
            ('float64', -98332.133422114)
        )

        for dtype, default_value in supported_types:
            truth = np.zeros(DEFAULT_SHAPE, dtype=dtype)
            truth[2:4, 2:4] = default_value

            result = rasterize(
                [basic_geometry],
                out_shape=DEFAULT_SHAPE,
                default_value=default_value,
                dtype=dtype
            )
            assert np.array_equal(result, truth)
            assert np.dtype(result.dtype) == np.dtype(truth.dtype)

            result = rasterize(
                [(basic_geometry, default_value)],
                out_shape=DEFAULT_SHAPE
            )
            if np.dtype(dtype).kind == 'f':
                assert np.allclose(result, truth)
            else:
                assert np.array_equal(result, truth)
Example #12
def test_Period():
    p = Period(numbers[0:2], units[0])
    assert sa_asserts.sa_access(p)
    assert i_asserts.interval(p, start=numbers[0], stop=numbers[1])
    assert np.array_equal(p.info['start'], p.start)
    assert np.array_equal(p.info['stop'], p.stop)
    assert np.array_equal(p.info['length'], p.length)
Example #13
  def testReadWrite(self):
    """Test ScalarEncoder Cap'n Proto serialization implementation."""
    originalValue = self._l.encode(1)

    proto1 = ScalarEncoderProto.new_message()
    self._l.write(proto1)

    # Write the proto to a temp file and read it back into a new proto
    with tempfile.TemporaryFile() as f:
      proto1.write(f)
      f.seek(0)
      proto2 = ScalarEncoderProto.read(f)

    encoder = ScalarEncoder.read(proto2)

    self.assertIsInstance(encoder, ScalarEncoder)
    self.assertEqual(encoder.w, self._l.w)
    self.assertEqual(encoder.minval, self._l.minval)
    self.assertEqual(encoder.maxval, self._l.maxval)
    self.assertEqual(encoder.periodic, self._l.periodic)
    self.assertEqual(encoder.n, self._l.n)
    self.assertEqual(encoder.radius, self._l.radius)
    self.assertEqual(encoder.resolution, self._l.resolution)
    self.assertEqual(encoder.name, self._l.name)
    self.assertEqual(encoder.verbosity, self._l.verbosity)
    self.assertEqual(encoder.clipInput, self._l.clipInput)
    self.assertTrue(numpy.array_equal(encoder.encode(1), originalValue))
    self.assertEqual(self._l.decode(encoder.encode(1)),
                     encoder.decode(self._l.encode(1)))

    # Feed in a new value and ensure the encodings match
    result1 = self._l.encode(7)
    result2 = encoder.encode(7)
    self.assertTrue(numpy.array_equal(result1, result2))
Example #14
    def test_A(self):
        cid0 = CORD2R()
        Lx = 2.
        Ly = 0.
        Lz = 3.
        Fy = 1.
        origin = array([-Lx, 0., -Lz])
        z_axis = origin + array([0., 0., 1.])
        xz_plane = origin + array([1., 0., 1.])
        rid = 0
        data = [1, rid] + list(origin) + list(z_axis) + list(xz_plane)

        Fxyz = [0., -Fy, 0.]
        Mxyz = [0., 0., 0.]
        cid_new = CORD2R(data=data)
        model = None

        Fxyz_local, Mxyz_local = TransformLoadWRT(Fxyz, Mxyz, cid0, cid_new,
                                                  model, is_cid_int=False)

        r = array([Lx, Ly, Lz])
        F = array([0., -Fy, 0.])
        M = cross(r, F)
        self.assertTrue(array_equal(Fxyz_local, F),
                        "expected=%s actual=%s" % (F, Fxyz_local))
        self.assertTrue(array_equal(Mxyz_local, M),
                        "expected=%s actual=%s" % (M, Mxyz_local))
Example #15
def test_convert_r_matrix():

    is_na = robj.baseenv.get("is.na")

    seriesd = _test.getSeriesData()
    frame = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])
    # Null data
    frame["E"] = [np.nan for item in frame["A"]]

    r_dataframe = convert_to_r_matrix(frame)

    assert np.array_equal(convert_robj(r_dataframe.rownames), frame.index)
    assert np.array_equal(convert_robj(r_dataframe.colnames), frame.columns)
    assert all(is_na(item) for item in r_dataframe.rx(True, "E"))

    for column in frame[["A", "B", "C", "D"]]:
        coldata = r_dataframe.rx(True, column)
        original_data = frame[column]
        assert np.array_equal(convert_robj(coldata),
                              original_data)

    # Pandas bug 1282
    frame["F"] = ["text" if item % 2 == 0 else np.nan for item in range(30)]

    # FIXME: Ugly, this whole module needs to be ported to nose/unittest
    try:
        convert_to_r_matrix(frame)
    except TypeError:
        pass
Example #16
    def test_n_largest_area_contours_images__with_invert(self):
        # given
        image = cv2.imread("./images/SnipNLargestAreaContours/"
                           "test_n_largest_area_contours_image__with_invert__input_image.png",
                           cv2.IMREAD_GRAYSCALE)
        n = 2
        invert_flag = True
        snip_n_largest_area_contours = SnipNLargestAreaContours(image, n, invert_flag)

        expected_largest_contour_image_1 = cv2.imread(
            "./images/SnipNLargestAreaContours/test_n_largest_area_contours_images__with_invert__snipped_image_1.png",
            flags=cv2.IMREAD_GRAYSCALE)
        expected_largest_contour_image_2 = cv2.imread(
            "./images/SnipNLargestAreaContours/test_n_largest_area_contours_images__with_invert__snipped_image_2.png",
            flags=cv2.IMREAD_GRAYSCALE)

        expected_n_largest_area_contours_images = [expected_largest_contour_image_1, expected_largest_contour_image_2]

        # when
        actual_n_largest_area_contours_images = snip_n_largest_area_contours.n_largest_area_contours_images

        # then
        self.assertTrue(np.array_equal(actual_n_largest_area_contours_images[0],
                                       expected_n_largest_area_contours_images[0]))
        self.assertTrue(np.array_equal(actual_n_largest_area_contours_images[1],
                                       expected_n_largest_area_contours_images[1]))
Example #17
def test_convert_matrix():
    mat = _test_matrix()

    converted = convert_robj(mat)

    assert np.array_equal(converted.index, ['a', 'b', 'c'])
    assert np.array_equal(converted.columns, ['one', 'two', 'three'])
Example #18
def test_convert_r_dataframe():

    is_na = robj.baseenv.get("is.na")

    seriesd = _test.getSeriesData()
    frame = pd.DataFrame(seriesd, columns=['D', 'C', 'B', 'A'])

    # Null data
    frame["E"] = [np.nan for item in frame["A"]]
    # Some mixed type data
    frame["F"] = ["text" if item % 2 == 0 else np.nan for item in range(30)]

    r_dataframe = convert_to_r_dataframe(frame)

    assert np.array_equal(convert_robj(r_dataframe.rownames), frame.index)
    assert np.array_equal(convert_robj(r_dataframe.colnames), frame.columns)
    assert all(is_na(item) for item in r_dataframe.rx2("E"))

    for column in frame[["A", "B", "C", "D"]]:
        coldata = r_dataframe.rx2(column)
        original_data = frame[column]
        assert np.array_equal(convert_robj(coldata), original_data)

    for column in frame[["D", "E"]]:
        for original, converted in zip(frame[column],
                                       r_dataframe.rx2(column)):

            if pd.isnull(original):
                assert is_na(converted)
            else:
                assert original == converted
Example #19
    def test_mle(self):
        states = ["0", "1", "2", "3"]
        alphabet = ["A", "C", "G", "T"]
        training_data = [("AACCCGGGTTTTTTT", "001112223333333"),
                         ("ACCGTTTTTTT", "01123333333"),
                         ("ACGGGTTTTTT", "01222333333"),
                         ("ACCGTTTTTTTT", "011233333333"), ]
        training_outputs = array([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3],
                                  [0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3],
                                  [0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3],
                                  [0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3]])
        training_states = array([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3],
                                 [0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3],
                                 [0, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3],
                                 [0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3]])

        p_initial = array([1., 0., 0., 0.])
        p_transition = array([[0.2, 0.8, 0., 0.],
                              [0., 0.5, 0.5, 0.],
                              [0., 0., 0.5, 0.5],
                              [0., 0., 0., 1.]])
        p_emission = array(
            [[0.66666667, 0.11111111, 0.11111111, 0.11111111],
             [0.08333333, 0.75, 0.08333333, 0.08333333],
             [0.08333333, 0.08333333, 0.75, 0.08333333],
             [0.03125, 0.03125, 0.03125, 0.90625]])
        p_initial_out, p_transition_out, p_emission_out = MarkovModel._mle(
            len(states), len(alphabet), training_outputs, training_states, None, None, None)
        self.assertTrue(
            array_equal(around(p_initial_out, decimals=3), around(p_initial, decimals=3)))
        self.assertTrue(
            array_equal(around(p_transition_out, decimals=3), around(p_transition, decimals=3)))
        self.assertTrue(
            array_equal(around(p_emission_out, decimals=3), around(p_emission, decimals=3)))
Example #20
  def testString(self):
    indices = constant_op.constant([[4], [3], [1], [7]],
                                   dtype=dtypes.int32)
    updates = constant_op.constant(["four", "three", "one", "seven"],
                                   dtype=dtypes.string)
    expected = np.array([b"", b"one", b"", b"three", b"four",
                         b"", b"", b"seven"])
    scatter = self.scatter_nd(indices, updates, shape=(8,))
    with self.cached_session() as sess:
      result = sess.run(scatter)
      self.assertAllEqual(expected, result)

    # Same index is updated twice with the same value.
    indices = constant_op.constant([[4], [3], [3], [7]],
                                   dtype=dtypes.int32)
    updates = constant_op.constant(["a", "b", "b", "c"],
                                   dtype=dtypes.string)
    expected = np.array([b"", b"", b"", b"bb", b"a", b"", b"", b"c"])
    scatter = self.scatter_nd(indices, updates, shape=(8,))
    with self.cached_session() as sess:
      result = sess.run(scatter)
      self.assertAllEqual(expected, result)

    # Same index is updated twice with different values.
    indices = constant_op.constant([[4], [3], [3], [7]],
                                   dtype=dtypes.int32)
    updates = constant_op.constant(["a", "b", "c", "d"],
                                   dtype=dtypes.string)
    expected = [np.array([b"", b"", b"", b"bc", b"a", b"", b"", b"d"]),
                np.array([b"", b"", b"", b"cb", b"a", b"", b"", b"d"])]
    scatter = self.scatter_nd(indices, updates, shape=(8,))
    with self.cached_session() as sess:
      result = sess.run(scatter)
      self.assertTrue(np.array_equal(result, expected[0]) or
                      np.array_equal(result, expected[1]))
Example #21
def _serialize_volume_info(volume_info):
    """An implementation of nibabel.freesurfer.io._serialize_volume_info, since
    old versions of nibabel (<=2.1.0) don't have it."""
    keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
            'zras', 'cras']
    diff = set(volume_info.keys()).difference(keys)
    if len(diff) > 0:
        raise ValueError('Invalid volume info: %s.' % diff.pop())

    strings = list()
    for key in keys:
        if key == 'head':
            if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
                    volume_info[key], [2, 0, 20])):
                warnings.warn("Unknown extension code.")
            strings.append(np.array(volume_info[key], dtype='>i4').tobytes())
        elif key in ('valid', 'filename'):
            val = volume_info[key]
            strings.append('{} = {}\n'.format(key, val).encode('utf-8'))
        elif key == 'volume':
            val = volume_info[key]
            strings.append('{} = {} {} {}\n'.format(
                key, val[0], val[1], val[2]).encode('utf-8'))
        else:
            val = volume_info[key]
            strings.append('{} = {:0.10g} {:0.10g} {:0.10g}\n'.format(
                key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
    return b''.join(strings)
Example #22
    def test_save_and_load(self):
        states = "NR"
        alphabet = "AGTC"
        p_initial = array([1.0, 0.0])
        p_transition = array([[0.75, 0.25], [0.25, 0.75]])
        p_emission = array(
            [[0.45, 0.36, 0.06, 0.13], [0.24, 0.18, 0.12, 0.46]])
        markov_model_save = MarkovModel.MarkovModel(
            states,
            alphabet,
            p_initial,
            p_transition,
            p_emission)

        handle = StringIO()
        MarkovModel.save(markov_model_save, handle)
        handle.seek(0)
        markov_model_load = MarkovModel.load(handle)

        self.assertEqual(''.join(markov_model_load.states), states)
        self.assertEqual(''.join(markov_model_load.alphabet), alphabet)
        self.assertTrue(array_equal(markov_model_load.p_initial, p_initial))
        self.assertTrue(array_equal(markov_model_load.p_transition,
                                    p_transition))
        self.assertTrue(array_equal(markov_model_load.p_emission, p_emission))
Example #23
    def test_find_optimal_paras_rf(self):
        # first, load the file
        f = h5py.File('tuning_ref_results/NIS_results.hdf5', 'r+')
        # get the filter
        grp = f['ICA/tuning']
        wica = grp['Wica'][:]
        # get the reference results
        ica_optx = grp.attrs['ica_optx']
        ica_opty = grp.attrs['ica_opty']
        ica_optfreq = grp.attrs['ica_optfreq']
        ica_optor = grp.attrs['ica_optor']
        ica_optphase = grp.attrs['ica_optphase']

        # now shuffle this Wica. 1024x256
        wica = transpose_c_and_f(wica.T)

        # get result
        result = tuning.find_optimal_paras_rf(w=wica, legacy=True)

        # print result
        self.assertTrue(np.allclose(result['optx'], ica_optx))
        self.assertTrue(np.allclose(result['opty'], ica_opty))
        self.assertTrue(np.array_equal(result['optfreq'], ica_optfreq))
        self.assertTrue(np.array_equal(result['optor'], ica_optor))
        self.assertTrue(np.array_equal(result['optphase'], ica_optphase))
        f.close()
Example #24
    def test_tz_localize_dti(self):
        from pandas.tseries.offsets import Hour

        dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
                            freq='L')
        dti2 = dti.tz_localize('US/Eastern')

        dti_utc = DatetimeIndex(start='1/1/2005 05:00',
                                end='1/1/2005 5:00:30.256', freq='L',
                                tz='utc')

        self.assertTrue(np.array_equal(dti2.values, dti_utc.values))

        dti3 = dti2.tz_convert('US/Pacific')
        self.assertTrue(np.array_equal(dti3.values, dti_utc.values))

        dti = DatetimeIndex(start='11/6/2011 1:59',
                            end='11/6/2011 2:00', freq='L')
        self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
                          'US/Eastern')

        dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
                            freq='L')
        self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,
                          'US/Eastern')
Example #25
def test_Moster13SmHm_behavior():
	"""
	"""
	default_model = Moster13SmHm()
	mstar1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	ratio1 = mstar1/3.4275e10
	np.testing.assert_array_almost_equal(ratio1, 1.0, decimal=3)

	default_model.param_dict['n10'] *= 1.1
	mstar2 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	assert mstar2 > mstar1

	default_model.param_dict['n11'] *= 1.1
	mstar3 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	assert mstar3 == mstar2

	mstar4_z1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12, redshift=1)
	default_model.param_dict['n11'] *= 1.1
	mstar5_z1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12, redshift=1)
	assert mstar5_z1 != mstar4_z1

	mstar_realization1 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=43)
	mstar_realization2 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=43)
	mstar_realization3 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=44)
	assert np.array_equal(mstar_realization1, mstar_realization2)
	assert not np.array_equal(mstar_realization1, mstar_realization3)

	measured_scatter1 = np.std(np.log10(mstar_realization1))
	model_scatter = default_model.param_dict['scatter_model_param1']
	np.testing.assert_allclose(measured_scatter1, model_scatter, rtol=1e-3)

	default_model.param_dict['scatter_model_param1'] = 0.3
	mstar_realization4 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=43)
	measured_scatter4 = np.std(np.log10(mstar_realization4))
	np.testing.assert_allclose(measured_scatter4, 0.3, rtol=1e-3)
Example #26
def test_tabulate():
    scalar_no_units = lambda wlen: math.sqrt(wlen)
    scalar_with_units = lambda wlen: math.sqrt(wlen.value)
    array_no_units = lambda wlen: 1. + np.sqrt(wlen)
    array_with_units = lambda wlen: np.sqrt(wlen.value)
    add_units = lambda fval: (lambda wlen: fval(wlen) * u.erg)
    wlen = np.arange(1, 3) * u.Angstrom
    for v in True, False:
        # Test each mode without any function return units.
        f1 = tabulate_function_of_wavelength(scalar_no_units, wlen, v)
        assert f1[1] is None
        f2 = tabulate_function_of_wavelength(scalar_with_units, wlen, v)
        assert f2[1] is None
        f3 = tabulate_function_of_wavelength(array_no_units, wlen, v)
        assert f3[1] is None
        f4 = tabulate_function_of_wavelength(array_with_units, wlen, v)
        assert f4[1] is None
        # Now test with return units.
        g1 = tabulate_function_of_wavelength(
            add_units(scalar_no_units), wlen, v)
        assert np.array_equal(f1[0], g1[0]) and g1[1] == u.erg
        g2 = tabulate_function_of_wavelength(
            add_units(scalar_with_units), wlen, v)
        assert np.array_equal(f2[0], g2[0]) and g2[1] == u.erg
        g3 = tabulate_function_of_wavelength(
            add_units(array_no_units), wlen, v)
        assert np.array_equal(f3[0], g3[0]) and g3[1] == u.erg
        g4 = tabulate_function_of_wavelength(
            add_units(array_with_units), wlen, v)
        assert np.array_equal(f4[0], g4[0]) and g4[1] == u.erg
Example #27
    def test_setPairMask(self):
        '''check different setPairMask arguments.
        '''
        bdc = self.bdc
        dall = bdc(self.nickel)
        bdc.maskAllPairs(False)
        self.assertEqual(0, len(bdc(self.nickel)))
        for i in range(4):
            bdc.setPairMask(0, i, True)
        dst0a = bdc(self.nickel)
        bdc.setPairMask(range(4), 0, True, others=False)
        dst0b = bdc(self.nickel)
        self.assertTrue(numpy.array_equal(dst0a, dst0b))
        bdc.maskAllPairs(False)
        bdc.setPairMask(0, -7, True)
        dst0c = bdc(self.nickel)
        self.assertTrue(numpy.array_equal(dst0a, dst0c))
        bdc.maskAllPairs(False)
        bdc.setPairMask(0, 'all', True)
        dst0d = bdc(self.nickel)
        self.assertTrue(numpy.array_equal(dst0a, dst0d))
        bdc.setPairMask('all', 'all', False)
        self.assertEqual(0, len(bdc(self.nickel)))
        bdc.setPairMask('all', range(4), True)
        dall2 = bdc(self.nickel)
        self.assertTrue(numpy.array_equal(dall, dall2))
        self.assertRaises(ValueError, bdc.setPairMask, 'fooo', 2, True)
        self.assertRaises(ValueError, bdc.setPairMask, 'aLL', 2, True)
        return
Example #28
def test_two_triangles_without_edges():
    grid = two_triangles_with_depths()
    grid.edges = None

    grid.save_as_netcdf("2_triangles_without_edges.nc")

    # read it back in and check it out
    ug = UGrid.from_ncfile("2_triangles_without_edges.nc", load_data=True)

    assert ug.nodes.shape == (4, 2)
    assert ug.nodes.shape == grid.nodes.shape

    # not ideal to pull specific values out, but how else to test?
    assert np.array_equal(ug.nodes[0, :], (0.1, 0.1))
    assert np.array_equal(ug.nodes[-1, :], (3.1, 2.1))
    assert np.array_equal(ug.nodes, grid.nodes)

    assert ug.faces.shape == grid.faces.shape

    assert ug.edges is None

    depths = find_depths(ug)
    assert depths.data.shape == (4,)
    assert depths.data[0] == 1
    assert depths.attributes["units"] == "unknown"
Example #29
def test_StatesMatrixMerger():
    val_mat1_text = list(tools.ngram(test_text_df["text"].values[0], [1]))
    val_mat2_text = list(tools.ngram(test_text_df["text"].values[1], [1]))
    val_mat1 = ValuesMatrix(val_mat1_text, force2d="as_col")
    val_mat2 = ValuesMatrix(val_mat2_text, force2d="as_col")

    idx_data_mat1 = val_mat1.build_index_data_matrix()
    idx_data_mat2 = val_mat2.build_index_data_matrix()
    idx_data_mat1_old_ref_data = idx_data_mat1.states_matrix._ref_data
    idx_data_mat2_old_ref_data = idx_data_mat2.states_matrix._ref_data

    old_idx_data_mat1 = idx_data_mat1.index_matrix.copy()
    old_idx_data_mat2 = idx_data_mat2.index_matrix.copy()

    assert len(idx_data_mat1_old_ref_data) > 0
    assert len(idx_data_mat2_old_ref_data) > 0

    states_mats_merger = StatesMatrixMerger(idx_data_mat1.states_matrix,
                                            idx_data_mat2.states_matrix)

    assert len(states_mats_merger._unique_states_matrix_ids) > 1

    states_mats_merger.update()

    assert len(idx_data_mat1_old_ref_data) == 0
    assert len(idx_data_mat2_old_ref_data) == 0
    assert id(idx_data_mat1.states_matrix) == id(idx_data_mat2.states_matrix)
    assert len(idx_data_mat2.states_matrix._ref_data) > 1

    assert np.array_equal(idx_data_mat1.index_matrix, old_idx_data_mat1)
    assert not np.array_equal(idx_data_mat2.index_matrix, old_idx_data_mat2)

    assert np.array_equal(idx_data_mat1._data, val_mat1)
    assert np.array_equal(idx_data_mat2._data, val_mat2)
Example #30
def _read_volume_info(fobj):
    """An implementation of nibabel.freesurfer.io._read_volume_info, since old
    versions of nibabel (<=2.1.0) don't have it.
    """
    volume_info = dict()
    head = np.fromfile(fobj, '>i4', 1)
    if not np.array_equal(head, [20]):  # Read two bytes more
        head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
        if not np.array_equal(head, [2, 0, 20]):
            warnings.warn("Unknown extension code.")
            return volume_info

    volume_info['head'] = head
    for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
                'zras', 'cras']:
        pair = fobj.readline().decode('utf-8').split('=')
        if pair[0].strip() != key or len(pair) != 2:
            raise IOError('Error parsing volume info.')
        if key in ('valid', 'filename'):
            volume_info[key] = pair[1].strip()
        elif key == 'volume':
            volume_info[key] = np.array(pair[1].split()).astype(int)
        else:
            volume_info[key] = np.array(pair[1].split()).astype(float)
    # Ignore the rest
    return volume_info
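A round-trip sketch pairing this reader with _serialize_volume_info from Example 21; the field values below are illustrative, not from the source. np.fromfile needs a real file object, so a temporary file is used rather than io.BytesIO:

import tempfile
import numpy as np

volume_info = {
    'head': np.array([2, 0, 20]),
    'valid': '1  # volume info valid',
    'filename': 'vol.mgz',
    'volume': np.array([256, 256, 256]),
    'voxelsize': np.array([1.0, 1.0, 1.0]),
    'xras': np.array([-1.0, 0.0, 0.0]),
    'yras': np.array([0.0, 0.0, -1.0]),
    'zras': np.array([0.0, 1.0, 0.0]),
    'cras': np.array([0.0, 0.0, 0.0]),
}
with tempfile.TemporaryFile() as f:
    f.write(_serialize_volume_info(volume_info))
    f.seek(0)
    round_trip = _read_volume_info(f)
assert np.array_equal(round_trip['head'], [2, 0, 20])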
Example #31
def _fit_xdawn(epochs_data, y, n_components, reg=None, signal_cov=None,
               events=None, tmin=0., sfreq=1., method_params=None, info=None):
    """Fit filters and coefs using Xdawn Algorithm.

    Xdawn is a spatial filtering method designed to improve the signal
    to signal + noise ratio (SSNR) of the event related responses. Xdawn was
    originally designed for P300 evoked potential by enhancing the target
    response with respect to the non-target response. This implementation is a
    generalization to any type of event related response.

    Parameters
    ----------
    epochs_data : array, shape (n_epochs, n_channels, n_times)
        The epochs data.
    y : array, shape (n_epochs)
        The epochs class.
    n_components : int (default 2)
        The number of components to decompose the signals.
    reg : float | str | None (default None)
        If not None (same as ``'empirical'``, default), allow
        regularization for covariance estimation.
        If float, shrinkage is used (0 <= shrinkage <= 1).
        For str options, ``reg`` will be passed as ``method`` to
        :func:`mne.compute_covariance`.
    signal_cov : None | Covariance | array, shape (n_channels, n_channels)
        The signal covariance used for whitening of the data.
        if None, the covariance is estimated from the epochs signal.
    events : array, shape (n_epochs, 3)
        The epochs events, used to correct for epochs overlap.
    tmin : float
        Epochs starting time. Only used if events is passed to correct for
        epochs overlap.
    sfreq : float
        Sampling frequency.  Only used if events is passed to correct for
        epochs overlap.

    Returns
    -------
    filters : array, shape (n_channels, n_channels)
        The Xdawn components used to decompose the data for each event type.
    patterns : array, shape (n_channels, n_channels)
        The Xdawn patterns used to restore the signals for each event type.
    evokeds : array, shape (n_class, n_components, n_times)
        The independent evoked responses per condition.
    """
    n_epochs, n_channels, n_times = epochs_data.shape

    classes = np.unique(y)

    # XXX Eventually this could be made to deal with rank deficiency properly
    # by exposing this "rank" parameter, but this will require refactoring
    # the linalg.eigh call to operate in the lower-dimension
    # subspace, then project back out.

    # Retrieve or compute whitening covariance
    if signal_cov is None:
        signal_cov = _regularized_covariance(
            np.hstack(epochs_data), reg, method_params, info, rank='full')
    elif isinstance(signal_cov, Covariance):
        signal_cov = signal_cov.data
    if not isinstance(signal_cov, np.ndarray) or (
            not np.array_equal(signal_cov.shape,
                               np.tile(epochs_data.shape[1], 2))):
        raise ValueError('signal_cov must be None, a covariance instance, '
                         'or an array of shape (n_chans, n_chans)')

    # Get prototype events
    if events is not None:
        evokeds, toeplitzs = _least_square_evoked(
            epochs_data, events, tmin, sfreq)
    else:
        evokeds, toeplitzs = list(), list()
        for c in classes:
            # Prototyped response for each class
            evokeds.append(np.mean(epochs_data[y == c, :, :], axis=0))
            toeplitzs.append(1.)

    filters = list()
    patterns = list()
    for evo, toeplitz in zip(evokeds, toeplitzs):
        # Estimate covariance matrix of the prototype response
        evo = np.dot(evo, toeplitz)
        evo_cov = _regularized_covariance(evo, reg, method_params, info,
                                          rank='full')

        # Fit spatial filters
        try:
            evals, evecs = linalg.eigh(evo_cov, signal_cov)
        except np.linalg.LinAlgError as exp:
            raise ValueError('Could not compute eigenvalues, ensure '
                             'proper regularization (%s)' % (exp,))
        evecs = evecs[:, np.argsort(evals)[::-1]]  # sort eigenvectors
        evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
        _patterns = np.linalg.pinv(evecs.T)
        filters.append(evecs[:, :n_components].T)
        patterns.append(_patterns[:, :n_components].T)

    filters = np.concatenate(filters, axis=0)
    patterns = np.concatenate(patterns, axis=0)
    evokeds = np.array(evokeds)
    return filters, patterns, evokeds
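A minimal synthetic-call sketch, not from the source: the shapes follow the docstring above, and it assumes _fit_xdawn and mne's private helpers it relies on are importable in the calling scope.

import numpy as np

rng = np.random.RandomState(42)
epochs_data = rng.randn(20, 8, 50)   # (n_epochs, n_channels, n_times)
y = np.repeat([1, 2], 10)            # one class label per epoch
filters, patterns, evokeds = _fit_xdawn(epochs_data, y, n_components=2)
# filters/patterns stack n_components spatial filters per class;
# evokeds holds one averaged response per class.
print(filters.shape, patterns.shape, evokeds.shape)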
Example #32
def check_y_valid_values_for_pairs(y):
  """Checks that y values are in [-1, 1]"""
  if not np.array_equal(np.abs(y), np.ones_like(y)):
    raise ValueError("When training on pairs, the labels (y) should contain "
                     "only values in [-1, 1]. Found an incorrect value.")
Example #33
def test_normalize_distribution_invalid_input(distribution):
    _, f         = distribution
    f_normalized = normalize_distribution(f)
    assert np.array_equal(f_normalized, f)
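normalize_distribution itself is not shown in the source; a minimal sketch consistent with the test's contract (invalid input is returned unchanged), assuming valid distributions are scaled to unit sum. The name and body are assumptions:

import numpy as np

def normalize_distribution_sketch(f):
    # Hypothetical: return f unchanged when it cannot be normalized
    # (non-finite or non-positive sum), otherwise scale to unit sum.
    total = np.sum(f)
    if not np.isfinite(total) or total <= 0:
        return f
    return f / total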
Example #34
def losscp(values):
    """Return 1 if values is already sorted in ascending order, else 0."""
    if np.array_equal(np.array(values), np.sort(values)):
        return 1
    else:
        return 0
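A quick check of the predicate:

print(losscp([1, 2, 3]))  # 1: already sorted ascending
print(losscp([3, 1, 2]))  # 0: sorting would reorder the input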
Example #35
    def test_simple_grid(self):
        text_grid = self.text_grid
        result_grid = self.result_grid
        goal_building = GoalBuilding2D(text_grid)
        assert goal_building
        assert np.array_equal(goal_building.grid, result_grid)
Example #36
                    if (k % 6) % 2 == 0:
                        if stats.mode(data_int[i], axis=None)[0][0] == 0:
                            bounds_learned[int(k / 6), k % 6] = 0
                        else:
                            bounds_learned[int(k / 6),
                                           k % 6] = np.min(data_int[i])
                    if (k % 6) % 2 != 0:
                        if stats.mode(data_int[i], axis=None)[0][0] == 0:
                            bounds_learned[int(k / 6), k % 6] = 0
                        else:
                            bounds_learned[int(k / 6),
                                           k % 6] = np.max(data_int[i])
                    k += 1
            bounds_learned = bounds_learned.astype(np.int64)

            if not np.array_equal(bounds_learned, bounds_prev):
                totSam1, fp, LeConsReject, constrRej1, constrRej2, objVal2 = gfl.generateSample2(
                    num_nurses, num_days, num_shifts, orderingNotImp, numSam,
                    num_constrType, constrList, bounds_learned, bounds)
                totSam2, fn, TrConsReject, constrRej3, constrRej4, objVal1 = gfl.generateSample1(
                    num_nurses, num_days, num_shifts, orderingNotImp, numSam,
                    num_constrType, constrList, bounds, bounds_learned)
            else:
                totSam1, fp, LeConsReject, constrRej1, constrRej2, objVal2 = totSam1_prev, fp_prev, LeConsReject_prev, constrRej1_prev, constrRej2_prev, objVal2_prev
                totSam2, fn, TrConsReject, constrRej3, constrRej4, objVal1 = totSam2_prev, fn_prev, TrConsReject_prev, constrRej3_prev, constrRej4_prev, objVal1_prev
            totSam2_prev, fn_prev, TrConsReject_prev, constrRej3_prev, constrRej4_prev, objVal1_prev = totSam2, fn, TrConsReject, constrRej3, constrRej4, objVal1
            totSam1_prev, fp_prev, LeConsReject_prev, constrRej1_prev, constrRej2_prev, objVal2_prev = totSam1, fp, LeConsReject, constrRej1, constrRej2, objVal2
            bounds_prev = bounds_learned

            row = []
            row.extend([num_nurses])
Example #37
    def get_next_state_and_cost(self,s,u):
        cost = 0
        r_curr = self.r.copy()

        pos = s[0:2]
        res = s[2]
        s_next = s.copy()
        #extract current action
        a = np.argmax(self.get_action(u))  

        # print "current state: " + str(s)
        # print "action: " + str(a)
        # print "current resources: " + str(r_curr)
        
        if a < 4:
            #movement action
            # print "movement action"
            next_pos = self.m[a] + pos
            if next_pos[0] < 0 or next_pos[0] > 6 or next_pos[1] < 0 or next_pos[1] > 6:
                #wall collision
                return s_next,self.collision_with_wall_cost
            else:
                s_next[0:2] = next_pos
                if res:
                    cost = self.travel_with_resource_cost
                else:
                    cost = self.travel_empty_cost
                return s_next,cost
        else:
            # print "pick/place action"
            if a == 4:
                #pick action
                if res:
                    #trying to pick when full
                    return s_next,self.pick_when_full_cost
                else:
                    #check if picked in resource position
                    if any(np.array_equal(pos, k) for k in self.r):
                        # print "picked up resource"
                        #remove resource from r list  
                        for i,k in enumerate(self.r):
                            if np.array_equal(pos, k):
                                    r_curr = np.delete(r_curr,i,axis=0)
                                    break
                        
                        s_next[3:] = np.zeros(4)
                        for i,k in enumerate(r_curr):
                            if np.array_equal(self.r_perm[0], k):
                                s_next[3] = 1
                            elif np.array_equal(self.r_perm[1], k):
                                s_next[4] = 1
                            elif np.array_equal(self.r_perm[2], k):
                                s_next[5] = 1
                            elif np.array_equal(self.r_perm[3], k):
                                s_next[6] = 1
                            else:
                                pass
   
                        cost = self.pick_when_empty_cost
                        s_next[2] = 1
                    else:
                        # print "dud pick"
                        s_next[2] = 0
                        cost = self.pick_dud_cost

                    self.r = r_curr
                    
                    return s_next,cost
            else:
                #drop action
                if res:
                    #drop carrying resource
                    if np.array_equal(pos, self.home):
                        # print "dropped in home position - congrats"
                        res = 0
                        cost = self.drop_success_cost
                        s_next[2] = 0
                    else:
                        # print "dropped at wrong spot"
                        res = 0
                        #add resource to new spot
                        r_curr = np.append(r_curr,[pos],axis=0)
                        # print r_curr
                        self.r = r_curr
                        cost = self.drop_wrong_spot_cost
                        s_next[2] = 0
                    return s_next,cost
                else:
                    # print "dropped nothing"
                    cost = self.drop_nothing_cost
                    return s_next,cost
Example #38
    def animate(self,i):
        #offset
        self.ax.clear()

        xo,yo = 0.5,0.5
        borders_x = [0,0,7,7,0]
        borders_y = [0,7,7,0,0]     
        r = self.r_sim.copy()
        s = self.sim_result[:,i]

        
        robot_x_empty = []
        robot_y_empty = []
        robot_x_full = []
        robot_y_full = []

        #robot1
        if self.s_prev[2] == 0 and s[2] == 1:
            #robot 1 picked up resource
            for j,k in enumerate(self.r_sim):
                if np.array_equal(s[0:2], k):
                    r = np.delete(r,j,axis=0)
                    break
            robot_x_full.append(s[0]+ xo)
            robot_y_full.append(s[1]+ yo)
        elif self.s_prev[2] == 1 and s[2] == 0:
            #robot1 dropped resource
            r = np.append(r,[s[0:2]],axis=0)
            robot_x_empty.append(s[0]+ xo)
            robot_y_empty.append(s[1]+ yo)
        elif self.s_prev[2] == 1 and s[2] == 1:
            robot_x_full.append(s[0]+ xo)
            robot_y_full.append(s[1]+ yo)
        elif self.s_prev[2] == 0 and s[2] == 0:
            robot_x_empty.append(s[0]+ xo)
            robot_y_empty.append(s[1]+ yo)

        self.s_prev = s.copy()
        self.r_sim = r.copy()

        r = r.T
        xr = r[0] + xo
        yr = r[1] + yo
        xh,yh = 3+xo,3+yo

        #text overlays
        t1 = "Step: " + str(i+1)
        t2 = "Robot Position(x,y): (" + str(s[0]) + "," + str(s[1]) + ")" 
        if s[2]:
            t3 = "Robot carrying resource - TRUE"
        else:
            t3 = "Robot carrying resource - FALSE"
        a = int(self.sim_control[i])

        if a == 0:
            t4 = "Next action - up"
        elif a == 1:
            t4 = "Next action - down"
        elif a == 2:
            t4 = "Next action - left"
        elif a == 3:
            t4 = "Next action - right"
        elif a == 4:
            t4 = "Next action - pick"
        else:
            t4 = "Next action - drop"

       
        
        self.ax.text(0.2, 6.7, t1, ha='left', wrap=True)
        self.ax.text(0.2, 6.5, t2, ha='left', wrap=True)
        self.ax.text(0.2, 6.3, t3, ha='left', wrap=True)
        self.ax.text(0.2, 6.1, t4, ha='left', wrap=True)

        
        self.ax.scatter(robot_x_full,robot_y_full,color='red',marker ='o',s=10**2.5,alpha = 0.2)
        self.ax.scatter(robot_x_empty,robot_y_empty,color='green',marker ='o',s=10**2.5,alpha = 0.2)
        self.ax.scatter(xh,yh,color='blue',marker ='s',s=10**3,alpha = 0.2)
        self.ax.scatter(xr,yr,color='red',alpha = 0.2)
        self.ax.plot(borders_x,borders_y)
Example #39
def eq(x: Union[np.ndarray, list, tuple], y: Union[np.ndarray, list, tuple]) -> bool:
    """
    Determines whether x and y have the same shape and elements.
    If x or y is a list or tuple, we convert to numpy arrays.
    """
    return np.array_equal(np.array(x), np.array(y))
Example #40
def equals(x: Union[np.ndarray, list, tuple], y: Union[np.ndarray, list, tuple]) -> bool:
    """
    Functional equivalent to eq without infix.
    """
    return np.array_equal(np.array(x), np.array(y))
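Both helpers accept any mix of arrays, lists, and tuples:

import numpy as np

print(eq([1, 2, 3], (1, 2, 3)))          # True: same shape and elements
print(equals(np.arange(3), [0, 1, 2]))   # True
print(eq([1, 2], [1, 2, 3]))             # False: shapes differ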
Example #41
    def set_probes(self,
                   probe_or_probegroup,
                   group_mode='by_probe',
                   in_place=False):
        """
        Attach a Probe to a recording.
        For this Probe.device_channel_indices is used to link contacts to recording channels.
        If some contacts of the Probe are not connected (device_channel_indices=-1)
        then the recording is "sliced" and only connected channels are kept.

        The probe order is not kept. Channel ids are re-ordered to match the channel_ids of the recording.


        Parameters
        ----------
        probe_or_probegroup: Probe, list of Probe, or ProbeGroup
            The probe(s) to be attached to the recording

        group_mode: str
            'by_probe' or 'by_shank'. Adds a grouping property to the recording based on the probes ('by_probe')
            or shanks ('by_shank')

        in_place: bool
            False by default.
            Useful internally when an extractor does self.set_probegroup(probe)

        Returns
        -------
        sub_recording: BaseRecording
            A view of the recording (ChannelSliceRecording or clone or itself)
        """
        from spikeinterface import ChannelSliceRecording

        assert group_mode in (
            'by_probe',
            'by_shank'), "'group_mode' can be 'by_probe' or 'by_shank'"

        # handle several input possibilities
        if isinstance(probe_or_probegroup, Probe):
            probegroup = ProbeGroup()
            probegroup.add_probe(probe_or_probegroup)
        elif isinstance(probe_or_probegroup, ProbeGroup):
            probegroup = probe_or_probegroup
        elif isinstance(probe_or_probegroup, list):
            assert all([isinstance(e, Probe) for e in probe_or_probegroup])
            probegroup = ProbeGroup()
            for probe in probe_or_probegroup:
                probegroup.add_probe(probe)
        else:
            raise ValueError('must give Probe or ProbeGroup or list of Probe')

        # handle not connected channels
        assert all(probe.device_channel_indices is not None for probe in probegroup.probes), \
            'Probe must have device_channel_indices'

        # this is a structured vector (dataframe-like) that handles all contact attributes
        arr = probegroup.to_numpy(complete=True)

        # keep only connected contacts (device_channel_indices != -1)
        keep = arr['device_channel_indices'] >= 0
        if np.any(~keep):
            warn(
                'The given probes have unconnected contacts: they are removed')

        arr = arr[keep]
        inds = arr['device_channel_indices']
        order = np.argsort(inds)
        inds = inds[order]
        # check
        if np.max(inds) >= self.get_num_channels():
            raise ValueError(
                'The given Probe has "device_channel_indices" that do not match the channel count'
            )
        new_channel_ids = self.get_channel_ids()[inds]
        arr = arr[order]
        arr['device_channel_indices'] = np.arange(arr.size, dtype='int64')

        # create recording : channel slice or clone or self
        if in_place:
            if not np.array_equal(new_channel_ids, self.get_channel_ids()):
                raise Exception(
                    'set_probes(in_place=True) must keep all channel ids')
            sub_recording = self
        else:
            if np.array_equal(new_channel_ids, self.get_channel_ids()):
                sub_recording = self.clone()
            else:
                sub_recording = ChannelSliceRecording(self, new_channel_ids)

        # create a vector that handles all contacts in a property
        sub_recording.set_property('contact_vector', arr, ids=None)
        # planar_contour is saved in annotations
        for probe_index, probe in enumerate(probegroup.probes):
            contour = probe.probe_planar_contour
            if contour is not None:
                sub_recording.set_annotation(
                    f'probe_{probe_index}_planar_contour',
                    contour,
                    overwrite=True)

        # duplicate positions to "locations" property
        ndim = probegroup.ndim
        locations = np.zeros((arr.size, ndim), dtype='float64')
        for i, dim in enumerate(['x', 'y', 'z'][:ndim]):
            locations[:, i] = arr[dim]
        sub_recording.set_property('location', locations, ids=None)

        # handle groups
        groups = np.zeros(arr.size, dtype='int64')
        if group_mode == 'by_probe':
            for group, probe_index in enumerate(np.unique(arr['probe_index'])):
                mask = arr['probe_index'] == probe_index
                groups[mask] = group
        elif group_mode == 'by_shank':
            assert all(probe.shank_ids is not None for probe in probegroup.probes), \
                'shank_ids is None in probe, you cannot group by shank'
            for group, a in enumerate(
                    np.unique(arr[['probe_index', 'shank_ids']])):
                mask = (arr['probe_index'] == a['probe_index']) & (
                    arr['shank_ids'] == a['shank_ids'])
                groups[mask] = group
        sub_recording.set_property('group', groups, ids=None)

        return sub_recording
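A hypothetical usage sketch, not from the source: it assumes probeinterface's generate_linear_probe is available and that recording is an existing 16-channel BaseRecording.

import numpy as np
from probeinterface import generate_linear_probe

probe = generate_linear_probe(num_elec=16)
probe.set_device_channel_indices(np.arange(16))   # wire contacts to channels
recording_with_probe = recording.set_probes(probe, group_mode='by_probe')
print(recording_with_probe.get_property('group'))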
Example #42
def evaluate(sess, enqueue_op, image_paths_placeholder, labels_placeholder,
             phase_train_placeholder, batch_size_placeholder,
             control_placeholder, embeddings, labels, image_paths,
             actual_issame, batch_size, nrof_folds, log_dir, step,
             summary_writer, stat, epoch, distance_metric, subtract_mean,
             use_flipped_images, use_fixed_image_standardization):
    start_time = time.time()
    # Run forward pass to calculate embeddings
    print('Running forward pass on LFW images')

    # Enqueue one epoch of image paths and labels
    nrof_embeddings = len(actual_issame) * 2  # nrof_pairs * nrof_images_per_pair
    nrof_flips = 2 if use_flipped_images else 1
    nrof_images = nrof_embeddings * nrof_flips
    labels_array = np.expand_dims(np.arange(0, nrof_images), 1)
    image_paths_array = np.expand_dims(
        np.repeat(np.array(image_paths), nrof_flips), 1)
    control_array = np.zeros_like(labels_array, np.int32)
    if use_fixed_image_standardization:
        control_array += np.ones_like(
            labels_array) * facenet.FIXED_STANDARDIZATION
    if use_flipped_images:
        # Flip every second image
        control_array += (labels_array % 2) * facenet.FLIP
    sess.run(
        enqueue_op, {
            image_paths_placeholder: image_paths_array,
            labels_placeholder: labels_array,
            control_placeholder: control_array
        })

    embedding_size = int(embeddings.get_shape()[1])
    assert nrof_images % batch_size == 0, 'The number of LFW images must be an integer multiple of the LFW batch size'
    nrof_batches = nrof_images // batch_size
    emb_array = np.zeros((nrof_images, embedding_size))
    lab_array = np.zeros((nrof_images, ))
    for i in range(nrof_batches):
        feed_dict = {
            phase_train_placeholder: False,
            batch_size_placeholder: batch_size
        }
        emb, lab = sess.run([embeddings, labels], feed_dict=feed_dict)
        lab_array[lab] = lab
        emb_array[lab, :] = emb
        if i % 10 == 9:
            print('.', end='')
            sys.stdout.flush()
    print('')
    embeddings = np.zeros((nrof_embeddings, embedding_size * nrof_flips))
    if use_flipped_images:
        # Concatenate embeddings for flipped and non flipped version of the images
        embeddings[:, :embedding_size] = emb_array[0::2, :]
        embeddings[:, embedding_size:] = emb_array[1::2, :]
    else:
        embeddings = emb_array

    assert np.array_equal(lab_array, np.arange(nrof_images)), \
        'Wrong labels used for evaluation, possibly caused by training examples left in the input pipeline'
    _, _, accuracy, val, val_std, far = lfw.evaluate(
        embeddings,
        actual_issame,
        nrof_folds=nrof_folds,
        distance_metric=distance_metric,
        subtract_mean=subtract_mean)

    print('Accuracy: %2.5f+-%2.5f' % (np.mean(accuracy), np.std(accuracy)))
    print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))
    lfw_time = time.time() - start_time
    # Add validation loss and accuracy to summary
    summary = tf.Summary()
    #pylint: disable=maybe-no-member
    summary.value.add(tag='lfw/accuracy', simple_value=np.mean(accuracy))
    summary.value.add(tag='lfw/val_rate', simple_value=val)
    summary.value.add(tag='time/lfw', simple_value=lfw_time)
    summary_writer.add_summary(summary, step)
    with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
        f.write('%d\t%.5f\t%.5f\n' % (step, np.mean(accuracy), val))
    stat['lfw_accuracy'][epoch - 1] = np.mean(accuracy)
    stat['lfw_valrate'][epoch - 1] = val
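
# A minimal sketch (hypothetical data, not part of evaluate) of the flipped-
# image bookkeeping above: each image is enqueued twice, the original landing
# at even label indices and the flipped copy at odd ones, and the two
# embeddings are then concatenated side by side per image.
import numpy as np

emb_array = np.arange(12, dtype='float64').reshape(6, 2)  # 3 images x 2 rows
embedding_size = emb_array.shape[1]
nrof_embeddings = emb_array.shape[0] // 2

embeddings = np.zeros((nrof_embeddings, embedding_size * 2))
embeddings[:, :embedding_size] = emb_array[0::2, :]  # original images
embeddings[:, embedding_size:] = emb_array[1::2, :]  # flipped copies
print(embeddings.shape)  # (3, 4)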
Example No. 43
def test_augmentations_wont_change_float_input(augmentation_cls, params,
                                               float_image):
    float_image_copy = float_image.copy()
    aug = augmentation_cls(p=1, **params)
    aug(image=float_image)
    assert np.array_equal(float_image, float_image_copy)
def find_storm_objects(
        all_storm_ids, all_times_unix_sec, storm_ids_to_keep,
        times_to_keep_unix_sec, allow_missing=False):
    """Finds storm objects.

    N = total number of storm objects
    n = number of storm objects to keep

    :param all_storm_ids: length-N list of storm IDs (strings).
    :param all_times_unix_sec: length-N numpy array of valid times.
    :param storm_ids_to_keep: length-n list of storm IDs (strings).
    :param times_to_keep_unix_sec: length-n numpy array of valid times.
    :param allow_missing: Boolean flag.  If True, this method will allow storm
        objects to be missing (i.e., some objects defined by `storm_ids_to_keep`
        and `times_to_keep_unix_sec` may not be present in `all_storm_ids` and
        `all_times_unix_sec`).  If False, this method will error out if it finds
        missing objects.
    :return: relevant_indices: length-n numpy array of indices.
        [all_storm_ids[k] for k in relevant_indices] = storm_ids_to_keep
        all_times_unix_sec[relevant_indices] = times_to_keep_unix_sec
    :raises: ValueError: if `all_storm_ids` and `all_times_unix_sec` contain any
        duplicate pairs.
    :raises: ValueError: if any desired storm object is not found.
    """

    error_checking.assert_is_boolean(allow_missing)

    error_checking.assert_is_numpy_array(
        numpy.array(all_storm_ids), num_dimensions=1)
    num_storm_objects_total = len(all_storm_ids)
    error_checking.assert_is_numpy_array(
        all_times_unix_sec,
        exact_dimensions=numpy.array([num_storm_objects_total]))

    error_checking.assert_is_numpy_array(
        numpy.array(storm_ids_to_keep), num_dimensions=1)
    num_storm_objects_to_keep = len(storm_ids_to_keep)
    error_checking.assert_is_numpy_array(
        times_to_keep_unix_sec,
        exact_dimensions=numpy.array([num_storm_objects_to_keep]))

    all_object_ids = [
        '{0:s}_{1:d}'.format(all_storm_ids[i], all_times_unix_sec[i])
        for i in range(num_storm_objects_total)]

    object_ids_to_keep = [
        '{0:s}_{1:d}'.format(storm_ids_to_keep[i],
                             times_to_keep_unix_sec[i])
        for i in range(num_storm_objects_to_keep)]

    this_num_unique = len(set(all_object_ids))
    if this_num_unique != len(all_object_ids):
        error_string = (
            'Only {0:d} of {1:d} original storm objects are unique.'
        ).format(this_num_unique, len(all_object_ids))
        raise ValueError(error_string)

    all_object_ids_numpy = numpy.array(all_object_ids, dtype='object')
    object_ids_to_keep_numpy = numpy.array(object_ids_to_keep, dtype='object')

    sort_indices = numpy.argsort(all_object_ids_numpy)
    relevant_indices = numpy.searchsorted(
        all_object_ids_numpy[sort_indices], object_ids_to_keep_numpy,
        side='left'
    ).astype(int)

    relevant_indices[relevant_indices < 0] = 0
    relevant_indices[
        relevant_indices >= len(all_object_ids_numpy)
    ] = len(all_object_ids_numpy) - 1
    relevant_indices = sort_indices[relevant_indices]

    if allow_missing:
        bad_indices = numpy.where(
            all_object_ids_numpy[relevant_indices] != object_ids_to_keep_numpy
        )[0]
        relevant_indices[bad_indices] = -1
        return relevant_indices

    if not numpy.array_equal(all_object_ids_numpy[relevant_indices],
                             object_ids_to_keep_numpy):
        missing_object_flags = (
            all_object_ids_numpy[relevant_indices] != object_ids_to_keep_numpy)

        error_string = (
            '{0:d} of {1:d} desired storm objects are missing.  Their ID-time '
            'pairs are listed below.\n{2:s}'
        ).format(numpy.sum(missing_object_flags), num_storm_objects_to_keep,
                 str(object_ids_to_keep_numpy[missing_object_flags]))
        raise ValueError(error_string)

    return relevant_indices
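
# A minimal sketch (toy data, not from the function above) of the composite-key
# lookup in find_storm_objects: sort the "<id>_<time>" keys once, locate each
# desired key with np.searchsorted, then flag non-matching hits as missing.
import numpy as np

all_ids = np.array(['a_1', 'b_2', 'c_3', 'd_4'], dtype='object')
wanted = np.array(['c_3', 'a_1', 'e_5'], dtype='object')  # 'e_5' is missing

sort_indices = np.argsort(all_ids)
positions = np.searchsorted(
    all_ids[sort_indices], wanted, side='left').astype(int)
positions[positions >= len(all_ids)] = len(all_ids) - 1  # clamp overflow
relevant = sort_indices[positions]
relevant[all_ids[relevant] != wanted] = -1  # allow_missing behaviour

print(relevant)  # -> [2 0 -1]
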
    def fit_decomposition_method(self, training_set_inputs, training_set_outputs,
                                 test_set_inputs, test_set_outputs,
                                 max_number_of_training_iterations=1000,
                                 verbose=False):
        print()
        print("Optimizing the neural network...")
        # Reinitializing the number of function and gradient evaluations 
        self.NbrFuncEval = 0
        self.NbrGradEval = 0
        self.NormGradAtOptimalPoint = 0
        # Setting the number of inputs per neuron in the hidden layer
        self.number_of_inputs_per_neuron = training_set_inputs.shape[1]
        # Getting the training set size
        P = len(training_set_inputs)
        # Getting the number of neurons in the hidden layer
        N = self.hidden_layer_sizes
        # Getting the value of the regularization parameter rho used in the global error computation
        rho = self.rho
        # Getting the value of the spread sigma of the activation function
        sigma = self.sigma
        # Getting the solver to use
        solver = self.solver
        # Input data
        X = training_set_inputs
        # Output data
        Y = training_set_outputs
        # Preparing the initial guesses for the parameters to be minimized
        self.__random_start()
        # Computing the initial output on training data
        self.result_initial_output_train = self.predict(training_set_inputs)
        start_time = time.time()
        # Starting the two-block decomposition method optimization of the MLP
        # Layer1 to layer2 weights
        v = np.asfarray(self.synaptic_weights_output_layer).flatten()
        # Inputs to layer1 weights
        w = np.asfarray(self.synaptic_weights_hidden_layer)
        # Layer1 noises
        b = list(np.asfarray(self.noises).flatten())
        result_outputs_test = self.predict(test_set_inputs)
        test_error = self.get_error(result_outputs_test, test_set_outputs)
        if (verbose):
            print ()
            print("Initial v: ", v)
            print()
            print("Initial w: ", w)
            print()
            print("Initial b: ", b)
            print()
            print("Initial test error: ", test_error)    
        i = 1
        flag = 1
        nfe = 0
        nje = 0
        err_count = 0
        best_error_and_iteration = [float("inf"), 0]
        while (flag and (i <= 100)):
            if (verbose):
                print()
                print("Iteration ", i)
            
            # Step1: minimization with respect to v
            optimized1 = minimize(self.__error_extreme, v, args=(w, b, X, Y, N, P, sigma, rho), method = solver, options=dict({'maxiter':max_number_of_training_iterations}))
            nfe += optimized1.nfev
            nje += optimized1.njev
            new_v = optimized1.x
            omega = np.asarray(b + list(np.asfarray(w).flatten()))
            
            # Step2: minimization with respect to omega (the hidden-layer weights w and biases b)
            optimized2 = minimize(self.__error_extreme2, omega, args=(new_v, X, Y, N, P, sigma, rho), method = solver, options=dict({'maxiter':max_number_of_training_iterations}))
            nfe += optimized2.nfev
            nje += optimized2.njev
            result = optimized2.x
            
            # Update the model's weights, but save the old ones first
            # so they can be restored if the test error increases
            new_w = result[self.hidden_layer_sizes:].reshape(self.number_of_inputs_per_neuron, self.hidden_layer_sizes)
            new_b = result[:self.hidden_layer_sizes]
            old_w = self.synaptic_weights_hidden_layer
            old_v = self.synaptic_weights_output_layer
            old_b = self.noises
            self.synaptic_weights_hidden_layer = new_w
            self.synaptic_weights_output_layer = new_v
            self.noises = new_b
            
            # Computing the new error and comparing with the old one; the new weights are kept only if the error improves
            result_outputs_test = self.predict(test_set_inputs)
            new_test_error = self.get_error(result_outputs_test, test_set_outputs)
            
            if (verbose):
                print ()
                print("        new v: ", new_v)
                print()
                print("        new w: ", new_w)
                print()
                print("        new w: ", new_b)
                print()
                print("        new test error: ", new_test_error)
            
            # Early stopping: quit if the weights stopped changing or the test error has increased or stayed constant several times in a row.
            if ((np.array_equal(new_v, v) and np.array_equal(new_w, w) and np.array_equal(new_b, b)) or err_count == 3):
                flag = 0
            
            # Increment this counter if the error increases or stays constant
            if (new_test_error >= test_error):
                err_count += 1
            # Reset this counter to 0 if the error decreases
            else:
                err_count = 0
            
            # Decide whether the current error is the best seen so far
            if (new_test_error < best_error_and_iteration[0]):
                best_error_and_iteration[0] = new_test_error
                best_error_and_iteration[1] = i
            # If it is not, put back the old weights
            else:
                self.synaptic_weights_hidden_layer = old_w
                self.synaptic_weights_output_layer = old_v
                self.noises = old_b
            
            v = new_v
            w = new_w
            b = list(new_b.flatten())
            test_error = new_test_error
            i += 1

        if (verbose):
            print ()
            print ()
            print ("Best computed error: ", best_error_and_iteration[0])
            print ()
            print ("Best computed error iteration: ", best_error_and_iteration[1])
            print ()
            
        self.training_time = time.time() - start_time
        self.NbrFuncEval = nfe
        self.NbrGradEval = nje
        self.NormGradAtOptimalPoint = linalg.norm(optimized2.jac)
        self.NbrOuterIter = i-1
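
# A minimal sketch (hypothetical model and data, not the class above) of the
# same two-block decomposition idea: alternate scipy.optimize.minimize over
# one block of parameters while the other block is held fixed.
import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 1.0, 50)
y = 2.0 * np.tanh(0.5 * x)  # generated by v=2.0, w=0.5

def loss(v, w):
    return np.mean((y - v * np.tanh(w * x)) ** 2)

v, w = 1.0, 1.0
for _ in range(10):
    v = minimize(lambda vv: loss(vv[0], w), [v], method='BFGS').x[0]  # block 1
    w = minimize(lambda ww: loss(v, ww[0]), [w], method='BFGS').x[0]  # block 2

print(v, w)  # should approach the generating values 2.0 and 0.5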
Example No. 46
def test_imgaug_image_only_augmentations(augmentation_cls, image, mask):
    aug = augmentation_cls(p=1)
    data = aug(image=image, mask=mask)
    assert data["image"].dtype == np.uint8
    assert data["mask"].dtype == np.uint8
    assert np.array_equal(data["mask"], mask)
Example No. 47
def compare(array1, array2, name="output"):
    diffs = np.where(array1 - array2 != 0)[0]
    print(f"{name}: {len(diffs)} differences at indexes {diffs}")
    assert np.array_equal(array1, array2)
Example No. 48
    def test_empty_input(self):
        audio = np.array([])
        aug = naa.MaskAug(sampling_rate=44100)
        augmented_audio = aug.augment(audio)

        self.assertTrue(np.array_equal(audio, augmented_audio))
def om_validate3A(data_3A, predK, mask=None):
    """Constructs a matrix that describes the relationship between the clusters
    :param data_3A: inputted data for subchallenge 3A
    :param predK: number of clusters
    :param mask: mask used
    :return:
    """
    # read in the data
    data_3A = data_3A.split('\n')
    data_3A = list(filter(None, data_3A))
    if len(data_3A) != predK:
        raise ValidationError(
            "Input file contains a different number of lines (%d) than expected (%d)"
            % (len(data_3A), predK))
    data_3A = [x.split('\t') for x in data_3A]
    for i in range(len(data_3A)):
        if len(data_3A[i]) != 2:
            raise ValidationError("Number of tab separated columns in line %d is not 2" % (i+1))
        try:
            data_3A[i][0] = int(data_3A[i][0])
            data_3A[i][1] = int(data_3A[i][1])
        except ValueError:
            raise ValidationError("Entry in line %d could not be cast as integer" % (i+1))

    if [x[0] for x in data_3A] != list(range(1, predK + 1)):
        raise ValidationError("First column must have %d entries in ascending order starting with 1" % predK)

    for i in range(len(data_3A)):
        if data_3A[i][1] not in set(range(predK+1)):
            raise ValidationError("Parent node label in line %d is not valid." % (i+1))

    # Since cluster zero is not included in file
    ad_cluster = np.zeros((len(data_3A)+1, len(data_3A)+1), dtype=int)
    # file starts at one
    for i in range(len(data_3A)):
        ad_cluster[data_3A[i][1]][data_3A[i][0]] = 1
    # fill in a matrix which tells you whether or not one cluster is a descendant of another
    for i in range(len(data_3A)+1):
        for j in range(len(data_3A)+1):
            if (ad_cluster[j][i] == 1):
                for k in range(len(data_3A)+1):
                    if(ad_cluster[k][j] == 1):
                        ad_cluster[k][i] = 1
                    if(ad_cluster[i][k] == 1):
                        ad_cluster[j][k] = 1

    # Check that all nodes are connected; if not, run the closure pass once more
    if not np.array_equal(np.nonzero(ad_cluster[0])[0],
                          np.arange(1, len(data_3A) + 1)):
        for i in range(len(data_3A)+1):
            for j in range(len(data_3A)+1):
                if (ad_cluster[j][i] == 1):
                    for k in range(len(data_3A)+1):
                        if(ad_cluster[k][j] == 1):
                            ad_cluster[k][i] = 1
                        if(ad_cluster[i][k] == 1):
                            ad_cluster[j][k] = 1

    if not np.array_equal(np.nonzero(ad_cluster[0])[0],
                          np.arange(1, len(data_3A) + 1)):
        raise ValidationError("Root of phylogeny not ancestor of all clusters / Tree is not connected.")

    # print ad_cluster
    ad_cluster = np.delete(ad_cluster, 0, 0)
    ad_cluster = np.delete(ad_cluster, 0, 1)

    return ad_cluster
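
# A minimal Warshall-style sketch (toy tree, not subchallenge data) of the
# descendant closure computed above: starting from a parent->child adjacency
# matrix, propagate edges until row 0 marks every descendant of the root.
import numpy as np

ad = np.zeros((4, 4), dtype=int)
ad[0, 1] = ad[1, 2] = ad[0, 3] = 1  # tree: 0 -> 1 -> 2 and 0 -> 3

for k in range(4):
    for i in range(4):
        for j in range(4):
            if ad[i, k] and ad[k, j]:
                ad[i, j] = 1

print(np.nonzero(ad[0])[0])  # -> [1 2 3]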
Example No. 50
def run_pipe_with_loader_ts(cache_loc=None):

    steps = []

    # Loader - transforms the (50, 2) input to (50, 8),
    # as each DataFile contains np.zeros((2, 2))
    file_mapping = get_fake_mapping(100)

    loader = BPtLoader(estimator=Identity(),
                       inds=[0, 1],
                       file_mapping=file_mapping,
                       n_jobs=1,
                       fix_n_jobs=False,
                       cache_loc=None)
    steps.append(('loader', loader))

    # Add transformer to ones
    # input here should be (5, 8) of real val, original
    # inds of 0 should work on half
    to_ones = ToFixedTransformer(to=1)
    st = ScopeTransformer(estimator=to_ones, inds=[0])
    steps.append(('to_ones', st))

    # Add basic linear regression model
    # Original inds should work on all
    model = BPtModel(estimator=LinearRegression(), inds=[0, 1])
    param_dists = {'estimator__fit_intercept': Choice([True, False])}
    search_model = NevergradSearchCV(estimator=model,
                                     ps=get_param_search(),
                                     param_distributions=param_dists)

    steps.append(('model', search_model))

    # Create pipe
    pipe = BPtPipeline(steps=steps,
                       cache_loc=cache_loc)

    X = np.arange(100).reshape((50, 2))
    y = np.ones(50)

    pipe.fit(X, y, fit_index=np.arange(50))

    # Make sure fit worked correctly
    assert pipe[0].n_features_in_ == 2
    assert pipe[1].n_features_in_ == 8
    assert pipe[1].estimator_.n_features_in_ == 4
    assert len(pipe.mapping_[0]) == 4
    assert len(pipe.mapping_[1]) == 4
    assert 7 in pipe.mapping_[1]

    # Make sure reverse transform works
    X_df = pd.DataFrame(X)

    X_trans = pipe.transform_df(X_df)

    assert X_trans.shape == (50, 8)
    assert X_trans.loc[4, '1_3'] == 9
    assert X_trans.loc[1, '1_2'] == 3
    assert X_trans.loc[4, '0_0'] == 1
    assert X_trans.loc[0, '0_0'] == 1

    # Make sure predict works,
    # seems safe to assume model
    # can learn to predict 1's
    # as all targets are 1's.
    # but may need to change?
    preds = pipe.predict(X)
    assert np.all(preds > .99)

    # Check bpt pipeline coef attribute
    assert np.array_equal(pipe[-1].best_estimator_.coef_,
                          pipe.coef_)

    # Clean fake file mapping
    clean_fake_mapping(100)

    return pipe
Example No. 51
def compare_images(expected, actual, tol, in_decorator=False):
    """
    Compare two "image" files checking differences within a tolerance.

    The two given filenames may point to files which are convertible to
    PNG via the `.converter` dictionary. The underlying RMS is calculated
    with the `.calculate_rms` function.

    Parameters
    ----------
    expected : str
        The filename of the expected image.
    actual : str
        The filename of the actual image.
    tol : float
        The tolerance (a color value difference, where 255 is the
        maximal difference).  The test fails if the average pixel
        difference is greater than this value.
    in_decorator : bool
        Determines the output format. If called from image_comparison
        decorator, this should be True. (default=False)

    Returns
    -------
    comparison_result : None or dict or str
        Return *None* if the images are equal within the given tolerance.

        If the images differ, the return value depends on *in_decorator*.
        If *in_decorator* is true, a dict with the following entries is
        returned:

        - *rms*: The RMS of the image difference.
        - *expected*: The filename of the expected image.
        - *actual*: The filename of the actual image.
        - *diff*: The filename of the difference image.
        - *tol*: The comparison tolerance.

        Otherwise, a human-readable multi-line string representation of this
        information is returned.

    Examples
    --------
    ::

        img1 = "./baseline/plot.png"
        img2 = "./output/plot.png"
        compare_images(img1, img2, 0.001)

    """
    from matplotlib import _png

    if not os.path.exists(actual):
        raise Exception("Output image %s does not exist." % actual)

    if os.stat(actual).st_size == 0:
        raise Exception("Output image file %s is empty." % actual)

    # Convert the image to png
    extension = expected.split('.')[-1]

    if not os.path.exists(expected):
        raise IOError('Baseline image %r does not exist.' % expected)

    if extension != 'png':
        actual = convert(actual, False)
        expected = convert(expected, True)

    # open the image files and remove the alpha channel (if it exists)
    expected_image = _png.read_png_int(expected)
    actual_image = _png.read_png_int(actual)
    expected_image = expected_image[:, :, :3]
    actual_image = actual_image[:, :, :3]

    actual_image, expected_image = crop_to_same(actual, actual_image, expected,
                                                expected_image)

    diff_image = make_test_filename(actual, 'failed-diff')

    if tol <= 0:
        if np.array_equal(expected_image, actual_image):
            return None

    # convert to signed integers, so that the images can be subtracted without
    # overflow
    expected_image = expected_image.astype(np.int16)
    actual_image = actual_image.astype(np.int16)

    rms = calculate_rms(expected_image, actual_image)

    if rms <= tol:
        return None

    save_diff_image(expected, actual, diff_image)

    results = dict(rms=rms,
                   expected=str(expected),
                   actual=str(actual),
                   diff=str(diff_image),
                   tol=tol)

    if not in_decorator:
        # Then the results should be a string suitable for stdout.
        template = [
            'Error: Image files did not match.',
            'RMS Value: {rms}',
            'Expected:  \n    {expected}',
            'Actual:    \n    {actual}',
            'Difference:\n    {diff}',
            'Tolerance: \n    {tol}',
        ]
        results = '\n  '.join([line.format(**results) for line in template])
    return results
Example No. 52
"""Validate that the output produced by integration testing on 'label-maker  package' matches our expectations"""
import numpy as np

data = np.load('integration/data.npz')
# validate our image data with sums and shapes
assert np.sum(data['x_train']) == 144752757
assert np.sum(data['x_test']) == 52758414
assert data['x_train'].shape == (6, 256, 256, 3)
assert data['x_test'].shape == (2, 256, 256, 3)

# validate our label data with exact matches
expected_y_train = np.array(
    [[0, 0, 0, 0, 0, 0, 1],
     [0, 0, 0, 0, 0, 0, 1],
     [0, 0, 0, 0, 0, 0, 1],
     [0, 1, 1, 0, 0, 0, 1],
     [0, 0, 0, 0, 1, 1, 1],
     [0, 0, 0, 0, 0, 0, 1]]
)
assert np.array_equal(data['y_train'], expected_y_train)

expected_y_test = np.array(
    [[0, 0, 0, 0, 0, 0, 1],
     [0, 0, 0, 0, 0, 0, 1]]
)
assert np.array_equal(data['y_test'], expected_y_test)
Example No. 53
    def testCalculateFirstIntentStepRatio(self, goalList, groundTruthRatio):
        firstIntentStepRatio = calculateFirstIntentStepRatio(goalList)
        truthValue = np.array_equal(firstIntentStepRatio, groundTruthRatio)
        self.assertTrue(truthValue)
Example No. 54
def arreq_in_list(myarr, list_arrays):
    # Check whether myarr appears in list_arrays (element-wise equality)
    return next((True for elem in list_arrays if np.array_equal(elem, myarr)), False)
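
# Usage sketch with hypothetical arrays: the generator short-circuits on the
# first element-wise match instead of scanning the whole list.
import numpy as np

arrays = [np.array([1, 2]), np.array([3, 4])]
print(arreq_in_list(np.array([3, 4]), arrays))  # True
print(arreq_in_list(np.array([5, 6]), arrays))  # False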
Example No. 55
    def testCalculateFinalGoal(self, bean1GridX, bean1GridY, trajectory,
                               groundTruthAnswer):
        finalGoal = calculateFinalGoal(bean1GridX, bean1GridY, trajectory)
        truthValue = np.array_equal(finalGoal, groundTruthAnswer)
        self.assertTrue(truthValue)
Example No. 56
def maybe_cast_to_integer_array(arr, dtype, copy=False):
    """
    Takes any dtype and returns the casted version, raising for when data is
    incompatible with integer/unsigned integer dtypes.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    arr : array-like
        The array to cast.
    dtype : str, np.dtype
        The integer dtype to cast the array to.
    copy : bool, default False
        Whether to make a copy of the array before returning.

    Returns
    -------
    int_arr : ndarray
        An array of integer or unsigned integer dtype

    Raises
    ------
    OverflowError : the dtype is incompatible with the data
    ValueError : loss of precision has occurred during casting

    Examples
    --------
    If you try to coerce negative values to unsigned integers, it raises:

    >>> Series([-1], dtype="uint64")
    Traceback (most recent call last):
        ...
    OverflowError: Trying to coerce negative values to unsigned integers

    Also, if you try to coerce float values to integers, it raises:

    >>> Series([1, 2, 3.5], dtype="int64")
    Traceback (most recent call last):
        ...
    ValueError: Trying to coerce float values to integers
    """

    try:
        if not hasattr(arr, "astype"):
            casted = np.array(arr, dtype=dtype, copy=copy)
        else:
            casted = arr.astype(dtype, copy=copy)
    except OverflowError:
        raise OverflowError("The elements provided in the data cannot all be "
                            "casted to the dtype {dtype}".format(dtype=dtype))

    if np.array_equal(arr, casted):
        return casted

    # We do this casting to allow for proper
    # data and dtype checking.
    #
    # We didn't do this earlier because NumPy
    # doesn't handle `uint64` correctly.
    arr = np.asarray(arr)

    if is_unsigned_integer_dtype(dtype) and (arr < 0).any():
        raise OverflowError("Trying to coerce negative values "
                            "to unsigned integers")

    if is_integer_dtype(dtype) and (is_float_dtype(arr) or
                                    is_object_dtype(arr)):
        raise ValueError("Trying to coerce float values to integers")
Example No. 57
# %timeit triads_classification_tree()

# Compatibility with several conventions
triad_order_bct = 3 + np.array(
    [1, 0, 2, 5, 3, 4, 6, 10, 7, 8, 9, 11, 12])  # j.neuroimage.2009.10.003
triad_order_egger2014 = 3 + np.array(
    [12, 6, 11, 8, 9, 10, 3, 7, 0, 4, 5, 1, 2])  # fnana.2014.00129
triad_order_nn4576 = 3 + np.arange(13)  # nn.4576


def index_all(elements, array):
    return np.array([np.where(array == x)[0][0] for x in elements])


conv_triad_order_nn4576_to_bct = index_all(triad_order_bct, triad_order_nn4576)
assert np.array_equal(triad_order_nn4576[conv_triad_order_nn4576_to_bct],
                      triad_order_bct)
conv_triad_order_nn4576_to_egger2014 = index_all(triad_order_egger2014,
                                                 triad_order_nn4576)
assert np.array_equal(triad_order_nn4576[conv_triad_order_nn4576_to_egger2014],
                      triad_order_egger2014)
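
# Usage sketch of index_all with toy values: conv holds, for each element of
# b, its position in a, so a[conv] reproduces b.
a = np.array([10, 20, 30])
b = np.array([30, 10, 20])
conv = index_all(b, a)
print(conv)                        # -> [2 0 1]
print(np.array_equal(a[conv], b))  # True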


def identify_triad_node_roles():
    triads = triad_patterns()

    node_roles = []
    for i in range(len(triads)):
        triad = triads[i]

        triad_node_roles = [0, 1, 2]
        if np.array_equal(triad, triad[np.ix_([1, 0, 2], [1, 0, 2])]):
Example No. 58
    def testCalculateFirstIntentGoalAccord(self, goalList, groundTruthAnswer):
        isFirstIntentGoalAccord = calculateFirstIntentGoalAccord(goalList)
        truthValue = np.array_equal(isFirstIntentGoalAccord, groundTruthAnswer)
        self.assertTrue(truthValue)
Example No. 59
obpStream = ioWrite.Open('HeatMap2D_py.bp', adios2.Mode.Write)
obpStream.Put(varTemperature, temperatures)
obpStream.Close()


if rank == 0:
    # ADIOS2 read
    ioRead = adios.DeclareIO("ioReader")
    ibpStream = ioRead.Open('HeatMap2D_py.bp', adios2.Mode.Read, MPI.COMM_SELF)
    var_inTemperature = ioRead.InquireVariable("temperature2D")

    if(var_inTemperature is False):
        raise ValueError('var_inTemperature is False')

    assert var_inTemperature is not None
    readOffset = [2, 2]
    readSize = [4, 4]

    var_inTemperature.SetSelection([readOffset, readSize])
    inTemperatures = np.zeros(readSize, dtype=np.int64)
    ibpStream.Get(var_inTemperature, inTemperatures, adios2.Mode.Sync)
    ibpStream.Close()

    # print('Incoming temperature map\n', inTemperatures)
    expected = np.array([[22, 23, 24, 25],
                         [32, 33, 34, 35],
                         [42, 43, 44, 45],
                         [52, 53, 54, 55]], np.int64)
    assert np.array_equal(inTemperatures, expected)
Example No. 60
    def testCalculateGoalCommit(self, goalList, groundTruthAnswer):
        isGoalCommit = calculateGoalCommit(goalList)
        truthValue = np.array_equal(isGoalCommit, groundTruthAnswer)
        self.assertTrue(truthValue)