Example #1
 def test_copy(self):
     g = self.Grid
     h = g.copy()
     npt.assert_(np.array_equiv(g.domain[-1] - h.domain[-1], 0))
     g.domain[-1] += 1
     npt.assert_(not np.array_equiv(g.domain[-1] - h.domain[-1], 0))
     return 0
Example #2
def test_RGB():
    """
    Test multi channel light class
    """
    position = 10, 10
    size = 50, 50
    channels = 3
    light = pylights.MultiLight('single_test', position, size, channels=channels)
    old_value = np.array([0.8, 0.3, 0.6])
    new_value = np.array([0.1, 0.1, 0.1])
    light.set(old_value, False)
    assert np.array_equiv(light.value_current, old_value)
    assert np.array_equiv(light.value_target, old_value)
    assert np.array_equiv(light.value_output, np.array([0, 0, 0]))
    light.set(new_value)
    assert np.array_equiv(light.value_current, old_value)
    assert np.array_equiv(light.value_target, new_value)
    assert np.array_equiv(light.value_output, np.array([0, 0, 0]))
    light.update()
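    # the next line mirrors what update() is assumed to do: move value_current
    # toward value_target by the light's damping factor (exponential smoothing)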
    temp = (old_value + light.damping * (new_value - old_value))
    assert np.array_equiv(light.value_current, temp)
    assert np.array_equiv(light.value_target, new_value)
    assert np.array_equiv(light.value_output, (old_value * 255.0).astype(int))
    light.update()
    assert np.array_equiv(light.value_output, (temp * 255.0).astype(int))
Example #3
  def __testSequences(self, device, module):
    module = str(module)
    # Basic Interface: Currently supports read of all sequences only
    #device.write("", "WORD_ADC_ENA", 1)
    # Arrange the data on the card:
    predefinedSequence = numpy.array([0x00010000,
                                      0x00030002,
                                      0x00050004,
                                      0x00070006,
                                      0x00090008,
                                      0x000b000a,
                                      0x000d000c,
                                      0x000f000e,
                                      0x00110010,
                                      0x00130012,
                                      0x00150014,
                                      0x00170016,
                                      0x00ff0018], dtype=numpy.int32)
    device.write_raw(module, 'AREA_DMAABLE', predefinedSequence)

    expectedMatrix = numpy.array([[0,  1,  2,  3],
                                  [4,  5,  6,  7],
                                  [8,  9, 10, 11],
                                  [12, 13, 14, 15],
                                  [16, 17, 18, 19],
                                  [20, 21, 22, 23]], dtype=numpy.float32)
    readInMatrix = device.read_sequences(module, 'DMA')
    self.assertTrue(numpy.array_equiv(readInMatrix, expectedMatrix))
    self.assertTrue(readInMatrix.dtype == numpy.float32)
    readInMatrix = device.read_sequences(registerPath='/' + str(module) + '/DMA')
    self.assertTrue(numpy.array_equiv(readInMatrix, expectedMatrix))
    self.assertTrue(readInMatrix.dtype == numpy.float32)
Example #4
    def bin2d_cvt_equal_mass(self, wvt=None, verbose=1) :
        """
        Produce a Centroidal Voronoi Tessellation

        wvt: default is None (will use preset value, see self.wvt)
        """

        ## Reset the status and statusnode for all nodes
        self.status = np.zeros(self.npix, dtype=Nint)
        self.statusnode = np.arange(self.xnode.size) + 1

        if wvt is not None : self.wvt = wvt
        if self.wvt : self.weight = np.ones_like(self.data)
        else : self.weight = self.data**4

        self.scale = 1.0

        self.niter = 0
        ## WHILE LOOP: stop when the nodes do not move anymore ============
        Oldxnode, Oldynode = copy.copy(self.xnode[-1]), copy.copy(self.ynode[-1])
        while (not np.array_equiv(self.xnode, Oldxnode)) or (not np.array_equiv(self.ynode, Oldynode)):
            Oldxnode, Oldynode = copy.copy(self.xnode), copy.copy(self.ynode)
            ## Assign the closest centroid to each bin
            self.bin2d_assign_bins()

            ## New nodes weighted centroids
            self.bin2d_weighted_centroid()

            ## Eq. (4) of Diehl & Statler (2006)
            if self.wvt : self.scale = sqrt(self.Areanode/self.flux_node)
            self.niter += 1
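
The array_equiv calls above implement a fixed-point stopping rule: iterate until the node positions stop changing. A stripped-down sketch of the same pattern (illustrative only; update_nodes is a hypothetical stand-in for the assign/centroid steps):

    old = np.full_like(nodes, np.inf)
    while not np.array_equiv(nodes, old):
        old = nodes.copy()
        nodes = update_nodes(nodes)  # e.g. assign bins, then recompute centroids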
Example #5
    def equiv(self, other):
        """Test if other is an equivalent weighting.

        Returns
        -------
        equivalent : `bool`
            `True` if other is a `WeightingBase` instance with the same
            `WeightingBase.impl`, which yields the same result as this
            weighting for any input, `False` otherwise. This is checked
            by entry-wise comparison of matrices/vectors/constants.
        """
        # Optimization for equality
        if self == other:
            return True

        elif self.exponent != getattr(other, 'exponent', -1):
            return False

        elif isinstance(other, MatrixWeightingBase):
            if self.matrix.shape != other.matrix.shape:
                return False

            if self.matrix_issparse:
                if other.matrix_issparse:
                    # Optimization for different number of nonzero elements
                    if self.matrix.nnz != other.matrix.nnz:
                        return False
                    else:
                        # Most efficient out-of-the-box comparison
                        return (self.matrix != other.matrix).nnz == 0
                else:  # Worst case: compare against dense matrix
                    return np.array_equal(self.matrix.todense(), other.matrix)

            else:  # matrix of `self` is dense
                if other.matrix_issparse:
                    return np.array_equal(self.matrix, other.matrix.todense())
                else:
                    return np.array_equal(self.matrix, other.matrix)

        elif isinstance(other, VectorWeightingBase):
            if self.matrix_issparse:
                return (np.array_equiv(self.matrix.diagonal(),
                                       other.vector) and
                        np.array_equal(self.matrix.asformat('dia').offsets,
                                       np.array([0])))
            else:
                return np.array_equal(
                    self.matrix, other.vector * np.eye(self.matrix.shape[0]))

        elif isinstance(other, ConstWeightingBase):
            if self.matrix_issparse:
                return (np.array_equiv(self.matrix.diagonal(), other.const) and
                        np.array_equal(self.matrix.asformat('dia').offsets,
                                       np.array([0])))
            else:
                return np.array_equal(
                    self.matrix, other.const * np.eye(self.matrix.shape[0]))
        else:
            return False
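
The vector and constant branches above rely on two checks: the diagonal entries must all equal the target value, and the DIA representation must store only the main diagonal. A minimal sketch of that idiom in plain scipy/numpy (not part of the class):

    import numpy as np
    import scipy.sparse as sp

    m = sp.diags([2.0, 2.0, 2.0])
    assert np.array_equiv(m.diagonal(), 2.0)  # array_equiv broadcasts the scalar
    assert np.array_equal(m.asformat('dia').offsets, np.array([0]))  # only the main diagonal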
Example #6
def test_from_file_with_indices():
    sample = Sample.from_file(from_current_dir('sample2.xlsx'), [0, 4])
    expected_attributes = array([[1], [5]])
    expected_categories = array([1, 5])
    expected_columns = ["Age"]
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example #7
def test_xls_contents():
    sample = Sample.from_file(from_current_dir('sample2.xlsx'))
    expected_attributes = array([[1], [2], [3], [4], [5]])
    expected_categories = array([1, 2, 3, 4, 5])
    expected_columns = ["Age"]
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example #8
 def test_equivalent(self):
     """
     Returns True if input arrays are shape consistent and all elements equal.
     Shape consistent means they are either the same shape, 
     or one input array can be broadcasted to create the same shape as the other one.
     """
     a = [1,2]
     b = np.asarray(a)
     self.assertTrue(np.array_equiv(a, b))
     self.assertTrue(np.array_equiv(a,[a,a]))
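
A quick standalone illustration of the broadcasting rule described in the docstring above (plain numpy, names are illustrative):

    import numpy as np

    a = np.array([1, 2])
    assert np.array_equiv(a, [[1, 2], [1, 2]])      # broadcastable and equal -> True
    assert not np.array_equal(a, [[1, 2], [1, 2]])  # array_equal also requires same shape
    assert not np.array_equiv(a, [[1, 2, 1, 2]])    # shapes not broadcastable -> False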
Example #9
def test_remove_existing_column():
    sample = Sample.from_file(from_current_dir('sample3.xlsx'))
    sample.remove_column('Two')

    expected_attributes = array([[1, 3], [1, 3]])
    expected_categories = array([0, 0])
    expected_columns = ['One', 'Three']
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example #10
def test_transform():
    sample = Sample.from_file(from_current_dir('sample3.xlsx'))
    sample.merge_columns(['One', 'Three'], TestClusterer(), 'New')

    expected_attributes = array([[2, 0], [2, 0]])
    expected_categories = array([0, 0])
    expected_columns = ['Two', 'New']
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example #11
def test_normalize_existing_column():
    sample = Sample.from_file(from_current_dir('sample2.xlsx'))
    normalizer = sample.get_normalizer((0.0, 1.0))
    sample.normalize(normalizer, ["Age"])

    expected_attributes = array([[0.0], [0.25], [0.5], [0.75], [1.0]])
    expected_categories = array([1, 2, 3, 4, 5])
    expected_columns = ["Age"]
    assert array_equiv(sample.attributes, expected_attributes)
    assert array_equiv(sample.categories, expected_categories)
    assert sample.columns == expected_columns
Example #12
def _checkSameExperimentResults(exp1, exp2):
    """ Returns False if experiments gave same results, true if they match. """
    if not np.array_equiv(exp1.result["learning_steps"], exp2.result["learning_steps"]):
        # Same number of steps before failure (where applicable)
        return False
    if not np.array_equiv(exp1.result["return"], exp2.result["return"]):
        # Same return on each test episode
        return False
    if not np.array_equiv(exp1.result["steps"], exp2.result["steps"]):
        # Same number of steps taken on each training episode
        return False
    return True
Example #13
    def test_adjust_not_inplace(self):
        w = self.ll.get_weight()
        new_line = self.ll.adjust(np.array([1,2,3]))
        w_same_line = self.ll.get_weight()
        w_adjusted = new_line.get_weight()

        self.assertTrue(
            np.array_equiv(w,w_same_line)
        )

        self.assertFalse(
            np.array_equiv(w_same_line,w_adjusted)
        )
Example #14
 def test_missclassify_diff_line(self):
     new_line = LearningLine(0,2)
     test_point = np.array([1,1])
     self.assertFalse(
         np.array_equiv(self.ll.missclassify(new_line,
                                             self.test_points),
                        np.arange(4)))
Example #15
 def test_get_betas(self):
     self.assertTrue(
         np.array_equiv(
             self.ll.get_betas(),
             np.array([0,1])
         )
     )
Example #16
    def test_coverage_recovery(self):
        # Create the coverage
        cov, dset = self.get_cov()
        if cov._persistence_layer.master_manager.storage_type() != 'hdf':
            # TODO: Check for something Cassandra related
            self.assertTrue(True)
        else:
            cov_pth = cov.persistence_dir
            cov.close()

            # Analyze the valid coverage
            dr = CoverageDoctor(cov_pth, 'dprod', dset)

            dr_result = dr.analyze()

            # TODO: Turn these into meaningful Asserts
            self.assertEqual(len(dr_result.get_brick_corruptions()), 0)
            self.assertEqual(len(dr_result.get_brick_size_ratios()), 6)
            self.assertEqual(len(dr_result.get_corruptions()), 0)
            self.assertEqual(len(dr_result.get_master_corruption()), 0)
            self.assertEqual(len(dr_result.get_param_corruptions()), 0)
            self.assertEqual(len(dr_result.get_param_size_ratios()), 3)
            self.assertEqual(len(dr_result.get_master_size_ratio()), 1)
            self.assertEqual(len(dr_result.get_size_ratios()), 10)
            self.assertEqual(dr_result.master_status[1], 'NORMAL')

            self.assertFalse(dr_result.is_corrupt)
            self.assertEqual(dr_result.param_file_count, 3)
            self.assertEqual(dr_result.brick_file_count, 6)
            self.assertEqual(dr_result.total_file_count, 10)

            # Get original values (mock)
            orig_cov = AbstractCoverage.load(cov_pth)
            time_vals_orig = orig_cov.get_time_values()
            orig_cov.close()

            # Corrupt the Master File
            fo = open(cov._persistence_layer.master_manager.file_path, "wb")
            fo.write(b'Junk')
            fo.close()
            # Corrupt the lon Parameter file
            fo = open(cov._persistence_layer.parameter_metadata['lon'].file_path, "wb")
            fo.write(b'Junk')
            fo.close()

            corrupt_res = dr.analyze(reanalyze=True)
            self.assertTrue(corrupt_res.is_corrupt)

            # Repair the metadata files
            dr.repair(reanalyze=True)

            fixed_res = dr.analyze(reanalyze=True)
            self.assertFalse(fixed_res.is_corrupt)

            fixed_cov = AbstractCoverage.load(cov_pth)
            self.assertIsInstance(fixed_cov, AbstractCoverage)

            time_vals_fixed = fixed_cov.get_time_values()
            fixed_cov.close()
            self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))
Example #17
def test_cost_returns_none():
    x = np.array([[1], [2], [3], [4]])
    y = np.array([[4], [5], [6], [7]])

    loop_state = create_loop_state(x[:3, :], y[:3, :])

    assert np.array_equiv(loop_state.cost, np.array([None, None, None]))
Example #18
    def test_coverage_recovery(self):
        # Create the coverage
        dp_id, stream_id, route, stream_def_id, dataset_id = self.load_data_product()
        self.populate_dataset(dataset_id, 36)
        dset = self.dataset_management.read_dataset(dataset_id)
        dprod = self.dpsc_cli.read_data_product(dp_id)
        cov = DatasetManagementService._get_simplex_coverage(dataset_id)
        cov_pth = cov.persistence_dir
        cov.close()

        # Analyze the valid coverage
        dr = CoverageDoctor(cov_pth, dprod, dset)
        dr_result = dr.analyze()

        # Get original values (mock)
        orig_cov = AbstractCoverage.load(cov_pth)
        time_vals_orig = orig_cov.get_time_values()

        # TODO: Destroy the metadata files

        # TODO: RE-analyze coverage

        # TODO: Should be corrupt, take action to repair if so

        # Repair the metadata files
        dr.repair_metadata()

        # TODO: Re-analyze fixed coverage

        fixed_cov = AbstractCoverage.load(cov_pth)
        self.assertIsInstance(fixed_cov, AbstractCoverage)

        time_vals_fixed = fixed_cov.get_time_values()
        self.assertTrue(np.array_equiv(time_vals_orig, time_vals_fixed))
Example #19
 def test_adjust_inplace(self):
     w = self.ll.get_weight()
     self.ll.adjust_inplace(np.array([1,2,3]))
     w_adjusted = self.ll.get_weight()
     self.assertFalse(
         np.array_equiv(w,w_adjusted)
     )
Example #20
File: gp.py Project: DiNAi/george
 def _compute_alpha(self, y):
     # Recalculate alpha only if y is not the same as the previous y.
     if self._alpha is None or not np.array_equiv(y, self._y):
         self._y = y
         r = np.ascontiguousarray(self._check_dimensions(y)[self.inds]
                                  - self.mean(self._x), dtype=np.float64)
         self._alpha = self.solver.apply_inverse(r, in_place=True)
Example #21
 def test_operators(self):
     logger.info("Testeando operadores...")
     other = np.ones(self.matrix_neurons.shape)
     # Return: LocalNeurons
     res = self.matrix_neurons * other
     assert np.array_equiv(res.matrix, np.ones(self.matrix_neurons.shape))
     res = self.matrix_neurons / other
     assert np.array_equiv(res.matrix, np.ones(self.matrix_neurons.shape))
     res = self.matrix_neurons - other
     assert np.array_equiv(res.matrix, np.zeros(self.matrix_neurons.shape))
     res = self.matrix_neurons + other
     assert np.array_equiv(res.matrix, np.ones(self.matrix_neurons.shape) * 2)
     res **= 2  # LocalNeurons
     assert np.array_equiv(res.matrix, np.ones(self.matrix_neurons.shape) * 4)
     assert self.matrix_neurons == LocalNeurons(self.matrix_neurons.matrix, self.matrix_neurons.shape)
     logger.info("OK")
Example #22
 def __eq__(self, other):
     if isinstance(other, self.__class__):
         return (self.header   == other.header and
                 self.sequence == other.sequence and
                 np.array_equiv(self.quality, other.quality))
     else:
         return False
Example #23
def log(T):
    (R,p) = breakTrans(T)
    if np.array_equiv(R, np.eye(3)):
        w = np.zeros((3,1))
        theta = 1
        u = p
        
    else:
        tr = R.trace()
        if tr == -1:
            theta = -math.pi
            w = np.array([[ sqrt((R[i][i]+1)/2) for i in range(3)]]).T
            W = bra(w)
        else:
            theta = acos((tr-1)/2)
            W = (1/(2*sin(theta)))*(R-R.T)
            w = ibra(W)
        
        W2 = np.dot(W,W)
        G1 = 1./theta*np.eye(3)
        G2 = 0.5*W
        G3 = (1./theta-cot(theta/2)/2)*W2
        G = G1+G2+G3
        # transposing G corrects a bug in the formula above
        G = G.T
        u = np.dot(G,p)
    
    h = np.dot(w.T,u)
    return (w,u,theta,h)
Example #24
def test_impute():
    d = PreprocessingDescriptor("mean", [], [], [])
    sample = Sample.from_file(from_current_dir("sample6.xlsx"))
    impute_model = d.impute(sample)
    sample.impute_nan(impute_model)

    assert array_equiv(sample.attributes, [[3], [3], [3], [3], [3]])
Example #25
def test_every_iteration_model_updater_with_cost():
    """
    Tests that the model updater can use a different attribute from loop_state as the training targets
    """

    class MockModel(IModel):
        def optimize(self):
            pass

        def set_data(self, X: np.ndarray, Y: np.ndarray):
            self._X = X
            self._Y = Y

        @property
        def X(self):
            return self._X

        @property
        def Y(self):
            return self._Y

    mock_model = MockModel()
    updater = FixedIntervalUpdater(mock_model, 1, lambda loop_state: loop_state.cost)

    loop_state_mock = mock.create_autospec(LoopState)
    loop_state_mock.iteration = 1
    # assign mock attributes directly; the original .return_value(...) calls were no-ops
    loop_state_mock.X = np.random.rand(5, 1)

    cost = np.random.rand(5, 1)
    loop_state_mock.cost = cost
    updater.update(loop_state_mock)
    assert np.array_equiv(mock_model.X, cost)
Example #26
def test_agglomerative_clustering_with_distance_threshold(linkage):
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering with distance_threshold.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    # test when distance threshold is set to 10
    distance_threshold = 10
    for conn in [None, connectivity]:
        clustering = AgglomerativeClustering(
            n_clusters=None,
            distance_threshold=distance_threshold,
            connectivity=conn, linkage=linkage)
        clustering.fit(X)
        clusters_produced = clustering.labels_
        num_clusters_produced = len(np.unique(clustering.labels_))
        # test if the clusters produced match the point in the linkage tree
        # where the distance exceeds the threshold
        tree_builder = _TREE_BUILDERS[linkage]
        children, n_components, n_leaves, parent, distances = \
            tree_builder(X, connectivity=conn, n_clusters=None,
                         return_distance=True)
        num_clusters_at_threshold = np.count_nonzero(
            distances >= distance_threshold) + 1
        # test number of clusters produced
        assert num_clusters_at_threshold == num_clusters_produced
        # test clusters produced
        clusters_at_threshold = _hc_cut(n_clusters=num_clusters_produced,
                                        children=children,
                                        n_leaves=n_leaves)
        assert np.array_equiv(clusters_produced,
                              clusters_at_threshold)
Example #27
def _equal(a, b):
    #recursion on subclasses of types: tuple, list, dict
    #specifically checks             : float, ndarray
    if type(a) is float and type(b) is float:#float
        return(numpy.allclose(a, b))
    elif type(a) is numpy.ndarray and type(b) is numpy.ndarray:#ndarray
        return(numpy.array_equiv(a, b))#alternative for float-arrays: numpy.allclose(a, b[, rtol, atol])
    elif isinstance(a, dict) and isinstance(b, dict):#dict
        if len(a) != len(b):
            return(False)
        t = True
        for key, val in a.items():
            if key not in b:
                return(False)
            t = _equal(val, b[key])
            if not t:
                return(False)
        return(t)
    elif (isinstance(a, list) and isinstance(b, list)) or (isinstance(a, tuple) and isinstance(b, tuple)):#list, tuples
        if len(a) != len(b):
            return(False)
        t = True
        for vala, valb in zip(a, b):
            t = _equal(vala, valb)
            if not t:
                return(False)
        return(t)
    else:#fallback
        return(a == b)
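
A small usage sketch for the recursive comparison above (hypothetical values; assumes numpy is imported as in the function):

    a = {'w': 1.0, 'x': numpy.array([1, 2]), 'y': [(1, 2.0), 'z']}
    b = {'w': 1.0, 'x': numpy.array([1, 2]), 'y': [(1, 2.0), 'z']}
    assert _equal(a, b)                    # recurses through dict, list and tuple
    assert not _equal(a, {**b, 'w': 2.0})  # a differing float leaf fails allclose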
Example #28
 def exp(xi,tau=None):
     #given a 6x1 vector returns a SE3 object
     #may output garbage if matrix not invertible
     c=np.zeros((3,1))
     xiHat=SE3.hat(xi)
     v=np.array([[xiHat[0,3]],
                 [xiHat[1,3]],
                 [xiHat[2,3]]])
     w=np.array([[xiHat[2,1]],
                 [xiHat[0,2]],
                 [xiHat[1,0]]])
     wtrans=w.T
     what=xiHat[0:3,0:3]
     normw=np.linalg.norm(w)
     w2=w.dot(wtrans)-math.pow(normw,2)*np.eye(3)
     if tau is None:
         tau = 1
         print(tau)
     if not np.array_equiv(w, c):
         ewt=np.eye(3)+(what/normw)*math.sin(tau*normw)+w2*(1-math.cos(normw*tau))/math.pow(normw,2)
         d1=np.eye(3)-ewt
         d2=d1.dot(what)/math.pow(normw,2)
         d3=d2.dot(v)
         d32=(w.dot(wtrans).dot(v)*tau)/math.pow(normw,2)
         d=d3+d32
     else:
         ewt=np.eye(3)
         d=v*tau
     expXi=np.concatenate((np.concatenate((ewt,d),axis=1),np.array([[0,0,0,1]])),axis=0)
     omegatau=SE3()
     omegatau.__M=expXi
     return omegatau
Example #29
def _test_patch_at():
    size = (101, 101)
    orig = cv2.imread(os.path.abspath('tests/patch_at/test.tiff'))
    image = img2np(cv2.copyMakeBorder(orig, top=size[1], bottom=size[1], left=size[0], right=size[0],
                                      borderType=cv2.BORDER_DEFAULT))
    patch_0_0 = img2np(cv2.imread(os.path.abspath('tests/patch_at/patch_0_0.tiff')))
    patch_300_500 = img2np(cv2.imread(os.path.abspath('tests/patch_at/patch_300_500.tiff')))
    patch_500_300 = img2np(cv2.imread(os.path.abspath('tests/patch_at/patch_500_300.tiff')))
    pixels = [(51, 51), (351, 551), (551, 351)]
    outputs = (patch_0_0, patch_300_500, patch_500_300)
    for (x, y), expected in zip(pixels, outputs):
        actual = patch_at(image=image, x=x, y=y, size=size)
        actual = normalize(actual)
        expected = normalize(expected)
        try:
            assert np.array_equiv(expected, actual)
        except AssertionError:
            print "Failed: ", (x, y), expected.shape, actual.shape
            import pylab
            f = pylab.figure()
            f.add_subplot(3, 1, 1)
            pylab.imshow(np2img(actual))
            f.add_subplot(3, 1, 2)
            pylab.imshow(np2img(expected))
            f.add_subplot(3, 1, 3)
            diff = expected - actual
            pylab.imshow(np2img(diff))
            from scipy.linalg import norm
            print "Norms: ", np.sum(np.abs(diff)), norm(diff.ravel(), 0)
            pylab.show()
Example #30
def par_knowledge_test():
    import os
    import numpy as np
    import pyemu
    pst_file = os.path.join("pst","pest.pst")
    pst = pyemu.Pst(pst_file)

    tpl_file = os.path.join("utils","pp_locs.tpl")
    str_file = os.path.join("utils","structure.dat")
    pp_df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
    pkd = {"kr01c01":0.1}
    try:
        cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{str_file:tpl_file},
                                                         par_knowledge_dict=pkd)
    except Exception:
        return
    else:
        raise Exception("should have failed")
    d1 = np.diag(cov.x)


    df = pyemu.gw_utils.pp_tpl_to_dataframe(tpl_file)
    df.loc[:,"zone"] = np.arange(df.shape[0])
    gs = pyemu.geostats.read_struct_file(str_file)
    cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{gs:df},
                                               sigma_range=4)
    nnz = np.count_nonzero(cov.x)
    assert nnz == pst.npar
    d2 = np.diag(cov.x)
    assert np.array_equiv(d1, d2)
Example #31
    def test_group_by(self):
        values = np.arange(12).reshape(3, 4)
        ax1 = Axis("year", [2014, 2014, 2014])
        ax2 = Axis("month", ["jan", "jan", "feb", "feb"])
        c = Cube([ax1, ax2],values)
        
        d = c.reduce(np.mean, group=0)  # average by year
        self.assertTrue(np.array_equal(d.values, np.array([[4, 5, 6, 7]])))
        self.assertTrue(is_indexed(d.axis(0)))
        self.assertEqual(len(d.axis(0)), 1)
        self.assertEqual(d.values.shape, (1, 4))  # axes with length of 1 are not collapsed

        d = c.reduce(np.sum, group=ax2.name, sort_grp=False)  # sum by month
        self.assertTrue(np.array_equal(d.values, np.array([[1, 5], [9, 13], [17, 21]])))
        self.assertTrue(np.array_equal(d.axis(ax2.name).values, ["jan", "feb"]))

        d = c.reduce(np.sum, group=ax2.name)  # sum by month, sorted by default
        self.assertTrue(np.array_equal(d.values, np.array([[5, 1], [13, 9], [21, 17]])))
        self.assertTrue(np.array_equal(d.axis(ax2.name).values, ["feb", "jan"]))
        self.assertTrue(is_indexed(d.axis(ax2.name)))
        self.assertEqual(len(d.axis(ax2.name)), 2)
        self.assertEqual(d.values.shape, (3, 2))
        
        # testing various aggregation functions using direct calling, e.g. c.sum(group=0),
        # or indirect calling, e.g. reduce(func=np.sum, group=0)
        funcs_indirect = [np.sum, np.mean, np.median, np.min, np.max, np.prod]
        funcs_direct = [c.sum, c.mean, c.median, c.min, c.max, c.prod]
        for func_indirect, func_direct in zip(funcs_indirect, funcs_direct):
            result = np.apply_along_axis(func_indirect, 0, c.values)
            d = c.reduce(func_indirect, group=ax1.name)
            self.assertTrue(np.array_equiv(d.values, result))
            e = func_direct(group=ax1.name)
            self.assertTrue(np.array_equiv(e.values, result))

        # testing function with extra parameters which cannot be passed as *args
        third_quartile = functools.partial(np.percentile, q=75)
        d = c.reduce(third_quartile, group=ax1.name)
        self.assertTrue(np.array_equiv(d.values, np.apply_along_axis(third_quartile, 0, c.values)))

        # the same but using lambda - this is actually simpler and more powerful way
        third_quartile_lambda = lambda sample: np.percentile(sample, q=75)
        d = c.reduce(third_quartile_lambda, group=ax1.name)
        self.assertTrue(np.array_equiv(d.values, np.apply_along_axis(third_quartile_lambda, 0, c.values)))
Example #32
def _testlaue(sym):
    # This is a 'template' function
    # uniq only contains test data
    from nicos.devices.sxtal.xtal.uniqdata import uniq
    lauein = np.array([(h, k, l) for l in range(-2, 3) for k in range(-2, 3)
                       for h in range(-2, 3)])
    l = Laue(sym)
    res = l.uniqds(lauein)
    if sym in uniq:
        assert np.array_equiv(uniq[sym], res)
Example #33
 def test_transform(self):
     """
     tests if the 2 vectorizers create the same features for the data
     """
     sents = self.data_df[self.text_col].values
     ext = BOWExtractor(self.data_df, self.col_names)  # calls _prepare_features
     self.orig_vect.fit(self.data_df[self.text_col])
     # do they have the same features
     self.assertTrue(np.array_equiv(ext.transform(self.data_df, self.col_names),
                                    self.orig_vect.transform(sents).toarray()))
Example #34
def test_round_trip(skm):
    s, k, m = skm
    values = np.arange(256)
    decompressed = decompress(values, s=s, k=k, m=m)
    output = compress(decompressed, s=s, k=k, m=m)
    # account for the two possible encodings of 0: since the input to decompress
    # is an int, only one of them appears
    if s == 1:
        values[values == 128] = 0
    assert np.array_equiv(values, output)
Example #35
def test_signal_slicing(rng):
    slices = [0, 1, slice(None, -1), slice(1, None), slice(1, -1),
              slice(None, None, 3), slice(1, -1, 2)]

    x = np.arange(12, dtype=float)
    y = np.arange(24, dtype=float).reshape(4, 6)
    a = Signal(x.copy())
    b = Signal(y.copy())

    for i in range(100):
        si0, si1 = rng.randint(0, len(slices), size=2)
        s0, s1 = slices[si0], slices[si1]
        assert np.array_equiv(a[s0].initial_value, x[s0])
        assert np.array_equiv(b[s0, s1].initial_value, y[s0, s1])

    with pytest.raises(ValueError):
        a[[0, 2]]
    with pytest.raises(ValueError):
        b[[0, 1], [3, 4]]
Example #36
 def __eq__(self, other):
     """Compare two grid objects."""
     for key, item in self.__dict__.items():
         other_item = None
         if key in other.__dict__:
             other_item = other.__dict__[key]
         if not np.array_equiv(np.nan_to_num(item),
                               np.nan_to_num(other_item)):
             return False
     return True
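
The nan_to_num calls are what make NaN-filled attributes compare equal; array_equiv alone treats NaN as unequal to itself. A quick check of the underlying behavior (plain numpy, outside the class):

    import numpy as np

    a = np.array([1.0, np.nan])
    assert not np.array_equiv(a, a)                            # NaN != NaN
    assert np.array_equiv(np.nan_to_num(a), np.nan_to_num(a))  # NaNs mapped to 0.0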
Example #37
def test_group_conv():
    print('Testing group_conv')

    if not tf.executing_eagerly():
        tf.enable_eager_execution()

    groups = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 0, 0])

    def run_group_conv(groups, all_groups, n_neighbors, enable_group_crossing):
        groups = groups_4d(np.array([groups]))

        groups2 = group_conv(
            groups, all_groups=all_groups, n_neighbors=n_neighbors,
            enable_group_crossing=enable_group_crossing
        )
        groups2 = tf.squeeze(tf.squeeze(groups2, axis=-1), axis=-1)
        groups2 = groups2.numpy()

        return groups2

    assert np.array_equiv(
        run_group_conv(groups, all_groups=[1, 2],
                       n_neighbors=3, enable_group_crossing=True),
        [0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
    )

    assert np.array_equiv(
        run_group_conv(groups, all_groups=[1, 2],
                       n_neighbors=3, enable_group_crossing=False),
        [0, 1, 1, 1, 0, 0, 2, 2, 2, 0, 0, 0]
    )

    assert np.array_equiv(
        run_group_conv(groups, all_groups=[1, 2],
                       n_neighbors=1, enable_group_crossing=True),
        groups
    )

    assert np.array_equiv(
        run_group_conv(groups, all_groups=[1, 2],
                       n_neighbors=1, enable_group_crossing=False),
        groups
    )
Example #38
 def predict(self, X):
     X = check_array(X)
     # if the input contains the same elements but different sample order,
     # then just return zeros.
     if (
         np.array_equiv(np.sort(X, axis=0), np.sort(self._X, axis=0))
         and (X != self._X).any()
     ):
         return np.zeros(X.shape[0])
     return X[:, 0]
Example #39
def k_means_cs171(x_input, k, init_centroids):
    #x_input = matrix of data points.
    #k = number of clusters
    #init_centroids = kxfeature matrix that contains initialization for centroids.

    cluster_centroids = init_centroids  # start centroids at init values
    cluster_assignments = np.zeros(shape=(len(x_input), 1), dtype=int)

    # used to detect when the cluster assignments stop changing
    cluster_copy = np.ones(shape=(len(x_input), 1), dtype=int)

    # Repeat while cluster assignments don't change:
    # a) Assign each point to the nearest centroid using Euclidean distance
    # b) Given new assignments, compute new cluster centroids as mean of all points in cluster
    while not np.array_equiv(cluster_assignments, cluster_copy):
        # print(cluster_assignments)
        # one accumulator per cluster: running feature sums plus a trailing count
        Num_In_Clust = [[0, 0, 0, 0, 0]]
        if k > 1:
            for i in range(k - 1):
                Num_In_Clust.append([0, 0, 0, 0, 0])
        # snapshot the assignments; compared for equality at the top of the loop
        cluster_copy = np.copy(cluster_assignments)
        for i in range(len(x_input)):  # determine which cluster each point is in
            # holds (distance, cluster index) of the nearest centroid so far
            lowest_val = (1000000.0, -100)
            if k > 1:
                for j in range(k):
                    value = Euclidian(x_input[i], init_centroids[j])
                    if value < lowest_val[0]:
                        lowest_val = (value, j)
            else:
                # no point in checking which centroid to add to if k <= 1
                lowest_val = (0, 0)
            # re-calculate the centroid of the chosen cluster
            index = lowest_val[1]
            x_val = np.copy(x_input[i])
            cluster_assignments[i] = lowest_val[1]
            for j in range(len(x_val)):
                Num_In_Clust[index][j] = Num_In_Clust[index][j] + x_val[j]
            Num_In_Clust[index][-1] += 1  # increment the count used for the mean
            for j in range(len(x_val)):
                if cluster_centroids.ndim > 1:
                    cluster_centroids[index][j] = Num_In_Clust[index][j] / Num_In_Clust[index][-1]
                else:
                    cluster_centroids[j] = Num_In_Clust[index][j] / Num_In_Clust[index][-1]
    return cluster_assignments, cluster_centroids
Example #40
    def test_coherence_files_not_converted(self):
        # define constants
        NO_DATA_VALUE = 0
        driver = gdal.GetDriverByName('GTiff')

        # create a sample gdal dataset
        sample_gdal_filename = 'sample_gdal_dataset.tif'
        sample_gdal_dataset = driver.Create(sample_gdal_filename, 5, 5, 1,
                                            gdal.GDT_Float32)
        srs = osr.SpatialReference()
        wkt_projection = srs.ExportToWkt()
        sample_gdal_dataset.SetProjection(wkt_projection)

        sample_gdal_band = sample_gdal_dataset.GetRasterBand(1)
        sample_gdal_band.SetNoDataValue(NO_DATA_VALUE)
        sample_gdal_band.WriteArray(np.arange(25).reshape(5, 5))

        # create a coherence mask dataset
        coherence_mask_filename = 'coherence_mask_dataset.tif'
        coherence_mask_dataset = driver.Create(coherence_mask_filename, 5, 5,
                                               1, gdal.GDT_Float32)
        srs = osr.SpatialReference()
        wkt_projection = srs.ExportToWkt()
        coherence_mask_dataset.SetProjection(wkt_projection)
        coherence_mask_band = coherence_mask_dataset.GetRasterBand(1)
        coherence_mask_band.SetNoDataValue(NO_DATA_VALUE)
        coherence_mask_band.WriteArray(
            np.arange(0, 75, 3).reshape(5, 5) / 100.0)

        # create an artificial masked dataset
        expected_result_array = np.nan_to_num(
            np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
                      [np.nan, np.nan, np.nan, np.nan, np.nan],
                      [10., 11., 12., 13., 14.], [15., 16., 17., 18., 19.],
                      [20., 21., 22., 23., 24.]]))

        # use the gdal_python.coherence_masking to find the actual mask dataset
        threshold = 0.3
        gdal_python.coherence_masking(sample_gdal_dataset,
                                      coherence_mask_dataset, threshold)
        sample_gdal_array = np.nan_to_num(
            sample_gdal_dataset.GetRasterBand(1).ReadAsArray())

        # compare the artificial masked and actual masked datasets
        self.assertTrue(
            np.array_equiv(sample_gdal_array, expected_result_array))

        # del the tmp datasets created
        del coherence_mask_dataset
        os.remove(coherence_mask_filename)

        del sample_gdal_dataset
        os.remove(sample_gdal_filename)
Example #41
def test_run_stochastic_opt_false(clock_05, grid_1):
    params = {
        "grid": grid_1,
        "opt_stochastic_duration": False,
        "clock": clock_05,
        "record_rain": True,
        "m_sp": 0.5,
        "n_sp": 1.0,
        "water_erodibility": 0.01,
        "regolith_transport_parameter": 0.1,
        "infiltration_capacity": 0.0,
        "rainfall__mean_rate": 1.0,
        "rainfall_intermittency_factor": 0.1,
        "rainfall__shape_factor": 0.6,
        "number_of_sub_time_steps": 1,
        "random_seed": 1234,
    }

    model = BasicSt(**params)
    assert model.opt_stochastic_duration is False
    model.run_for(model.clock.step, 10000.0)

    rainfall_rate = np.asarray(model.rain_record["rainfall_rate"])
    event_duration = np.asarray(model.rain_record["event_duration"])

    dry_times = event_duration[rainfall_rate == 0]
    wet_times = event_duration[rainfall_rate > 0]

    assert np.array_equiv(
        dry_times,
        model.clock.step * (1.0 - params["rainfall_intermittency_factor"]),
    )
    assert np.array_equiv(
        wet_times,
        model.clock.step * (params["rainfall_intermittency_factor"]),
    )

    avg_storm_depth = np.sum((rainfall_rate * event_duration)) / len(wet_times)

    np.testing.assert_array_almost_equal(avg_storm_depth,
                                         params["rainfall__mean_rate"],
                                         decimal=1)
Example #42
    def test_12_renumbering_and_crossmatching_sources(self):
        #Testing functions for renumbering sources: renumbering to sequential,
        #and grouping based on cross-matching position.

        source_arrays2 = make_simdata_4()
        pol_spec2 = polspectra.from_arrays(
            source_arrays2['ra'], source_arrays2['dec'],
            source_arrays2['freq'], source_arrays2['StokesI'],
            source_arrays2['StokesI_error'], source_arrays2['StokesQ'],
            source_arrays2['StokesQ_error'], source_arrays2['StokesU'],
            source_arrays2['StokesU_error'], source_arrays2['source_number'],
            source_arrays2['beam_major'], source_arrays2['beam_minor'],
            source_arrays2['beam_pa'])
        self.assertTrue(
            np.array_equiv(pol_spec2['source_number'], [0, 1, 3, 4, 5, 6]),
            "Test inputs have changed! Test needs to be fixed")
        pol_spec2.renumber_sources()
        self.assertTrue(
            np.array_equiv(pol_spec2['source_number'], [0, 1, 2, 3, 4, 5]),
            "Renumbering sources not following expectations.")
        pol_spec2.crossmatch_sources(6, consecutive=False)
        self.assertTrue(
            np.array_equiv(pol_spec2['source_number'], [0, 0, 0, 0, 4, 5]),
            "Crossmatching sources not following expectations.")
        pol_spec2.renumber_sources()
        self.assertTrue(
            np.array_equiv(pol_spec2['source_number'], [0, 0, 0, 0, 1, 2]),
            "Renumbering sources after crossmatching not following expectations."
        )
        pol_spec2 = polspectra.from_arrays(
            source_arrays2['ra'], source_arrays2['dec'],
            source_arrays2['freq'], source_arrays2['StokesI'],
            source_arrays2['StokesI_error'], source_arrays2['StokesQ'],
            source_arrays2['StokesQ_error'], source_arrays2['StokesU'],
            source_arrays2['StokesU_error'], source_arrays2['source_number'],
            source_arrays2['beam_major'], source_arrays2['beam_minor'],
            source_arrays2['beam_pa'])
        pol_spec2.crossmatch_sources(6, consecutive=True)
        self.assertTrue(
            np.array_equiv(pol_spec2['source_number'], [0, 0, 0, 0, 1, 2]),
            "Crossmatching consecutive functionality not following expectations."
        )
Example #43
 def test_linalg(self):
     logger.info("Testeando operaciones algebraicas...")
     N = self.matrix_neurons.rows
     other = np.array(range(N))
     # Matrix product
     res = self.matrix_neurons.mul_array(other)
     sum_range = sum(range(N))
     assert np.array_equiv(res.matrix, np.array([sum_range] * N))
     # Dot product
     #res = res.transpose().dot(range(N))
     #assert res == (sum_range ** 2) * N
     # Element-wise product
     assert self.matrix_neurons.mul_elemwise(
         self.matrix_neurons) == self.matrix_neurons.collect()
     # Array addition
     assert np.array_equiv(
         self.matrix_neurons.sum_array(self.matrix_neurons),
         self.matrix_neurons * 2)
     assert self.matrix_neurons.sum() == self.matrix_neurons.count()
     logger.info("OK")
Example #44
 def test_iter1(self):
     A = np.array([[1, 2, 2], [2, 1, 2], [2, 2, 1]])
     b = np.array([0, 20, 20, 20])
     c = np.array([-10, -12, -12])
     test_itr_1 = np.array([[120.0, -4.0, 0.0, 0.0, 6.0, 0.0, 0.0],
                            [10.0, 0.5, 1.0, 1.0, 0.5, 0.0, 0.0],
                            [10.0, 1.5, 0.0, 1.0, -0.5, 1.0, 0.0],
                            [0.0, 1.0, 0.0, -1.0, -1.0, 0.0, 1.0]])
     s = my.Simplex(A, b, c, "MIN")
     pass1 = s.optimize()
     self.assertTrue(np.array_equiv(pass1, test_itr_1))
Example #45
def boundsurf_join(inds, B):
    inds = [(ind if ind.dtype != bool else where(ind)[0]) for ind in inds]
    #    B = [b for b in B if b is not None]
    L = surf_join(inds, [b.l for b in B])
    U = surf_join(inds, [b.u for b in B])  #if self.l is not self.u else L
    definiteRange = (True
                     if PythonAll(np.array_equiv(True, b.definiteRange) for b in B)
                     else Join(inds, [np.asarray(b.definiteRange) for b in B]))
    from boundsurf2 import boundsurf2
    b = boundsurf if type(L) == type(U) == surf else boundsurf2
    return b(L, U, definiteRange, B[0].domain)
Example #46
    def test_zip_arr(self):

        arr1 = np.array([[2, 4], [6, 8]], dtype=int)
        arr2 = np.array([[1, 3], [5, 7]], dtype=int)

        zipped = zip_arr(arr1, arr2, field_names=['even', 'odd'])
        self.assertTrue(
            np.array_equiv(
                zipped,
                np.array([[(2, 1), (4, 3)], [(6, 5), (8, 7)]],
                         dtype=[('even', int), ('odd', int)])))
Example #47
 def test_get_by_list(self):
     results = []
     brick_size = 10
     time_steps = 30
     cov, cov_name = self.get_cov(brick_size=brick_size, nt=time_steps)
     dat = cov.get_parameter_values('time')
     for s in range(len(dat)):
         mock_data = s
         data = cov.get_parameter_values('time', [s])
         results.append(np.array_equiv(mock_data, data))
     self.assertTrue(False not in results)
Example #48
def comparar_arrays():
    a1 = np.array([1, 2, 4, 6, 7])
    a2 = np.array([1, 3, 4, 5, 7])
    a3 = np.array([1, 3, 4.00001, 5, 7])

    print("-")
    print(np.array_equal(a1, a1))
    print(np.array_equal(a1, a2))
    print("-")

    print(np.allclose(a1, a2))
    print(np.allclose(a3, a2))
    print("-")

    print(np.array_equiv(a1, a2))
    print(np.array_equiv(a3, a2))
    print("-")

    print((a1 == a2).all())
    print((a3 == a2).all())
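
For reference, with NumPy's default tolerances the function above should print: True then False for array_equal; False then True for allclose (a3 differs from a2 by only 1e-5 in one element); and False for both pairs under array_equiv and the elementwise == reduction, since those compare exactly.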
Example #49
def test_sequential_with_all_parameters_fixed():
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
    acquisition_optimizer = AcquisitionOptimizer(space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25, 'y': 0.25})
    assert np.array_equiv(next_points, np.array([0.25, 0.25]))
Example #50
def test_refresh_image_correctly_change_current_image():
    # Given
    new_image = np.array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]],
                          [[0, 0, 0], [1, 1, 1], [2, 2, 2]]])
    im = ImageManager()

    # When
    im.refresh_image(new_image)

    # Then
    assert np.array_equiv(im.current_image, new_image)
Example #51
def int_nan_streches(data_input):
    """ finds nan streches and interporlates the strech, pads start as continuous"""
    data = copy.copy(data_input)

    # taking care of ends
    # determine if data started with nan or not
    if np.isnan(data[0]):
        # find where the data starts
        j = 0
        found_start = 0
        while found_start == 0:
            if ~np.isnan(data[j]):
                start_notnan = j
                found_start = 1
            j = j + 1
        data[0:start_notnan] = data[start_notnan]

    if np.isnan(data[-1]):
        # find where the last nan stretch starts
        j = -1
        found_start = 0
        while found_start == 0:
            if ~np.isnan(data[j]):
                start_notnan = j
                found_start = 1
            j = j - 1
        data[start_notnan:] = data[start_notnan]

    # finds indices of NaNs
    data_nans = np.isnan(data) * 1

    # cleaning data of spurious positions
    spurious = [1, 1, 1, 0, 1, 1, 1]

    for idx, data_point in enumerate(data_nans[0:-2], start=2):
        if np.array_equiv(spurious, data_nans[idx - 3:idx + 4]):
            data_nans[idx - 3:idx + 4] = [1, 1, 1, 1, 1, 1, 1]

    # finds the start or end of each NaN stretch
    changes = np.diff(data_nans, axis=0)

    # added 1 to bout_start as otherwise it is the last timepoint that was below the threshold.
    # Also did it to ends so a peak of one timepoint would have a length of 1.
    bout_start = np.where(changes == 1)[0] + 1
    bout_end = np.where(changes == -1)[0] + 1

    for idx, strech in enumerate(bout_start):
        data[bout_start[idx]:bout_end[idx]] = np.interp(
            np.arange(bout_start[idx],
                      bout_end[idx]), [bout_start[idx] - 1, bout_end[idx]],
            [data[bout_start[idx] - 1], data[bout_end[idx]]])

    return data
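
An illustrative check of the behavior (values worked through by hand; assumes numpy and copy are imported as the function requires):

    data = np.array([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan])
    result = int_nan_streches(data)
    # expect [1., 1., 2., 3., 4., 4.]: both ends padded with the nearest valid
    # value, and the interior gap linearly interpolated between 1.0 and 4.0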
Example #52
def numRecs(alfombra, rec):
    total = 0
    dim = alfombra.shape[0]
    ndim = math.ceil(dim / rec) * rec
    nalfombra = np.zeros((ndim, ndim))
    nalfombra[0:dim, 0:dim] = alfombra
    for i in range(0, ndim, rec):
        for j in range(0, ndim, rec):
            if not np.array_equiv(nalfombra[i:i + rec, j:j + rec],
                                  np.zeros((rec, rec))):
                total += 1
    return total
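
A tiny usage sketch (hypothetical carpet-style input; assumes numpy as np and math are imported as the function requires):

    carpet = np.ones((3, 3))
    carpet[1, 1] = 0
    assert numRecs(carpet, 1) == 8  # eight of the nine 1x1 blocks are nonzero
    assert numRecs(carpet, 3) == 1  # the single 3x3 block contains nonzero entries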
Example #53
 def test_target_cost(self):
     cost_list = [1., 2., -1.]
     interface = CostListInterface(cost_list)
     controller = mlc.create_controller(
         interface,
         max_num_runs=10,
         target_cost=-1,
         max_num_runs_without_better_params=4)
     controller.optimize()
     self.assertTrue(controller.best_cost == -1.)
     self.assertTrue(
         np.array_equiv(np.array(controller.in_costs), np.array(cost_list)))
Example #54
def test_k_full_and_k_diag_are_equivalent():
    """
    Test that kern.K and kern.Kdiag return equivalent results
    """
    kernels = []
    for i in range(0, 2):
        kernels.append(GPy.kern.RBF(1))
    k = emukit.multi_fidelity.kernels.LinearMultiFidelityKernel(kernels)
    inputs = np.random.rand(20, 2)
    inputs[:10, 1] = 1
    inputs[10:, 1] = 0
    assert np.array_equiv(np.diag(k.K(inputs)), k.Kdiag(inputs))
Example #55
def test_remove_empty_rows():
    # dtype=object keeps the ragged rows; newer NumPy raises on ragged input without it
    rows = np.array([
        ['a-val1', 'b-val1', 'c-val1'],
        [],
        ['a-val3', 'b-val3', 'c-val3'],
    ], dtype=object)
    actual = remove_empty_rows(rows)
    expected = np.array([
        ['a-val1', 'b-val1', 'c-val1'],
        ['a-val3', 'b-val3', 'c-val3'],
    ])
    assert np.array_equiv(actual, expected)
Example #56
    def cast(self, topic: int) -> None:
        """Send the current image using given topic number.

        :param topic: The number of the topic used to send the image.
        """
        if np.array_equiv(self.im.current_image, np.array([])) or len(self.subs_list) == 0:
            return
        for msg_to_send in self.im.get_messages(topic):
            for sub in self.subs_list:
                self.udp_socket.sendto(msg_to_send, sub)
            if self.run_new_process:
                VideoStream.delay(1)
Example #57
def test_bernoulli():
    shapeInput = np.random.randint(1, 100, [2])
    inShape = NumCpp.Shape(shapeInput[0].item(), shapeInput[1].item())
    p = np.random.rand()

    randArray = NumCpp.bernoulli(inShape, p).getNumpyArray()
    assert np.array_equiv(randArray.shape, shapeInput)

    randValue = NumCpp.bernoulli(p)
    assert isinstance(randValue, bool)
Example #58
def find_rotation_between_vectors(a, b):
    """http://math.stackexchange.com/questions/293116/rotating-one-3d-vector-to-another"""
    if np.array_equiv(a, b):
        return [1, 0, 0, 0]

    axis = normalize(np.cross(a, b))
    dot = np.dot(a, b)
    if dot >= 1.0:
        return [1, 0, 0, 0]
    angle = math.acos(dot)
    q = quaternion_from_axis_angle(axis, angle)
    return q
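
As a rough sanity check (assuming the unshown helpers produce w-first quaternions):

    q = find_rotation_between_vectors(np.array([1.0, 0.0, 0.0]),
                                      np.array([0.0, 1.0, 0.0]))
    # expect roughly [0.7071, 0, 0, 0.7071]: a 90-degree rotation about the z axis,
    # i.e. [cos(pi/4), 0, 0, sin(pi/4)]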
Example #59
 def test_FullElimination(self):
     test_arr = np.array([[21, 16, 13], [21, 18, 38], [24, 24, 6]], float)
     expected = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], float)
     arr_cpy = test_arr.copy()
     res = ge.gaussian_elimination(arr_cpy)
     expected = np.around(expected, 13)
     arr_cpy = np.around(arr_cpy, 13)
     self.assertEqual(len(res), 5)
     self.assertTrue(
         np.array_equiv(arr_cpy,
                        expected), "np.array_equiv was false for arrays: " +
         str(arr_cpy) + "\n" + str(expected))
Example #60
 def test_SimpleEliminationMulti(self):
     multi_pool = Pool(3)
     ge.enableMultiprocessing(multi_pool)
     test_arr = np.array([[3, 0, 1], [3, 1, 1]], float)
     arr_cpy = test_arr.copy()
     res = ge.gaussian_elimination(arr_cpy)
     self.assertTrue(res is not None)
     self.assertTrue(len(res) == 2)
     self.assertTrue(
         np.array_equiv(arr_cpy, np.array([[1, 0, 1 / 3], [0, 1, 0]],
                                          float)))
     multi_pool.close()