Example #1
    def fwd_all(self,X,w=None):
        """ Propagate values forward through the net.
        Inputs:
                X           - array of input patterns, one pattern per row
                w           - packed array of weights
        Returns:
                array of outputs for all input patterns
        """
        if w is not None:
            self.wp = w
        self.unpack()
        # compute hidden unit values
        z = N.zeros((len(X),self.centers.shape[0]))
        for i in range(len(X)):
             z[i] = N.exp((-1.0/(2*self.variance))*(N.sum((X[i]-self.centers)**2,axis=1)))
        # compute net outputs
        o = N.dot(z,self.w) + N.dot(N.ones((len(z),1)),self.b)
        # compute final output activations
        if self.outfxn == 'linear':
            y = o
        elif self.outfxn == 'logistic':     # TODO: check for overflow here...
            y = 1/(1+N.exp(-o))
        elif self.outfxn == 'softmax':      # TODO: and here...
            tmp = N.exp(o)
            y = tmp/(N.sum(tmp,1)*N.ones((1,self.no)))

        return N.array(y)
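The softmax branch above carries an unresolved overflow TODO. A minimal standalone sketch of the usual fix, subtracting the per-row maximum before exponentiating (the function name is illustrative, not part of the class):

import numpy as N

def stable_softmax(o):
    # exp never sees a large positive argument, so it cannot overflow
    shifted = o - N.max(o, axis=1, keepdims=True)
    e = N.exp(shifted)
    return e / N.sum(e, axis=1, keepdims=True)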
Example #2
def test_ridge():
    # Ridge regression convergence test using score
    # TODO: for this test to be robust, we should use a dataset instead
    # of np.random.
    rng = np.random.RandomState(0)
    alpha = 1.0

    for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
        # With more samples than features
        n_samples, n_features = 6, 5
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)

        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_equal(ridge.coef_.shape, (X.shape[1], ))
        assert_greater(ridge.score(X, y), 0.47)

        if solver == "cholesky":
            # Currently the only solver to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(ridge.score(X, y), 0.47)

        # With more features than samples
        n_samples, n_features = 5, 10
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_greater(ridge.score(X, y), 0.9)

        if solver == "cholesky":
            # Currently the only solver to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(ridge.score(X, y), 0.9)
Example #3
def coloring(m, inv_row_p, inv_col_p):
    # The permutation must give a matrix in Hessenberg form
    row_major, col_major = get_permuted_sp_matrices(m, inv_row_p, inv_col_p)
    ncols = col_major.shape[1]
    # Initially all column colors are set to the invalid "color" -1
    column_colors = np.full(ncols, -1, np.int32)
    # If color i is available: color_available[i] is 1, otherwise 0
    # Initially, "color" 0 is available 
    color_available = np.ones(1, np.int8)
    # Iterate through the columns backwards, from right to left
    for c in reversed(irange(ncols)):
        # All rows containing c
        for r in rows_in_col(col_major, c):
            # All other columns in those rows, c's "neighbors"
            cols = cols_in_row(row_major, r)
            idx = np.flatnonzero(cols == c)[0]  # scalar position of c in this row
            # cols in r: [ left | c | right]
            # only the right index set has been processed so far
            right = cols[idx+1:]
            used_colors = column_colors[right]
            color_available[used_colors] = 0
        #print('Colors available:\n%s' % color_available)
        index = np.flatnonzero(color_available)[0]
        column_colors[c] = index
        # introduce new "color" if index == color_available.size-1 
        color_available = np.ones(max(index+2, color_available.size), np.int8)
    print('Colors:\n%s' % column_colors)
    # chromatic number >= max nonzeros in a row
    lb_chromatic_number = np.max(np.diff(row_major.indptr))
    color_count = color_available.size-1
    print('Color count: %d (>=%d)' % (color_count, lb_chromatic_number))
    return  column_colors, color_count
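The same greedy right-to-left coloring can be sketched on a small dense 0/1 matrix, without the sparse helpers (get_permuted_sp_matrices, rows_in_col, cols_in_row) the snippet assumes; an illustration of the idea, not the library code:

import numpy as np

def greedy_column_coloring(dense):
    # dense: 0/1 matrix, assumed already permuted to Hessenberg form
    ncols = dense.shape[1]
    colors = np.full(ncols, -1, np.int32)
    for c in reversed(range(ncols)):
        forbidden = set()
        for r in np.flatnonzero(dense[:, c]):
            row_cols = np.flatnonzero(dense[r])
            right = row_cols[row_cols > c]   # only these are colored so far
            forbidden.update(colors[right].tolist())
        color = 0
        while color in forbidden:
            color += 1
        colors[c] = color
    return colors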
Example #4
    def reset(self):
        if self.Q_init:
            self.Q = self.Q_init * np.ones(self.n)
        else: # init with small random numbers to avoid ties
            self.Q = np.random.uniform(0, 1e-4, self.n)

        if self.alpha:
            self.update_action_value = self.update_action_value_constant_alpha
        else:
            self.update_action_value = self.update_action_value_sample_average

        if self.epsilon is not None:
            self.choose_action = self.choose_action_greedy
        elif self.tau:
            self.choose_action = self.choose_action_softmax
        else:
            print('Error: epsilon or tau must be set')
            sys.exit(-1)

        self.rewards = []
        self.rewards_seq = []
        self.actions = []
        self.k_actions = np.ones(self.n) # number of steps for each action
        self.k_reward = 1
        self.average_reward = 0
        self.optimal_actions = []
Example #5
    def test_setitem_all(self):

        data = np.zeros((10, 10), dtype=np.uint8)
        T = Texture(data=data)
        T[...] = np.ones((10, 10, 1))
        assert len(T._pending_data) == 1
        assert np.allclose(data, np.ones((10, 10, 1)))
Example #6
def test_cross_val_score_fit_params():
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))

    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()

    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type

        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)

    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
Example #7
    def __init__(self,  n_mixtures, step_sizes=(0.001,), threshold=0.001):
        """
        Gaussian proposal function for the weights of a GMM.

        Parameters
        ----------
        n_mixtures
        step_sizes
        threshold

        Notes
        ----------
        The proposal function works by projecting the weight vector w onto the simplex defined by
        w_1 + w_2 + ... + w_n = 1, 0 <= w_i <= 1. The change-of-basis matrix is found by taking
        n-1 vectors lying on the plane and applying Gram-Schmidt to get an orthonormal basis.
        A Gaussian proposal in the resulting (n-1)-d space is used to find the next point on the simplex.
        """
        super(GaussianStepWeightsProposal, self).__init__()
        self.step_sizes = step_sizes
        self.n_mixtures = n_mixtures
        self.count_accepted = np.zeros((len(step_sizes),))
        self.count_illegal =  np.zeros((len(step_sizes),))
        self.count_proposed = np.zeros((len(step_sizes),))
        self.threshold = threshold


        if n_mixtures > 1:
            # get change-of-basis matrix mapping n-dim coordinates to (n-1)-dim
            # coordinates on the simplex x1 + x2 + x3 + ... = 1
            points = np.random.dirichlet(np.ones(n_mixtures), size=n_mixtures - 1)
            points = points.T
            self.plane_origin = np.ones((n_mixtures)) / float(n_mixtures)
            # get vectors parallel to plane from its center (1/n,1/n,....)
            parallel = points - np.ones(points.shape) / float(n_mixtures)
            # do Gram-Schmidt (via QR) to get mutually orthonormal vectors (basis)
            self.e, _ = np.linalg.qr(parallel)
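A quick standalone check of the Notes above: vectors drawn parallel to the simplex sum to zero componentwise, so the QR factor is an orthonormal basis of the plane, orthogonal to the simplex normal (1, ..., 1); n is arbitrary here:

import numpy as np

n = 3
points = np.random.dirichlet(np.ones(n), size=n - 1).T   # columns lie on the simplex
origin = np.ones(n) / n
parallel = points - origin[:, None]      # columns sum to zero
e, _ = np.linalg.qr(parallel)            # orthonormal basis of the plane
assert np.allclose(e.T.dot(np.ones(n)), 0.0)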
Example #8
    def tag_sites(self, scaled_positions, symprec=1e-3):
        """Returns an integer array of the same length as *scaled_positions*, 
        tagging all equivalent atoms with the same index.

        Example:

        >>> from ase.lattice.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.tag_sites([[0.0, 0.0, 0.0], 
        ...               [0.5, 0.5, 0.0], 
        ...               [1.0, 0.0, 0.0], 
        ...               [0.5, 0.0, 0.0]])
        array([0, 0, 0, 1])
        """
        scaled = np.array(scaled_positions, ndmin=2)
        scaled %= 1.0
        scaled %= 1.0
        tags = -np.ones((len(scaled), ), dtype=int)
        mask = np.ones((len(scaled), ), dtype=bool)
        rot, trans = self.get_op()
        i = 0
        while mask.any():
            pos = scaled[mask][0]
            sympos = np.dot(rot, pos) + trans
            # Must be done twice, see the scaled_positions.py test
            sympos %= 1.0
            sympos %= 1.0
            m = ~np.all(np.any(np.abs(scaled[np.newaxis,:,:] - 
                                      sympos[:,np.newaxis,:]) > symprec, 
                               axis=2), axis=0)
            assert not np.any((~mask) & m)
            tags[m] = i
            mask &= ~m
            i += 1
        return tags
Example #9
def test_pes_multidim_error(Simulator, rng):
    """Test that PES works on error connections mapping from N to 1 dims.

    Note that the transform is applied before the learning rule, so the error
    signal should be 1-dimensional.
    """

    with nengo.Network() as net:
        err = nengo.Node(output=[0])
        ens1 = nengo.Ensemble(20, 3)
        ens2 = nengo.Ensemble(10, 1)

        # Case 1: ens -> ens, weights=False
        conn = nengo.Connection(ens1, ens2,
                                transform=np.ones((1, 3)),
                                solver=nengo.solvers.LstsqL2(weights=False),
                                learning_rule_type={"pes": nengo.PES()})
        nengo.Connection(err, conn.learning_rule["pes"])
        # Case 2: ens -> ens, weights=True
        conn = nengo.Connection(ens1, ens2,
                                transform=np.ones((1, 3)),
                                solver=nengo.solvers.LstsqL2(weights=True),
                                learning_rule_type={"pes": nengo.PES()})
        nengo.Connection(err, conn.learning_rule["pes"])
        # Case 3: neurons -> ens
        conn = nengo.Connection(ens1.neurons, ens2,
                                transform=np.ones((1, ens1.n_neurons)),
                                learning_rule_type={"pes": nengo.PES()})
        nengo.Connection(err, conn.learning_rule["pes"])

    with Simulator(net) as sim:
        sim.run(0.01)
Example #10
def reflectivity_amplitude(Q,
                           depth,
                           rho,
                           mu=0,
                           sigma=None,
                           wavelength=1,
                           ):
    """
    Returns the complex reflectivity waveform.

    See reflectivity for details.
    """
    Q = _dense(Q,'d')
    R = numpy.empty(Q.shape,'D')

    n = len(depth)
    if numpy.isscalar(wavelength):
        wavelength=wavelength*numpy.ones(Q.shape, 'd')
    if numpy.isscalar(mu):
        mu = mu*numpy.ones(n, 'd')
    if numpy.isscalar(sigma):
        sigma = sigma*numpy.ones(n-1, 'd')

    wavelength, depth, rho, mu = [_dense(v, 'd')
                                  for v in (wavelength, depth, rho, mu)]

    rho, mu = [v*1e-6 for v in (rho, mu)]
    if sigma is not None:
        sigma = _dense(sigma, 'd')
        reflmodule._reflectivity_amplitude_rough(rho, mu, depth, sigma, wavelength, Q, R)
    else:
        reflmodule._reflectivity_amplitude (rho, mu, depth, wavelength, Q, R)
    return R
Example #11
    def test_multiple_problems(self):
        if MPI:
            # split the comm and run an instance of the Problem in each subcomm
            subcomm = self.comm.Split(self.comm.rank)
            prob = Problem(Group(), impl=impl, comm=subcomm)

            size = 5
            value = self.comm.rank + 1
            values = np.ones(size)*value

            A1 = prob.root.add('A1', IndepVarComp('x', values))
            C1 = prob.root.add('C1', ABCDArrayComp(size))

            prob.root.connect('A1.x', 'C1.a')
            prob.root.connect('A1.x', 'C1.b')

            prob.setup(check=False)
            prob.run()

            # check the first output array and store in result
            self.assertTrue(all(prob['C1.c'] == np.ones(size)*(value*2)))
            result = prob['C1.c']

            # gather the results from the separate processes/problems and check
            # for expected values
            results = self.comm.allgather(result)
            self.assertEqual(len(results), self.comm.size)

            for n in range(self.comm.size):
                expected = np.ones(size)*2*(n+1)
                self.assertTrue(all(results[n] == expected))
Example #12
  def compute_distances_no_loops(self, X):
    """
    Compute the distance between each test point in X and each training point
    in self.X_train using no explicit loops.

    Input / Output: Same as compute_distances_two_loops
    """
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train)) 
    #########################################################################
    # TODO:                                                                 #
    # Compute the l2 distance between all test points and all training      #
    # points without using any explicit loops, and store the result in      #
    # dists.                                                                #
    # HINT: Try to formulate the l2 distance using matrix multiplication    #
    #       and two broadcast sums.                                         #
    #########################################################################
    num_feature = X.shape[1]
    A = np.matrix(X)
    B = np.matrix(self.X_train)
    ImT = np.ones((num_feature, num_train))
    Itm = np.ones((num_test, num_feature))
    sums_AB = np.power(A, 2) * ImT + Itm * np.power(B.T, 2)
    prod_AB = A * B.T
    dists = np.power(sums_AB - 2 * prod_AB, 0.5)
    #########################################################################
    #                         END OF YOUR CODE                              #
    #########################################################################
    return dists
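The HINT can also be satisfied without np.matrix; a sketch of the same computation with plain broadcasting (np.maximum guards against tiny negative values from floating-point rounding):

import numpy as np

def l2_distances(X, X_train):
    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    sq_test = np.sum(X ** 2, axis=1)[:, np.newaxis]          # (num_test, 1)
    sq_train = np.sum(X_train ** 2, axis=1)[np.newaxis, :]   # (1, num_train)
    cross = X.dot(X_train.T)                                 # (num_test, num_train)
    return np.sqrt(np.maximum(sq_test + sq_train - 2 * cross, 0))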
Example #13
 def __init__(self,scales = [.3,.01,.01,.01,.01,.01]):
     pygame.init()
     pygame.joystick.init()
     self.controller = pygame.joystick.Joystick(0)
     self.controller.init()
     
     self.lStick = LeftStick(self.controller.get_axis(0),
                                self.controller.get_axis(1))
     self.rStick = RightStick(self.controller.get_axis(4),
                                  self.controller.get_axis(3))
     
     # dpad directions ordered as up down left right
     dPadDirs = getDirs(self.controller)
     
     self.dPad = DPad(dPadDirs)
     #self.dPad = DPad(self.controller.get_hat(0))
     self.trigger = Trigger(self.controller.get_axis(2))
     self.inUse = [False,False,False,False]
     
     length = 6
     self.offsets = np.zeros(length)
     self.uScale = np.ones(length)
     self.lScale = np.ones(length)
     self.driftLimit = .05
     self.calibrate()
     self.scales = np.array(scales)
     time.sleep(1)
     self.calibrate()
Example #14
def test_shuffle_kfold_stratifiedkfold_reproducibility():
    # Check that when the shuffle is True multiple split calls produce the
    # same split when random_state is set
    X = np.ones(15)  # Divisible by 3
    y = [0] * 7 + [1] * 8
    X2 = np.ones(16)  # Not divisible by 3
    y2 = [0] * 8 + [1] * 8

    kf = KFold(3, shuffle=True, random_state=0)
    skf = StratifiedKFold(3, shuffle=True, random_state=0)

    for cv in (kf, skf):
        np.testing.assert_equal(list(cv.split(X, y)), list(cv.split(X, y)))
        np.testing.assert_equal(list(cv.split(X2, y2)), list(cv.split(X2, y2)))

    kf = KFold(3, shuffle=True)
    skf = StratifiedKFold(3, shuffle=True)

    for cv in (kf, skf):
        for data in zip((X, X2), (y, y2)):
            try:
                np.testing.assert_equal(list(cv.split(*data)),
                                        list(cv.split(*data)))
            except AssertionError:
                pass
            else:
                raise AssertionError("The splits for data, %s, are same even "
                                     "when random state is not set" % data)
Example #15
  def testLoad(self):
    with self.cached_session():
      var = variables.Variable(np.zeros((5, 5), np.float32))
      self.evaluate(variables.global_variables_initializer())
      var.load(np.ones((5, 5), np.float32))

      self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
Example #16
  def testBasicLSTMCellWithDropout(self):

    def _is_close(x, y, digits=4):
      delta = abs(x - y)
      return delta < 10**(-digits)

    def _is_close_in(x, items, digits=4):
      for i in items:
        if _is_close(x, i, digits):
          return True
      return False

    keep_prob = 0.5
    c_high = 2.9998924946
    c_low = 0.999983298578
    h_low = 0.761552567265
    h_high = 0.995008519604
    num_units = 5
    allowed_low = [2, 3]

    with self.test_session() as sess:
      with variable_scope.variable_scope(
          "other", initializer=init_ops.constant_initializer(1)):
        x = array_ops.zeros([1, 5])
        c = array_ops.zeros([1, 5])
        h = array_ops.zeros([1, 5])
        state = core_rnn_cell_impl.LSTMStateTuple(c, h)
        cell = rnn_cell.LayerNormBasicLSTMCell(
            num_units, layer_norm=False, dropout_keep_prob=keep_prob)

        g, s = cell(x, state)
        sess.run([variables.global_variables_initializer()])
        res = sess.run([g, s], {
            x.name: np.ones([1, 5]),
            c.name: np.ones([1, 5]),
            h.name: np.ones([1, 5]),
        })

        # Since the returned tensors are of size [1,n]
        # get the first component right now.
        actual_h = res[0][0]
        actual_state_c = res[1].c[0]
        actual_state_h = res[1].h[0]

        # For each item in `c` (the cell inner state) check that
        # it is equal to one of the allowed values `c_high` (not
        # dropped out) or `c_low` (dropped out) and verify that the
        # corresponding item in `h` (the cell activation) is coherent.
        # Count the dropped activations and check that their number is
        # coherent with the dropout probability.
        dropped_count = 0
        self.assertTrue((actual_h == actual_state_h).all())
        for citem, hitem in zip(actual_state_c, actual_state_h):
          self.assertTrue(_is_close_in(citem, [c_low, c_high]))
          if _is_close(citem, c_low):
            self.assertTrue(_is_close(hitem, h_low))
            dropped_count += 1
          elif _is_close(citem, c_high):
            self.assertTrue(_is_close(hitem, h_high))
        self.assertIn(dropped_count, allowed_low)
Example #17
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    X, y = np.ones(4), [1, 1, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])

    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])

    X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])

    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])

    # Check if get_n_splits returns the number of folds
    assert_equal(5, StratifiedKFold(5).get_n_splits(X, y))
Example #18
def test_as_float_array():
    # Test function for as_float_array
    X = np.ones((3, 10), dtype=np.int32)
    X = X + np.arange(10, dtype=np.int32)
    # Checks that the return type is ok
    X2 = as_float_array(X, copy=False)
    np.testing.assert_equal(X2.dtype, np.float32)
    # Another test
    X = X.astype(np.int64)
    X2 = as_float_array(X, copy=True)
    # Checking that the array wasn't overwritten
    assert_true(as_float_array(X, False) is not X)
    # Checking that the new type is ok
    np.testing.assert_equal(X2.dtype, np.float64)
    # Here, X is of the right type, it shouldn't be modified
    X = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(X, copy=False) is X)
    # Test that if X is fortran ordered it stays
    X = np.asfortranarray(X)
    assert_true(np.isfortran(as_float_array(X, copy=True)))

    # Test the copy parameter with some matrices
    matrices = [
        np.matrix(np.arange(5)),
        sp.csc_matrix(np.arange(5)).toarray(),
        sparse_random_matrix(10, 10, density=0.10).toarray()
    ]
    for M in matrices:
        N = as_float_array(M, copy=True)
        N[0, 0] = np.nan
        assert_false(np.isnan(M).any())
Example #19
    def fitChiSq(self, sampleSpec, returnChiSq=False):

        if np.all(sampleSpec.wave == self.wave):
            newSampleSpec = sampleSpec
            grid = self.values
        else:
            print "error in function"
            newSampleSpec = sampleSpec.interpolate(self.wave)            
            minIDx = self.wave.searchsorted(sampleSpec.wave[0])
            maxIDx = self.wave.searchsorted(sampleSpec.wave[-1])
            grid = self.values[:,minIDx:maxIDx]

        
        if newSampleSpec.var is not None:
            var = newSampleSpec.var
        else:
            var = 1.

        if newSampleSpec.dq is not None:
            dqMask = newSampleSpec.dq
        else:
            dqMask = np.ones(grid.shape[1]).astype(bool)

        chiSq = ((grid[:,dqMask]-newSampleSpec.flux[dqMask])/var[dqMask])**2
        nu = (np.ones(grid.shape[0])*grid.shape[1]) - len(self.params) - 1
        redChiSq = np.sum(chiSq, axis=1) / nu

        if returnChiSq:
            return np.min(redChiSq), self.points[np.argmin(redChiSq)]
        else:
            return self.points[np.argmin(redChiSq)]
Example #20
    def __init__(self, *args):
        """ Initialization of the perceptron with given sizes.  """

        self.shape = args
        n = len(args)

        # Build layers
        self.layers = []
        # Input layer (+1 unit for bias)
        self.layers.append(np.ones(self.shape[0] + 1))
        # Hidden layer(s) + output layer
        for i in range(1, n):
            self.layers.append(np.ones(self.shape[i]))

        # Build weight matrices (randomized between -0.25 and +0.25 by reset() below)
        self.weights = []
        for i in range(n - 1):
            self.weights.append(np.zeros((self.layers[i].size,
                                          self.layers[i + 1].size)))

        # dw will hold last change in weights (for momentum)
        self.dw = [0, ] * len(self.weights)

        # Reset weights
        self.reset()
Example #21
def test_dtype(seed=1234):
    np.random.seed(seed)

    dtype = [
        ("coords", np.float64, (4, )),
        ("log_prior", np.float64),
        ("log_likelihood", np.float64),
        ("accepted", bool)
    ]

    coords = np.random.randn(4)
    state = State(coords)
    assert state.dtype == np.dtype(dtype)

    state = State(coords, face=10.0, blah=6, _hidden=None)
    dtype += [
        ("blah", int),
        ("face", float),
    ]
    assert state.dtype == np.dtype(dtype)

    state = State(coords, face=10.0, blah=6, _hidden=None,
                  matrix=np.ones((3, 1)))
    dtype += [
        ("matrix", float, (3, 1)),
    ]
    assert state.dtype == np.dtype(dtype)

    state = State(coords, face=10.0, blah=6, _hidden=None,
                  matrix=np.ones((3, 1)), vector=np.zeros(3))
    dtype += [
        ("vector", float, (3,)),
    ]
    assert state.dtype == np.dtype(dtype)
Example #22
  def _compareZeros(self, dtype, fully_defined_shape, use_gpu):
    with self.test_session(use_gpu=use_gpu):
      # Creates a tensor of non-zero values with shape 2 x 3.
      # NOTE(kearnes): The default numpy dtype associated with tf.string is
      # np.object (and can't be changed without breaking a lot of things), which
      # causes a TypeError in constant_op.constant below. Here we catch the
      # special case of tf.string and set the numpy dtype appropriately.
      if dtype == dtypes_lib.string:
        numpy_dtype = np.string_
      else:
        numpy_dtype = dtype.as_numpy_dtype
      if fully_defined_shape:
        d = constant_op.constant(
            np.ones((2, 3), dtype=numpy_dtype), dtype=dtype)
      else:
        d = array_ops.placeholder(dtype=dtype)
      # Constructs a tensor of zeros of the same dimensions and type as "d".
      z_var = array_ops.zeros_like(d)
      # Test that the type is correct
      self.assertEqual(z_var.dtype, dtype)
      # Test that the shape is correct
      if fully_defined_shape:
        self.assertEqual([2, 3], z_var.get_shape())

      # Test that the value is correct
      feed_dict = {}
      if not fully_defined_shape:
        feed_dict[d] = np.ones((2, 3), dtype=numpy_dtype)
      z_value = z_var.eval(feed_dict=feed_dict)
      self.assertFalse(np.any(z_value))
      self.assertEqual((2, 3), z_value.shape)
Example #23
def test_vec_to_sym_matrix():
    # Check error if unsuitable size
    vec = np.ones(31)
    assert_raises_regex(ValueError, 'Vector of unsuitable shape',
                        vec_to_sym_matrix, vec)

    # Check error if given diagonal shape incompatible with vec
    vec = np.ones(3)
    diagonal = np.zeros(4)
    assert_raises_regex(ValueError, 'incompatible with vector',
                        vec_to_sym_matrix, vec, diagonal)

    # Check output value is correct
    vec = np.ones(6, )
    sym = np.array([[sqrt(2), 1., 1.], [1., sqrt(2), 1.],
                    [1., 1., sqrt(2)]])
    assert_array_almost_equal(vec_to_sym_matrix(vec), sym)

    # Check output value is correct with separate diagonal
    vec = np.ones(3, )
    diagonal = np.ones(3)
    assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)

    # Check vec_to_sym_matrix is the inverse function of sym_matrix_to_vec
    # when diagonal is included
    assert_array_almost_equal(vec_to_sym_matrix(sym_matrix_to_vec(sym)), sym)

    # when diagonal is discarded
    vec = sym_matrix_to_vec(sym, discard_diagonal=True)
    diagonal = np.diagonal(sym) / sqrt(2)
    assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)
Example #24
def poly2pwl(polycost, Pmin, Pmax, npts):
    """Converts polynomial cost variable to piecewise linear.

    Converts the polynomial cost variable C{polycost} into a piece-wise linear
    cost by evaluating at zero and then at C{npts} evenly spaced points between
    C{Pmin} and C{Pmax}. If C{Pmin <= 0} (such as for reactive power, where
    C{P} really means C{Q}) it just uses C{npts} evenly spaced points between
    C{Pmin} and C{Pmax}.
    """
    pwlcost = polycost.copy()  # copy, so zeroing the cost data below does not clobber the input
    ## size of piece being changed
    m, n = polycost.shape
    ## change cost model
    pwlcost[:, MODEL]  = PW_LINEAR * ones(m)
    ## zero out old data
    pwlcost[:, COST:COST + n] = zeros(pwlcost[:, COST:COST + n].shape)
    ## change number of data points
    pwlcost[:, NCOST]  = npts * ones(m)

    for i in range(m):
        if Pmin[i] == 0:
            step = (Pmax[i] - Pmin[i]) / (npts - 1)
            xx = r_[Pmin[i]:Pmax[i] + step / 2:step]   # MATLAB-style Pmin:step:Pmax grid
        elif Pmin[i] > 0:
            step = (Pmax[i] - Pmin[i]) / (npts - 2)
            xx = r_[0, r_[Pmin[i]:Pmax[i] + step / 2:step]]
        elif Pmin[i] < 0 and Pmax[i] > 0:        ## for when P really means Q
            step = (Pmax[i] - Pmin[i]) / (npts - 1)
            xx = r_[Pmin[i]:Pmax[i] + step / 2:step]
        yy = totcost(polycost[i, :], xx)
        pwlcost[i,     COST:(COST + 2*(npts-1)    ):2] = xx
        pwlcost[i, (COST+1):(COST + 2*(npts-1) + 1):2] = yy

    return pwlcost
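For a feel of the breakpoint grid the docstring describes, here is the Pmin > 0 branch in isolation (the values are made up for illustration):

import numpy as np

Pmin, Pmax, npts = 10.0, 40.0, 4
# the required zero point first, then npts - 1 evenly spaced points from Pmin to Pmax
step = (Pmax - Pmin) / (npts - 2)
xx = np.r_[0, np.arange(Pmin, Pmax + step / 2, step)]
print(xx)   # [ 0. 10. 25. 40.]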
Example #25
def test_ovr_always_present():
    """Test that ovr works with classes that are always present or absent
    """
    # Note: this tests the case where _ConstantPredictor is utilised
    X = np.ones((10, 2))
    X[:5, :] = 0
    y = np.zeros((10, 3))
    y[5:, 0] = 1
    y[:, 1] = 1
    y[:, 2] = 1

    ovr = OneVsRestClassifier(LogisticRegression())
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict(X)
    assert_array_equal(np.array(y_pred), np.array(y))
    y_pred = ovr.decision_function(X)
    assert_equal(np.unique(y_pred[:, -2:]), 1)
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))

    # y has a constantly absent label
    y = np.zeros((10, 2))
    y[5:, 0] = 1  # variable label
    ovr = OneVsRestClassifier(LogisticRegression())
    assert_warns(UserWarning, ovr.fit, X, y)
    y_pred = ovr.predict_proba(X)
    assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
Example #26
def test_invalid_seed():
    seed = np.ones((5, 5))
    mask = np.ones((5, 5))
    assert_raises(ValueError, reconstruction, seed * 2, mask,
                  method='dilation')
    assert_raises(ValueError, reconstruction, seed * 0.5, mask,
                  method='erosion')
Example #27
def balanceTrials(n_trials, randomize, factors, use_type='int'):
    n_factors = len(factors)
    n_levels = [0] * n_factors
    min_trials = 1.0 #needs to be float or the later ceiling operation will fail
    for f in range(0, n_factors):
        n_levels[f] = len(factors[f])
        min_trials *= n_levels[f] #simulates use of prod(n_levels) in the original code
    
    N = int(math.ceil(n_trials / min_trials))
    min_trials = int(min_trials)  # used as array sizes below, so back to int

    output = []
    len1 = min_trials
    len2 = 1
    index = numpy.random.uniform(0, 1, N * min_trials).argsort()

    for level, factor in zip(n_levels, factors):
        len1 //= level
        factor = numpy.array(factor, dtype=use_type)

        # repeat each level len1 times, tile the block len2 times, N blocks in total
        out = numpy.tile(numpy.repeat(factor, len1), N * len2).astype(use_type)
        
        if randomize:
            out = [out[i] for i in index]
        
        len2 *= level
        output.append(out)
    
    return output
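A minimal usage sketch (assuming numpy and math are imported as in the snippet's module; the factor values are arbitrary):

colors = [0, 1]
sizes = [10, 20]
out = balanceTrials(8, False, [colors, sizes])
# out[0][i] pairs with out[1][i]; each of the 4 (color, size)
# combinations appears exactly N = ceil(8 / 4) = 2 times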
Example #28
    def _compute_multipliers(self, X, y):
        n_samples, n_features = X.shape

        K = self._gram_matrix(X)
        # Solves
        # min 1/2 x^T P x + q^T x
        # s.t.
        #  Gx \coneleq h
        #  Ax = b

        P = cvxopt.matrix(np.outer(y, y) * K)
        q = cvxopt.matrix(-1 * np.ones(n_samples))

        # -a_i \leq 0
        # TODO(tulloch) - modify G, h so that we have a soft-margin classifier
        G_std = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))
        h_std = cvxopt.matrix(np.zeros(n_samples))

        # a_i \leq c
        G_slack = cvxopt.matrix(np.diag(np.ones(n_samples)))
        h_slack = cvxopt.matrix(np.ones(n_samples) * self._c)

        G = cvxopt.matrix(np.vstack((G_std, G_slack)))
        h = cvxopt.matrix(np.vstack((h_std, h_slack)))

        A = cvxopt.matrix(y, (1, n_samples))
        b = cvxopt.matrix(0.0)

        solution = cvxopt.solvers.qp(P, q, G, h, A, b)

        # Lagrange multipliers
        return np.ravel(solution['x'])
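The comment block above states the QP in cvxopt's standard form. A tiny self-contained instance, separate from the SVM, showing the same cvxopt.solvers.qp call:

import numpy as np
import cvxopt

# min 1/2 x^T P x + q^T x   s.t.  Gx <= h, Ax = b
# toy instance: minimize (x0^2 + x1^2)/2 with x0 + x1 = 1, x >= 0
P = cvxopt.matrix(np.eye(2))
q = cvxopt.matrix(np.zeros(2))
G = cvxopt.matrix(-np.eye(2))     # -x <= 0, i.e. x >= 0
h = cvxopt.matrix(np.zeros(2))
A = cvxopt.matrix(np.ones((1, 2)))
b = cvxopt.matrix(1.0)
solution = cvxopt.solvers.qp(P, q, G, h, A, b)
print(np.ravel(solution['x']))    # ~ [0.5, 0.5]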
Example #29
    def world2pix(self, lon, lat, energy, combine=False):
        """Convert world to pixel coordinates.

        Parameters
        ----------
        lon, lat, energy

        Returns
        -------
        x, y, z or array with (x, y, z) as columns
        """
        lon = lon.to('deg').value
        lat = lat.to('deg').value
        origin = 0  # convention for gammapy
        x, y, _ = self.wcs.wcs_world2pix(lon, lat, 0, origin)

        z = self.energy_axis.world2pix(energy)

        shape = (x * y * z).shape
        x = x * np.ones(shape)
        y = y * np.ones(shape)
        z = z * np.ones(shape)

        if combine:
            x = np.array(x).flat
            y = np.array(y).flat
            z = np.array(z).flat
            return np.column_stack([z, y, x])
        else:
            return x, y, z
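The ones-multiplication above is one way to broadcast x, y, z to a common shape; np.broadcast_arrays does the same without allocating the ones (a sketch, not the gammapy code):

import numpy as np

x = np.arange(3)                 # (3,)
y = np.arange(2).reshape(2, 1)   # (2, 1)
z = np.float64(5.0)              # scalar
xb, yb, zb = np.broadcast_arrays(x, y, z)   # all (2, 3)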
Example #30
 def testDtype(self):
   with self.test_session():
     d = array_ops.fill([2, 3], 12., name="fill")
     self.assertEqual(d.get_shape(), [2, 3])
     # Test default type for both constant size and dynamic size
     z = array_ops.ones([2, 3])
     self.assertEqual(z.dtype, dtypes_lib.float32)
     self.assertEqual([2, 3], z.get_shape())
     self.assertAllEqual(z.eval(), np.ones([2, 3]))
     z = array_ops.ones(array_ops.shape(d))
     self.assertEqual(z.dtype, dtypes_lib.float32)
     self.assertEqual([2, 3], z.get_shape())
     self.assertAllEqual(z.eval(), np.ones([2, 3]))
     # Test explicit type control
     for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
                   dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
                   dtypes_lib.complex64, dtypes_lib.complex128,
                   dtypes_lib.int64, dtypes_lib.bool):
       z = array_ops.ones([2, 3], dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
       self.assertAllEqual(z.eval(), np.ones([2, 3]))
       z = array_ops.ones(array_ops.shape(d), dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
       self.assertAllEqual(z.eval(), np.ones([2, 3]))
Example #31
  def __getitem__(self, index):
    img_id = self.images[index]
    file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
    img_path = os.path.join(self.img_dir, file_name)
    ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    anns = self.coco.loadAnns(ids=ann_ids)
    num_objs = min(len(anns), self.max_objs)
    img = cv2.imread(img_path)
    height, width = img.shape[0], img.shape[1]
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
    s = max(img.shape[0], img.shape[1]) * 1.0
    rot = 0

    flipped = False
    if self.split == 'train':
      if not self.opt.not_rand_crop:
            # TODO: this is where multi-scale training can be changed.
        s = s  # * np.random.choice(np.arange(0.8, 1.5, 0.1))  # change 0.6 1.4
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        sf = self.opt.scale
        cf = self.opt.shift
        c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
        s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
      if np.random.random() < self.opt.aug_rot:  # rotate aug
        rf = self.opt.rotate
        rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)

      if np.random.random() < self.opt.flip:
        flipped = True
        img = img[:, ::-1, :]
        c[0] =  width - c[0] - 1
  
    trans_input = get_affine_transform(
      c, s, rot, [self.opt.input_res, self.opt.input_res])
    # inp1 = cv2.warpAffine(img, trans_input, 
    #                      (self.opt.input_res, self.opt.input_res),
    #                      flags=cv2.INTER_LINEAR)
    inp = cv2.warpAffine(img, trans_input, 
                         (self.opt.input_res, self.opt.input_res),
                         flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
          color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)

    output_res = self.opt.output_res
    input_res = self.opt.input_res
    num_joints = self.num_joints
    
    if self.opt.angle_norm and self.split == 'train': 
      # first read the annotations to get the center and head points and collect all the angles
      angle_list=[]
      gt_masks=[]
      box_sizes=[]
      for k in range(num_objs):
        ann = anns[k]
        bbox = self._coco_box_to_bbox(ann['bbox'])
        pts = np.array(ann['keypoints'][0:3], np.float32).reshape(num_joints, 3)#tmjx
        bbox[:2] = affine_transform(bbox[:2], trans_input)
        bbox[2:] = affine_transform(bbox[2:], trans_input)
        pts[0, :2] = affine_transform(pts[0, :2], trans_input)
        if flipped:
          bbox[[0, 2]] = width - bbox[[2, 0]] - 1
          pts[:, 0] = width - pts[:, 0] - 1
        h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
        ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
        # if ct[0]<0 or ct[0]>input_res - 1 or ct[1]<0 or ct[1]>input_res - 1:
        #       continue
        angle= math.atan2((pts[0, 0] - ct[0]), (pts[0, 1] - ct[1]))
        angle_list.append(angle)
        gt_masks.append([ct[0],ct[1],pts[0, 0],pts[0, 1]])
        box_sizes.append([w,h])
        # the code below computes the rotation angle
      # if len(angle_list)==0:
      #       return
      angle_list = np.array(angle_list) % np.pi  # first normalize into [0, pi)
      angle_int=(angle_list// (np.pi/9)).astype('int')
      angle_b=np.bincount(angle_int)
      index_rot=np.argmax(angle_b)
      ind_rot=(angle_list>(index_rot)*np.pi/9) *  (angle_list<=(index_rot+1)*np.pi/9)
      angle_rot=np.average(angle_list[ind_rot])
      angle_img_rot=angle_rot*(-180)/np.pi
      # this block rotates the image, the center-point heatmap and the keypoint heatmap
      matrix = cv2.getRotationMatrix2D(((input_res)/2.0,(input_res)/2.0),angle_img_rot,1)
      inp = cv2.warpAffine(inp,matrix,(self.opt.input_res,self.opt.input_res))
      gt_masks=np.array(gt_masks)
      cx, cy, hx, hy = np.split(  gt_masks, gt_masks.shape[-1], axis=-1)
      coordinates = np.stack([[cx, cy], [hx, hy]])  # [4, 2, nb_bbox, 1]
      # pad 1 to convert from format [x, y] to homogeneous
      # coordinates format [x, y, 1]
      coordinates = np.concatenate( (coordinates,  np.ones((2, 1, coordinates.shape[2], 1), coordinates.dtype)),  axis=1)  # [4, 3, nb_bbox, 1]
      coordinates = coordinates.transpose( (2, 0, 1, 3))  # [nb_bbox, 4, 3, 1]
      rotated_coords = np.matmul(matrix,  coordinates)  # [nb_bbox, 4, 2, 1]
      # rotated_coords = np.matmul(rotate_matrix,  coordinates)  # [nb_bbox, 4, 2, 1]
      rotated_coords = rotated_coords[..., 0].astype(np.float32)
      # end of rotation handling
    
    hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32)
    hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
    dense_kps = np.zeros((num_joints, 2, output_res, output_res), 
                          dtype=np.float32)
    dense_kps_mask = np.zeros((num_joints, output_res, output_res), 
                               dtype=np.float32)
    wh = np.zeros((self.max_objs, 2), dtype=np.float32)
    kps = np.zeros((self.max_objs, num_joints * 2), dtype=np.float32)
    reg = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8)
    hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32)
    hp_ind = np.zeros((self.max_objs * num_joints), dtype=np.int64)
    hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64)

    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    gt_det = []
    # scale coordinates by the input-to-output resolution ratio
    rotated_coords/=input_res/output_res
    box_sizes=np.array(box_sizes)
    box_sizes/=input_res/output_res
    num_objs=len(box_sizes)
    for k in range(num_objs):
      ann = anns[k]
      cls_id = int(ann['category_id']) - 1
      w,h = box_sizes[k]
      h = np.clip(h , 0,  output_res - 1)
      w = np.clip(w , 0, output_res - 1)
      
      if (h > 0 and w > 0) or (rot != 0):
        radius = gaussian_radius((math.ceil(h), math.ceil(w))) *1.2
        sqrt_wh = np.sqrt(np.sqrt(h*w))
        radius_w = radius * np.sqrt(w) / sqrt_wh
        radius_h = radius * np.sqrt(h) / sqrt_wh
        radius_w = self.opt.hm_gauss if self.opt.mse_loss else max(0, np.ceil(radius_w)) 
        radius_h = self.opt.hm_gauss if self.opt.mse_loss else max(0, np.ceil(radius_h)) 
        ct=rotated_coords[k][0]
        if ct[0]<0 or ct[0]>output_res - 1 or ct[1]<0 or ct[1]>output_res - 1:
              continue
        ct_int = ct.astype(np.int32)
        wh[k] = 1. * w, 1. * h
        ind[k] = ct_int[1] * output_res + ct_int[0]
        reg[k] = ct - ct_int
        reg_mask[k] = 1
        num_kpts = 1#pts[:, 2].sum()
        if num_kpts == 0:
          hm[cls_id, ct_int[1], ct_int[0]] = 0.9999
          reg_mask[k] = 0

        hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        hp_radius = self.opt.hm_gauss \
                    if self.opt.mse_loss else max(0, int(hp_radius)) 
        for j in range(num_joints):
          pts[j, :2]=rotated_coords[k][1]
          if pts[j, 0] >= 0 and pts[j, 0] < output_res and \
              pts[j, 1] >= 0 and pts[j, 1] < output_res:
            kps[k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
            kps_mask[k, j * 2: j * 2 + 2] = 1
            pt_int = pts[j, :2].astype(np.int32)
            hp_offset[k * num_joints + j] = pts[j, :2] - pt_int
            hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]
            hp_mask[k * num_joints + j] = 1
            if self.opt.dense_hp:
              # must be before draw center hm gaussian
              draw_dense_reg(dense_kps[j], hm[cls_id], ct_int, 
                              pts[j, :2] - ct_int, radius, is_offset=True)
              draw_gaussian(dense_kps_mask[j], ct_int, radius)
            draw_gaussian(hm_hp[j], pt_int, hp_radius)
        #TODO change
        angle= math.atan2((pts[0, 0] - ct[0]), (pts[0, 1] - ct[1]))
        # angle_list.append(angle)
        draw_gaussian(hm[cls_id], ct_int, [radius_w,radius_h,angle])
        # draw_gaussian(hm[cls_id], ct_int, radiusw,radius)
        gt_det.append([ct[0] - w / 2, ct[1] - h / 2, 
                       ct[0] + w / 2, ct[1] + h / 2, 1] + 
                       pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])
    if rot != 0:
      hm = hm * 0 + 0.9999
      reg_mask *= 0
      kps_mask *= 0
      
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
           'hps': kps, 'hps_mask': kps_mask}
    
    if self.opt.dense_hp:
      dense_kps = dense_kps.reshape(num_joints * 2, output_res, output_res)
      dense_kps_mask = dense_kps_mask.reshape(
        num_joints, 1, output_res, output_res)
      dense_kps_mask = np.concatenate([dense_kps_mask, dense_kps_mask], axis=1)
      dense_kps_mask = dense_kps_mask.reshape(
        num_joints * 2, output_res, output_res)
      ret.update({'dense_hps': dense_kps, 'dense_hps_mask': dense_kps_mask})
      del ret['hps'], ret['hps_mask']
    if self.opt.reg_offset:
      ret.update({'reg': reg})
    if self.opt.hm_hp:
      ret.update({'hm_hp': hm_hp})
    if self.opt.reg_hp_offset:
      ret.update({'hp_offset': hp_offset, 'hp_ind': hp_ind, 'hp_mask': hp_mask})
    if self.opt.debug > 0 or not self.split == 'train':
      gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
               np.zeros((1, 40), dtype=np.float32)
      meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}
      ret['meta'] = meta
# debugging code to visualize the generated heatmaps
    # debugger = Debugger(dataset=self.opt.dataset, ipynb=(self.opt.debug==3),
    #                     theme=self.opt.debugger_theme)
    # inp1 = inp.transpose(1,2,0)
    # inp1=(inp1*self.std + self.mean)*255.
    # self.debug(debugger, inp1,  ret)
    return ret
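The homogeneous-coordinates trick used above (pad [x, y] with a 1 so the 2x3 affine matrix can translate as well as rotate) in isolation; the point values are illustrative:

import numpy as np
import cv2

pts = np.array([[10.0, 20.0], [30.0, 40.0]])               # (n, 2)
matrix = cv2.getRotationMatrix2D((64.0, 64.0), 30.0, 1)    # (2, 3)
homo = np.concatenate([pts, np.ones((len(pts), 1))], axis=1)  # (n, 3)
rotated = homo.dot(matrix.T)                                # (n, 2)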
Example #32
 def __init__(self):
     super(NetE, self).__init__()
     self.w = Parameter(Tensor(np.ones([16, 16, 3, 3]).astype(np.float32)), name='w')
Example #33
# Here, since our data are on a grid, we can use ``connectivity=None`` to
# trigger optimized grid-based code, and run the clustering algorithm.

titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
    X,
    n_jobs=1,
    threshold=threshold,
    connectivity=None,
    n_permutations=n_permutations)
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
    p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])

###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# This method can also be used in this context to correct for small
# variances [1]_:
titles.append(r'$\mathbf{C_{hat}}$')
stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test(
Example #34
def _psd_lomb(t, x, window=None, freq=None, ofac=1):
    """
    Periodogram estimate for irregular sampling, Lomb-Scargle method

    Parameters
    ----------
    t : ndarray, (n,)
        time, monotonic but possibly irregularly spaced
    x : ndarray, (n,)
        signal, real or complex
    window : None or ndarray, (n,)
        if not None, a uniformly-sampled data window
    freq : ndarray (nfreq,)
        evaluation frequencies in cycles per unit time
    ofac : integer
        oversampling factor; defaults to 1

    Returns
    -------
    P : Bunch

        - P.F: Frequencies (units: cycles per time unit) of the Pxx estimates.
        - P.Pxx: One-sided auto-spectral density estimate for real(`x`).

        if `x` is complex:

        - P.Pyy: as above, for imag(`x`)
        - P.Pxy: complex cross-spectrum between real(`x`) and imag(`x`)

    Notes
    -----
    If `freq` is None, `P.F` is calculated to coincide with the
    Fourier frequencies for a series of n uniformly distributed
    times from min(`t`) to max(`t`). The mean and Nyquist are omitted
    because they are irrelevant in this context.

    PSD units are [`x`-units^2 per cycle per unit time]

    """
    out = Bunch()

    # copy inputs
    x = np.array(x)
    t = np.array(t, dtype=float)

    # remove mean
    x -= x.mean()

    n = len(x)

    if window is None:
        w = np.ones(t.shape, dtype=float)
    else:
        # interpolate window from uniform grid to nonuniform t
        t_uniform = np.linspace(np.min(t), np.max(t), n)
        w = np.interp(t, t_uniform, window)

        x *= w

    # Estimated record length as n delta-t, where delta-t
    # is the *average* time per sample.
    delta_t = (t[-1] - t[0]) / (n-1)
    reclen = n * delta_t

    if freq is None:
        ofac = int(round(ofac))
        nf = n * ofac  # number of "Fourier frequencies" based on oversampling
        # Simplify by ignoring the 0 and Nyquist frequencies.
        # Divide by reclen to convert cycles/record to cycles/time unit.
        freq = np.arange(1, nf//2) / reclen

    out.F = freq

    xr = np.real(x)

    # signal.lombscargle returns "(A**2) * N/4 for a harmonic signal
    # with amplitude A for sufficiently large N."
    # It takes *angular* frequencies as 3rd argument.
    freq_radian = freq * 2 * np.pi
    psdnorm = 2 * delta_t * n / (w**2).sum()
    out.Pxx = psdnorm * signal.lombscargle(t, xr, freq_radian)

    if x.dtype.kind == 'f':
        return out

    out.Pyy = psdnorm * signal.lombscargle(t, x.imag, freq_radian)

    # If we need to limit memory usage and don't want to use
    # Cython, we can segment the frequencies and loop over the
    # segments.  The speed penalty will be minimal.
    out.Pxy = psdnorm * _ls_cross(t, x, freq_radian)

    return out
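The scaling quoted in the comment above ("(A**2) * N/4 for a harmonic signal") can be checked directly against scipy.signal.lombscargle; a standalone sketch with a single sinusoid at irregular times:

import numpy as np
from scipy import signal

rng = np.random.RandomState(0)
n, amp, f0 = 1000, 2.0, 0.1
t = np.sort(rng.uniform(0, 100.0, n))
x = amp * np.cos(2 * np.pi * f0 * t)
pgram = signal.lombscargle(t, x, np.array([2 * np.pi * f0]))  # angular frequency
print(pgram[0], amp**2 * n / 4)   # approximately equal for large n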
Example #35
    def matching_gen(A, K, D, m, eta, gamma, model_var):
        K += epsilon

        mseed = np.size(np.where(A.flat)) // 2

        if type(model_var) == tuple:
            mv1, mv2 = model_var
        else:
            mv1, mv2 = model_var, model_var

        if mv1 in ('powerlaw', 'power_law'):
            Fd = D**eta
        elif mv1 in ('exponential', ):
            Fd = np.exp(eta * D)

        if mv2 in ('powerlaw', 'power_law'):
            Fk = K**gamma
        elif mv2 in ('exponential', ):
            Fk = np.exp(gamma * K)

        Ff = Fd * Fk * np.logical_not(A)
        u, v = np.where(np.triu(np.ones((n, n)), 1))

        for ii in range(mseed, m):
            C = np.append(0, np.cumsum(Ff[u, v]))
            r = np.sum(np.random.random() * C[-1] >= C)
            uu = u[r]
            vv = v[r]
            A[uu, vv] = A[vv, uu] = 1

            updateuu, = np.where(np.inner(A, A[:, uu]))
            updateuu = np.delete(updateuu, np.where(updateuu == uu))
            updateuu = np.delete(updateuu, np.where(updateuu == vv))

            c1 = np.append(A[:, uu], A[uu, :])
            for i in range(len(updateuu)):
                j = updateuu[i]
                c2 = np.append(A[:, j], A[j, :])

                use = np.logical_or(c1, c2)
                use[uu] = use[uu + n] = use[j] = use[j + n] = 0
                ncon = np.sum(c1[use]) + np.sum(c2[use])
                if ncon == 0:
                    K[uu, j] = K[j, uu] = epsilon
                else:
                    K[uu, j] = K[j, uu] = (
                        2 / ncon * np.sum(np.logical_and(c1[use], c2[use])) +
                        epsilon)

            updatevv, = np.where(np.inner(A, A[:, vv]))
            updatevv = np.delete(updatevv, np.where(updatevv == uu))
            updatevv = np.delete(updatevv, np.where(updatevv == vv))

            c1 = np.append(A[:, vv], A[vv, :])
            for i in range(len(updatevv)):
                j = updatevv[i]
                c2 = np.append(A[:, j], A[j, :])

                use = np.logical_or(c1, c2)
                use[vv] = use[vv + n] = use[j] = use[j + n] = 0
                ncon = np.sum(c1[use]) + np.sum(c2[use])
                if ncon == 0:
                    K[vv, j] = K[j, vv] = epsilon
                else:
                    K[vv, j] = K[j, vv] = (
                        2 / ncon * np.sum(np.logical_and(c1[use], c2[use])) +
                        epsilon)

            Ff = Fd * Fk * np.logical_not(A)

        return A
Example #36
    def deg_gen(A, K, D, m, eta, gamma, model_var, s_fun):
        mseed = np.size(np.where(A.flat)) // 2

        k = np.sum(A, axis=1)

        if type(model_var) == tuple:
            mv1, mv2 = model_var
        else:
            mv1, mv2 = model_var, model_var

        if mv1 in ('powerlaw', 'power_law'):
            Fd = D**eta
        elif mv1 in ('exponential', ):
            Fd = np.exp(eta * D)

        if mv2 in ('powerlaw', 'power_law'):
            Fk = K**gamma
        elif mv2 in ('exponential', ):
            Fk = np.exp(gamma * K)

        P = Fd * Fk * np.logical_not(A)
        u, v = np.where(np.triu(np.ones((n, n)), 1))

        b = np.zeros((m, ), dtype=int)

        #        print(mseed)
        #        print(np.shape(u),np.shape(v))
        #        print(np.shape(b))
        #        print(np.shape(A[u,v]))
        #        print(np.shape(np.where(A[u,v])), 'sqishy')
        #        print(np.shape(P), 'squnnaq')

        #b[:mseed] = np.where(A[np.ix_(u,v)])
        b[:mseed] = np.squeeze(np.where(A[u, v]))
        #print(mseed, m)
        for i in range(mseed, m):
            C = np.append(0, np.cumsum(P[u, v]))
            r = np.sum(np.random.random() * C[-1] >= C)
            uu = u[r]
            vv = v[r]
            k[uu] += 1
            k[vv] += 1

            if mv2 in ('powerlaw', 'power_law'):
                Fk[:, uu] = Fk[uu, :] = s_fun(k, k[uu])**gamma
                Fk[:, vv] = Fk[vv, :] = s_fun(k, k[vv])**gamma
            elif mv2 in ('exponential', ):
                Fk[:, uu] = Fk[uu, :] = np.exp(s_fun(k, k[uu]) * gamma)
                Fk[:, vv] = Fk[vv, :] = np.exp(s_fun(k, k[vv]) * gamma)

            P = Fd * Fk

            b[i] = r

            P[u[b[:i]], v[b[:i]]] = P[v[b[:i]], u[b[:i]]] = 0

            A[u[r], v[r]] = A[v[r], u[r]] = 1
            #P[b[u[:i]], b[v[:i]]] = P[b[v[:i]], b[u[:i]]] = 0

            #A[uu,vv] = A[vv,uu] = 1


#        indx = v*n + u
#        indx[b]
#
#        nH = np.zeros((n,n))
#        nH.ravel()[indx[b]]=1
#
#        nG = np.zeros((n,n))
#        nG[ u[b], v[b] ]=1
#        nG = nG + nG.T
#
#        print(np.shape(np.where(A != nG)))
#
#        import pdb
#        pdb.set_trace()

        return A
Example #37
    def clu_gen(A, K, D, m, eta, gamma, model_var, x_fun):
        mseed = np.size(np.where(A.flat)) // 2

        A = A > 0

        if type(model_var) == tuple:
            mv1, mv2 = model_var
        else:
            mv1, mv2 = model_var, model_var

        if mv1 in ('powerlaw', 'power_law'):
            Fd = D**eta
        elif mv1 in ('exponential', ):
            Fd = np.exp(eta * D)

        if mv2 in ('powerlaw', 'power_law'):
            Fk = K**gamma
        elif mv2 in ('exponential', ):
            Fk = np.exp(gamma * K)

        c = clustering_coef_bu(A)
        k = np.sum(A, axis=1)

        Ff = Fd * Fk * np.logical_not(A)
        u, v = np.where(np.triu(np.ones((n, n)), 1))

        #print(mseed, m)
        for i in range(mseed + 1, m):
            C = np.append(0, np.cumsum(Ff[u, v]))
            r = np.sum(np.random.random() * C[-1] >= C)
            uu = u[r]
            vv = v[r]
            A[uu, vv] = A[vv, uu] = 1
            k[uu] += 1
            k[vv] += 1

            bu = A[uu, :].astype(bool)
            bv = A[vv, :].astype(bool)
            su = A[np.ix_(bu, bu)]
            sv = A[np.ix_(bv, bv)]

            bth = np.logical_and(bu, bv)
            c[bth] += 2 / (k[bth]**2 - k[bth])
            c[uu] = np.size(np.where(su.flat)) / (k[uu] * (k[uu] - 1))
            c[vv] = np.size(np.where(sv.flat)) / (k[vv] * (k[vv] - 1))
            c[k <= 1] = 0
            bth[uu] = 1
            bth[vv] = 1

            k_result = x_fun(c, bth)

            #print(np.shape(k_result))
            #print(np.shape(K))
            #print(K)
            #print(np.shape(K[bth,:]))

            K[bth, :] = k_result
            K[:, bth] = k_result.T

            if mv2 in ('powerlaw', 'power_law'):
                Ff[bth, :] = Fd[bth, :] * K[bth, :]**gamma
                Ff[:, bth] = Fd[:, bth] * K[:, bth]**gamma
            elif mv2 in ('exponential', ):
                Ff[bth, :] = Fd[bth, :] * np.exp(K[bth, :] * gamma)
                Ff[:, bth] = Fd[:, bth] * np.exp(K[:, bth] * gamma)

            Ff = Ff * np.logical_not(A)

        return A
Example #38
def test_combine_misfits():
    source, targets = scenario('wellposed', 'noisefree')

    p = ToyProblem(name='toy_problem',
                   ranges={
                       'north': gf.Range(start=-10., stop=10.),
                       'east': gf.Range(start=-10., stop=10.),
                       'depth': gf.Range(start=0., stop=10.)
                   },
                   base_source=source,
                   targets=targets)

    ngx, ngy, ngz = 11, 11, 11
    xg = num.zeros((ngz * ngy * ngx, 3))

    xbounds = p.get_parameter_bounds()
    cx = num.linspace(xbounds[0][0], xbounds[0][1], ngx)
    cy = num.linspace(xbounds[1][0], xbounds[1][1], ngy)
    cz = num.linspace(xbounds[2][0], xbounds[2][1], ngz)

    xg[:, 0] = num.tile(cx, ngy * ngz)
    xg[:, 1] = num.tile(num.repeat(cy, ngx), ngz)
    xg[:, 2] = num.repeat(cz, ngx * ngy)

    misfitss = p.evaluate_many(xg)
    # misfitss[imodel, itarget, 0], misfitss[imodel, itarget, 1]
    gms = p.combine_misfits(misfitss)
    gms_contrib = p.combine_misfits(misfitss, get_contributions=True)

    # gms[imodel]
    # gms_contrib[imodel, itarget]

    bweights = num.ones((2, p.ntargets))
    gms_2 = p.combine_misfits(misfitss, extra_weights=bweights)
    gms_2_contrib = p.combine_misfits(misfitss,
                                      extra_weights=bweights,
                                      get_contributions=True)

    # gms_2[imodel, ibootstrap]
    # gms_2_contrib[imodel, ibootstrap, itarget]

    for ix, x in enumerate(xg):
        misfits = p.evaluate(x)
        # misfits[itarget, 0], misfits[itarget, 1]
        gm = p.combine_misfits(misfits)
        # gm is scalar
        t.assert_equal(gm, gms[ix])

        gm_contrib = p.combine_misfits(misfits, get_contributions=True)

        assert_ae(gms_contrib[ix, :], gm_contrib)

        gm_2 = p.combine_misfits(misfits, extra_weights=bweights)

        assert gm_2[0] == gm
        assert gm_2[1] == gm
        assert gms_2[ix, 0] == gm
        assert gms_2[ix, 1] == gm

        gm_2_contrib = p.combine_misfits(misfits,
                                         extra_weights=bweights,
                                         get_contributions=True)

        assert_ae(gm_2_contrib[0, :], gm_contrib)
        assert_ae(gm_2_contrib[1, :], gm_contrib)
        assert_ae(gms_2_contrib[ix, 0, :], gm_contrib)
        assert_ae(gms_2_contrib[ix, 1, :], gm_contrib)
Beispiel #39
0
    pass

std2 = data.std() ** 2
data = (data - data.mean()) / data.std() # Calculating anomaly and normalizing
time = np.arange(0, data.size) * dt + t0 # Time array in time units of your choice 
alpha, _, _ = kpy.wavelet.ar1(data) # Lag-1 autocorrelation for white noise
mother = kpy.wavelet.Morlet(6.) # Morlet mother wavelet with wavenumber=6

wave, scales, freqs, coi, dj, s0, J = kpy.wavelet.cwt(data, dt, dj=1./12, s0=-1, J=-1, wavelet=mother)
power = (np.abs(wave)) ** 2 # Wavelet power spectrum
period = 1. / freqs

print(len(scales), 'scales')

signif, fft_theor = kpy.wavelet.significance(1.0, dt, scales, 0, alpha, wavelet=mother)
sig95 = np.ones([1, data.size]) * signif[:, None]
sig95 = power / sig95 # Where ratio > 1, power is significant

# Calculates the global wavelet spectrum and determines its significance level.
glbl_power = std2  * power.mean(axis=1)
dof = data.size - scales # Correction for padding at edges
glbl_signif, tmp = kpy.wavelet.significance(std2 , dt, scales, 1, alpha,
                                        dof=dof, wavelet=mother)

# First sub-plot, the original time series anomaly.
ax = plt.axes([0.1, 0.75, 0.65, 0.2])
ax.plot(time, data, 'k', linewidth=1.5)
ax.set_title('a) %s' % (title, ))
if units != '':
  ax.set_ylabel(r'%s [$%s$]' % (label, units,))
else:
  ax.set_ylabel(r'%s' % (label, ))
Beispiel #40
0
])
y = np.concatenate(
    [phi_toplot, phi_toplot + 2 * np.pi, phi_toplot, phi_toplot + 2 * np.pi])
plot(x, y, 'o', markersize=1)
xlabel('Theta phase (rad)')
ylabel('SWR PCA phase (rad)')

subplot(2, 3, 5)
H, xedges, yedges = np.histogram2d(y, x, 50)
H = gaussFilt(H, (3, 3))
imshow(H, origin='lower', interpolation='nearest', aspect='auto')

# 3d plot
order = np.argsort(phi)
swr_modth = swr_modth[order]
phi = phi[order]
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=phi.min(), vmax=phi.max())
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=jet)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
tmp = np.arange(len(phi))
for idx in range(len(swr_modth)):
    line = swr_modth[idx]
    colorVal = scalarMap.to_rgba(phi[idx])
    ax.plot(times,
            np.ones(len(swr_modth[idx])) * tmp[idx],
            line,
            color=colorVal)
import numpy as np
import pickle
import keras
from keras.models import Model , Sequential
from keras.layers import Dense, Input, Reshape, Lambda, Concatenate
from keras import backend as K
import tensorflow as tf
from keras import objectives , optimizers, callbacks
import matplotlib.pyplot as plt
import h5py
import os
import Kalman_tools
from Kalman_tools import expectation, maximization, EM_step, E_log_P_x_and_z, KF_predict

#np.random.seed(423)

w_dim, z_dim, v_dim, x_dim, u_dim = 4, 4, 4, 2, 2
intermediate_dim = 4
    
mu_0, Sig_0 = np.zeros([z_dim, 1]), np.eye(z_dim)

# State model: A (dynamics), b (state offset), H (input mixing), Q (process noise)
A = np.eye(z_dim) + np.random.uniform(-0.1, 0.1, z_dim * z_dim).reshape([z_dim, z_dim])
b = np.zeros([z_dim, 1])
H = np.ones([z_dim, v_dim]) / v_dim
Q = np.eye(z_dim)

# Observation model: C (emission), d (observation offset), R (observation noise)
C = np.ones([w_dim, z_dim]) / z_dim + np.random.uniform(-0.1, 0.1, w_dim * z_dim).reshape([w_dim, z_dim])
d = np.zeros([w_dim, 1])
R = np.eye(w_dim)
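
# Illustrative one-step Kalman prediction with the model above (a hedged
# sketch using the standard linear-Gaussian equations; the actual
# Kalman_tools API may differ):
v = np.zeros([v_dim, 1])             # control input
z_pred = A.dot(mu_0) + b + H.dot(v)  # predicted latent mean
P_pred = A.dot(Sig_0).dot(A.T) + Q   # predicted latent covariance
x_pred = C.dot(z_pred) + d           # predicted observation mean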

Beispiel #42
0
    def _set_view_slice(self):
        """Set the view given the indices to slice with."""
        self._new_empty_slice()
        not_disp = self._dims_not_displayed

        # Check if requested slice outside of data range
        indices = np.array(self._slice_indices)
        extent = self._extent_data
        if np.any(
                np.less(
                    [indices[ax] for ax in not_disp],
                    [extent[0, ax] for ax in not_disp],
                )) or np.any(
                    np.greater(
                        [indices[ax] for ax in not_disp],
                        [extent[1, ax] for ax in not_disp],
                    )):
            return
        self._empty = False

        if self.multiscale:
            # If 3d rendering just show lowest level of multiscale
            if self._ndisplay == 3:
                self.data_level = len(self.data) - 1

            # Slice currently viewed level
            level = self.data_level
            indices = np.array(self._slice_indices)
            downsampled_indices = (indices[not_disp] /
                                   self.downsample_factors[level, not_disp])
            downsampled_indices = np.round(
                downsampled_indices.astype(float)).astype(int)
            downsampled_indices = np.clip(
                downsampled_indices, 0, self.level_shapes[level, not_disp] - 1)
            indices[not_disp] = downsampled_indices

            scale = np.ones(self.ndim)
            for d in self._dims_displayed:
                scale[d] = self.downsample_factors[self.data_level][d]
            self._transforms['tile2data'].scale = scale

            if self._ndisplay == 2:
                for d in self._dims_displayed:
                    indices[d] = slice(
                        self.corner_pixels[0, d],
                        self.corner_pixels[1, d] + 1,
                        1,
                    )
                self._transforms['tile2data'].translate = (
                    self.corner_pixels[0] *
                    self._transforms['tile2data'].scale)
            image = self.data[level][tuple(indices)]
            image_indices = indices

            # Slice thumbnail
            indices = np.array(self._slice_indices)
            downsampled_indices = (
                indices[not_disp] /
                self.downsample_factors[self._thumbnail_level, not_disp])
            downsampled_indices = np.round(
                downsampled_indices.astype(float)).astype(int)
            downsampled_indices = np.clip(
                downsampled_indices,
                0,
                self.level_shapes[self._thumbnail_level, not_disp] - 1,
            )
            indices[not_disp] = downsampled_indices

            thumbnail_source = self.data[self._thumbnail_level][tuple(indices)]
        else:
            self._transforms['tile2data'].scale = np.ones(self.ndim)
            image_indices = self._slice_indices
            image = self.data[image_indices]

            # For single-scale we don't request a separate thumbnail_source
            # from the ChunkLoader because in ImageSlice.chunk_loaded we
            # call request.thumbnail_source() and it knows to just use the
            # image itself if there is no explicit thumbnail_source.
            thumbnail_source = None

        # Load our images, might be sync or async.
        data = SliceDataClass(self, image_indices, image, thumbnail_source)
        self._load_slice(data)
Beispiel #43
0
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0):
        self.a = a

    def fit(self, X, Y):
        return self

    def predict(self, T):
        return T.shape[0]

    def score(self, X=None, Y=None):
        return 1. / (1 + np.abs(self.a))


X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) / 2

##############################################################################
# Tests


def test_kfold():
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
    y = [0, 0, 1, 1, 2]
    assert_raises(ValueError, cval.StratifiedKFold, y, 3)

    # Check all indices are returned in the test folds
    kf = cval.KFold(300, 3)
def p04mt3d(exe_name_mf, exe_name_mt, model_ws, mixelm):
    nlay = 1
    nrow = 100
    ncol = 100
    delr = 10
    delc = 10
    delv = 1
    Lx = (ncol - 1) * delr
    Ly = (nrow - 1) * delc
    Ls = np.sqrt(Lx ** 2 + Ly ** 2)
    v = 1.
    prsity = 0.14
    q = v * prsity
    al = 2.
    trpt = .1
    q0 = 0.01
    c0 = 1000.

    perlen_mf = 1000.
    perlen_mt = 1000.
    hk = 1.
    laytyp = 0

    modelname_mf = 'p04_mf'
    mf = flopy.modflow.Modflow(modelname=modelname_mf, model_ws=model_ws, exe_name=exe_name_mf)
    dis = flopy.modflow.ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol,
                                   delr=delr, delc=delc, top=0., botm=[0 - delv],
                                   perlen=perlen_mf)
    ibound = np.ones((nlay, nrow, ncol), dtype=int) * -1
    ibound[:, 1:nrow - 1, 1:ncol-1] = 1

    # set strt as a linear gradient at a 45 degree angle
    h1 = q * Ls
    x = dis.sr.xcentergrid
    y = dis.sr.ycentergrid
    a = -1
    b = -1
    c = 1
    d = abs(a*x + b*y + c) / np.sqrt(2)
    strt = h1 - d / Ls * h1

    bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)
    lpf = flopy.modflow.ModflowLpf(mf, hk=hk, laytyp=laytyp)
    wel = flopy.modflow.ModflowWel(mf, stress_period_data=[[0, 79, 20, q0]])
    pcg = flopy.modflow.ModflowPcg(mf)
    lmt = flopy.modflow.ModflowLmt(mf)
    mf.write_input()
    mf.run_model(silent=True)

    modelname_mt = 'p04_mt'
    mt = flopy.mt3d.Mt3dms(modelname=modelname_mt, model_ws=model_ws, 
                           exe_name=exe_name_mt, modflowmodel=mf)
    btn = flopy.mt3d.Mt3dBtn(mt, icbund=1, prsity=prsity, sconc=0)
    dceps = 1.e-5
    nplane = 1
    npl = 0
    nph = 16
    npmin = 2
    npmax = 32
    dchmoc=1.e-3
    nlsink = nplane
    npsink = nph
    adv = flopy.mt3d.Mt3dAdv(mt, mixelm=mixelm, dceps=dceps, nplane=nplane,
                             npl=npl, nph=nph, npmin=npmin, npmax=npmax,
                             nlsink=nlsink, npsink=npsink, dchmoc=dchmoc,
                             percel=0.5)
    dsp = flopy.mt3d.Mt3dDsp(mt, al=al, trpt=trpt)
    spd = {0:[0, 79, 20, c0, 2]}
    ssm = flopy.mt3d.Mt3dSsm(mt, stress_period_data=spd)
    gcg = flopy.mt3d.Mt3dGcg(mt)
    mt.write_input()
    fname = os.path.join(model_ws, 'MT3D001.UCN')
    if os.path.isfile(fname):
        os.remove(fname)
    mt.run_model(silent=True)
    
    fname = os.path.join(model_ws, 'MT3D001.UCN')
    ucnobj = flopy.utils.UcnFile(fname)
    times = ucnobj.get_times()
    conc = ucnobj.get_alldata()

    fname = os.path.join(model_ws, 'MT3D001.OBS')
    if os.path.isfile(fname):
        cvt = mt.load_obs(fname)
    else:
        cvt = None

    fname = os.path.join(model_ws, 'MT3D001.MAS')
    mvt = mt.load_mas(fname)

    return mf, mt, conc, cvt, mvt
C1 = (.5 + (1/3)*np.cos(24) - (4/3)*np.cos(6)) / np.sin(6)
C2 = 4/3
x_true = lambda t : C1 * np.sin(t) + C2 * np.cos(t) - (1/3) * np.cos(4*t)

# Boundaries
x0 = 1
T = 6
xT = 0.5

# a)

dt = 0.1
t = np.arange(0, T + dt, dt)
n = t.size

v = -2 * np.ones(n - 2)
u = np.ones(n - 3)
A = (1 / dt ** 2) * (np.diag(v) + np.diag(u, 1) + np.diag(u, -1))
A += np.eye(n - 2)

b = 5 * np.cos(4 * t[1:-1])
b[0] = b[0] - x0 / dt ** 2
b[-1] = b[-1] - xT / dt ** 2
b = b.reshape((-1, 1))

A9 = A.copy()
A10 = b.copy()

# b)

x_int = scipy.linalg.solve(A, b)
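
# Sanity check (an added sketch, not in the original): with the analytic
# solution x_true defined above, the finite-difference solve on the
# interior points should agree to O(dt**2).
x_num = np.concatenate(([x0], x_int.flatten(), [xT]))
print(np.max(np.abs(x_num - x_true(t))))  # small discretization error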
reveal_type(np.ones_like(A))  # E: numpy.ndarray
reveal_type(np.ones_like(C))  # E: numpy.ndarray
reveal_type(np.ones_like(B))  # E: SubClass
reveal_type(np.ones_like(B, dtype=np.int64))  # E: numpy.ndarray

reveal_type(np.empty_like(A))  # E: numpy.ndarray
reveal_type(np.empty_like(C))  # E: numpy.ndarray
reveal_type(np.empty_like(B))  # E: SubClass
reveal_type(np.empty_like(B, dtype=np.int64))  # E: numpy.ndarray

reveal_type(np.full_like(A, i8))  # E: numpy.ndarray
reveal_type(np.full_like(C, i8))  # E: numpy.ndarray
reveal_type(np.full_like(B, i8))  # E: SubClass
reveal_type(np.full_like(B, i8, dtype=np.int64))  # E: numpy.ndarray

reveal_type(np.ones(1))  # E: numpy.ndarray
reveal_type(np.ones([1, 1, 1]))  # E: numpy.ndarray

reveal_type(np.full(1, i8))  # E: numpy.ndarray
reveal_type(np.full([1, 1, 1], i8))  # E: numpy.ndarray

reveal_type(np.indices([1, 2, 3]))  # E: numpy.ndarray
reveal_type(np.indices([1, 2, 3], sparse=True))  # E: tuple[numpy.ndarray]

reveal_type(np.fromfunction(func, (3, 5)))  # E: SubClass

reveal_type(np.identity(10))  # E: numpy.ndarray

reveal_type(np.atleast_1d(A))  # E: numpy.ndarray
reveal_type(np.atleast_1d(C))  # E: numpy.ndarray
reveal_type(np.atleast_1d(A, A))  # E: list[numpy.ndarray]
def Construction_A(Nx, Ny, dx, Neuf_points, k2_eau, k2_bois, gamma_eau, gamma_bois, rho_eau, v_eau, p_source,
                   SourceCylindrique, Source_Lineaire, Source_Ponctuelle, Map, \
                   N_PML, Source_Map, Q_map, coeff, centre_bois_x, centre_bois_y, Nx_Bois, Ny_Bois, alpha_Map, omega,
                   B_eau, PML_mode=1, TF_SF=True):
    h = dx
    # ********************** Building matrix A ************************

    # The coefficient order is always
    # [p(i-2,j),p(i-1,j) ,p(i,j-2),p(i,j-1),p(i,j),p(i+1,j),p(i+2,j),p(i,j+1),p(i,j+2)]

    # Case 1:
    if Neuf_points:
        Coeff1 = [0, 1, 0, 1, -(4 - k2_eau * h ** 2), 1, 0, 1, 0]
    else:
        # 9-point version
        # [p(i-1,j-1),p(i-1,j) ,p(i-1,j+1),p(i,j-1),p(i,j),p(i,j+1),p(i+1,j-1),p(i+1,j),p(i+1,j+1)]
        Coeff1 = [1, 4, 1, 4, -11 + 6 * h ** 2 * k2_eau, 4, 1, 4, 1]

    # Case 2:
    if Neuf_points:
        Coeff2 = [0, 1, 0, 1, -(4 - k2_bois * h ** 2), 1, 0, 1, 0]
    else:
        # 9-point version
        # [p(i-1,j-1),p(i-1,j) ,p(i-1,j+1),p(i,j-1),p(i,j),p(i,j+1),p(i+1,j-1),p(i+1,j),p(i+1,j+1)]
        Coeff2 = [1, 4, 1, 4, -11 + 6 * h ** 2 * k2_bois, 4, 1, 4, 1]

    # Cases 3 to 10:

    Coeff3 = Coeff_Frontiere(gamma_eau, gamma_bois, -1 / np.sqrt(2), -1 / np.sqrt(2))
    Coeff4 = Coeff_Frontiere(gamma_eau, gamma_bois, 0, -1)
    Coeff5 = Coeff_Frontiere(gamma_bois, gamma_eau, 1 / np.sqrt(2), -1 / np.sqrt(2))  # -ny
    Coeff6 = Coeff_Frontiere(gamma_bois, gamma_eau, 1, 0)
    Coeff7 = Coeff_Frontiere(gamma_bois, gamma_eau, 1 / np.sqrt(2), 1 / np.sqrt(2))
    Coeff8 = Coeff_Frontiere(gamma_bois, gamma_eau, 0, 1)
    Coeff9 = Coeff_Frontiere(gamma_eau, gamma_bois, -1 / np.sqrt(2), 1 / np.sqrt(2))
    Coeff10 = Coeff_Frontiere(gamma_eau, gamma_bois, -1, 0)

    # Cases 11 and 12 (triangle)
    # Case 11
    Nx11 = -np.cos(coeff / 2)
    Ny11 = -np.sin(coeff / 2)
    Coeff11 = Coeff_Frontiere(gamma_eau, gamma_bois, Nx11, Ny11)
    # Case 12
    Nx12 = np.cos(coeff / 2)
    Ny12 = -np.sin(coeff / 2)
    Coeff12 = Coeff_Frontiere(gamma_eau, gamma_bois, Nx12, Ny12)

    # Case 13 (circle)
    # See the loop further down

    # Cases 14 to 21 (PML): handled in the functions below

    # Case 22 (source): option 2
    # Coeff22 = [0, 1, 0, 1, -(4 - k2_eau * h ** 2), 1, 0, 1, 0]

    Dict_Coeff = {1: Coeff1, 2: Coeff2, 3: Coeff3, 4: Coeff4, 5: Coeff5, 6: Coeff6, 7: Coeff7, 8: Coeff8, 9: Coeff9,
                  10: Coeff10, 11: Coeff11, 12: Coeff12}

    # A = np.zeros([Nx * Ny, Nx * Ny], dtype=complex)
    b = np.zeros([Nx * Ny], dtype=complex)
    b_TFSF = np.zeros([Nx * Ny], dtype=complex)

    data_A = []
    ligne_A = []
    colonne_A = []

    # Matrix without the wood
    data_Q = []
    ligne_Q = []
    colonne_Q = []

    # Q = np.zeros([Nx * Ny, Nx * Ny], dtype=int)

    if PML_mode == 2:
        PML_Range = 22
    elif PML_mode == 1:
        PML_Range = 21
    else:
        raise ValueError('PML_mode must be 1 or 2')

    Source_mask = np.ones([Ny, Nx], dtype=complex) * np.finfo(float).eps
    Source_mask[1:-1, 1:-1] = 0
    Source_mask[N_PML + 2:Nx - N_PML - 2, N_PML + 2:Nx - N_PML - 2] = 1
    
    #    Source_mask[N_PML-1,N_PML-1:Nx-N_PML] = 0
    #    Source_mask[N_PML-1:Nx-N_PML,N_PML-1] = 0
    #    Source_mask[Nx-N_PML,N_PML-1:Nx-N_PML] = 0
    #    Source_mask[N_PML-1:Nx-N_PML+1,Nx-N_PML] = 0

    for i in range(Nx):
        for j in range(Ny):
            L = p(i, j, Nx)

            Type = int(Map[i, j])

            if 14 <= Type <= PML_Range:
                if PML_mode == 1:
                    Coefficient = Coeff_PML(Type, i, j, h, Nx, Ny, k2_eau, v_eau, N_PML)
                if PML_mode == 2:
                    alpha = alpha_Map[i, j]
                    Coefficient = Coeff_PML2(Type, h, Nx, Ny, omega, B_eau, alpha, rho_eau)

            elif Type == 13:
                Nx13 = (i - centre_bois_x) / coeff
                # y-coordinate of the circle center
                centre_y = centre_bois_y - Ny_Bois / 2 + np.sqrt(coeff ** 2 - (Nx_Bois / 2) ** 2)
                Ny13 = (j - centre_y) / coeff
                Coefficient = Coeff_Frontiere(gamma_eau, gamma_bois, Nx13, Ny13)
            else:
                if Type != 0:
                    Coefficient = Dict_Coeff[Type]

            if Type in (1, 2) and Neuf_points:
                Position = [p(i - 1, j - 1, Nx), p(i - 1, j, Nx), p(i - 1, j + 1, Nx), p(i, j - 1, Nx), p(i, j, Nx),
                            p(i, j + 1, Nx),
                            p(i + 1, j - 1, Nx), p(i + 1, j, Nx), p(i + 1, j + 1, Nx)]
            else:
                Position = [p(i - 2, j, Nx), p(i - 1, j, Nx), p(i, j - 2, Nx), p(i, j - 1, Nx), p(i, j, Nx),
                            p(i + 1, j, Nx), p(i + 2, j, Nx),
                            p(i, j + 1, Nx), p(i, j + 2, Nx)]
            if TF_SF:
                data_Q.append(Q_map[i, j])
                ligne_Q.append(L)
                colonne_Q.append(L)

            for k, pos in enumerate(Position):
                # if np.logical_and(pos >= 0, pos < (Nx * Ny)):
                if Coefficient[k] != 0:
                    data_A.append(Coefficient[k])
                    ligne_A.append(L)
                    colonne_A.append(pos)
                    # A[L, int(pos)] = Coefficient[k]
            b[L] = Source_Map[i, j] * Source_mask[i, j] * h ** 2 * rho_eau * p_source

    A_sp = scipy.sparse.coo_matrix((data_A, (ligne_A, colonne_A)), shape=(Nx * Ny, Nx * Ny), dtype=complex)
    A_sp = A_sp.tocsc()
    if TF_SF:
        Q_sp = scipy.sparse.coo_matrix((data_Q, (ligne_Q, colonne_Q)), shape=(Nx * Ny, Nx * Ny), dtype=complex)
        Q_sp = Q_sp.tocsc()
        b_TFSF = (Q_sp.dot(A_sp) - A_sp.dot(Q_sp)).dot(b)
    else:
        b_TFSF = b

    return A_sp, b_TFSF
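
# Side note (an illustrative sketch added here, not from the original): the
# TF/SF right-hand side above is the commutator (Q A - A Q) b, which vanishes
# everywhere except on the interface between the total-field region (Q == 1)
# and the scattered-field region (Q == 0). A 1-D analogue:
import numpy as np
import scipy.sparse

n = 8
A1 = scipy.sparse.diags([-2 * np.ones(n), np.ones(n - 1), np.ones(n - 1)], [0, 1, -1]).tocsc()
Q1 = scipy.sparse.diags((np.arange(n) < n // 2).astype(float)).tocsc()
b1 = np.ones(n)
print((Q1.dot(A1) - A1.dot(Q1)).dot(b1))  # nonzero only at indices n//2 - 1 and n//2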
qr = q[0]
qi = q[1]
qj = q[2]
qk = q[3]
Qr = np.array([[qi**2+qj**2+qk**2,-qr*qi, -qr*qj,-qr*qk],
    [-qr*qi,qr**2+qj**2+qk**2,-qi*qj,-qi*qk],
    [-qr*qj,-qi*qj,qr**2+qi**2+qk**2,-qj*qk],
    [-qr*qk, -qi*qk, -qj*qk, qr**2+qi**2+qj**2]])

dnormq_dq = (qr**2 + qi**2 + qj**2 + qk**2)**(-1.5)*Qr

print(dnormq_dq)

Sigma = np.ones((19,19))

Sigma_q = dnormq_dq.dot(Sigma[3:7,3:7].dot(dnormq_dq.T))



#Sigma[0:3,0:3] = Sigma[0:3,0:3]
Sigma[0:3,3:7] = np.dot(Sigma[0:3,3:7].copy(),dnormq_dq.T)
Beispiel #49
0
 def transform(self, X):
     Xfit = np.where(X >= self.nu, X, np.maximum(2*self.nu - self.nu*self.nu/X, -1e-10 * np.ones(X.shape)))
     return Xfit
Beispiel #50
0
from skimage import io
# Note: sklearn.utils.linear_assignment_ was removed in scikit-learn 0.23;
# scipy.optimize.linear_sum_assignment is the modern replacement.
from sklearn.utils.linear_assignment_ import linear_assignment
import glob
import time
import argparse
import cv2

video_path = input("input the video name : ")
vid = cv2.VideoCapture(video_path)

frame_h, frame_w = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(
    vid.get(cv2.CAP_PROP_FRAME_WIDTH))

mot_tracker = Sort()

mask = np.ones((frame_h, frame_w), dtype=np.uint8)
old_gray = np.zeros_like(mask)
dets = np.array([[831, 156, 1002, 249, 100]], dtype=np.uint16)
num = 0

while True:
    print(num)
    ret, frame = vid.read()

    if not ret:
        break

    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    mask, track_bbs_ids = mot_tracker.update(dets, old_gray, frame_gray, mask)
    print(track_bbs_ids)
Beispiel #51
0
    def testjoining(self):
        numberToDo = 1
        dim = 2
        level = 2
        siglength = iisignature.siglength(dim, level)
        for fixedPoint, inputDim, fixed in [(float('nan'), dim, False),
                                            (0.1, dim - 1, True)]:
            pathLength = 10

            def makePath():
                p = numpy.random.uniform(size=(pathLength, dim))
                if fixed:
                    p[:, -1] = fixedPoint * numpy.arange(pathLength)
                return p

            paths = [makePath() for i in range(numberToDo)]
            sig = numpy.vstack(
                [iisignature.sig(path, level) for path in paths])

            joinee = numpy.zeros((numberToDo, siglength))
            for i in range(1, pathLength):
                displacements = [
                    path[i:(i + 1), :] - path[(i - 1):i, :] for path in paths
                ]
                displacement = numpy.vstack(displacements)
                if fixed:
                    displacement = displacement[:, :-1]
                joinee = iisignature.sigjoin(joinee, displacement, level,
                                             fixedPoint)
            self.assertLess(
                diff(sig, joinee), 0.0001,
                "fullSig matches sig" + (" with fixed Dim" if fixed else ""))

            extra = numpy.random.uniform(size=(numberToDo, inputDim))
            bumpedExtra = 1.001 * extra
            bumpedJoinee = 1.001 * joinee
            base = numpy.sum(
                iisignature.sigjoin(joinee, extra, level, fixedPoint))
            bump1 = numpy.sum(
                iisignature.sigjoin(bumpedJoinee, extra, level, fixedPoint))
            bump2 = numpy.sum(
                iisignature.sigjoin(joinee, bumpedExtra, level, fixedPoint))
            derivsOfSum = numpy.ones((numberToDo, siglength))
            calculated = iisignature.sigjoinbackprop(derivsOfSum, joinee,
                                                     extra, level, fixedPoint)
            self.assertEqual(len(calculated), 3 if fixed else 2)
            diff1 = (bump1 - base) - numpy.sum(calculated[0] *
                                               (bumpedJoinee - joinee))
            diff2 = (bump2 - base) - numpy.sum(calculated[1] *
                                               (bumpedExtra - extra))
            #print ("\n",bump1,bump2,base,diff1,diff2)
            self.assertLess(
                numpy.abs(diff1), 0.000001,
                "diff1 as expected " + (" with fixed Dim" if fixed else ""))
            self.assertLess(
                numpy.abs(diff2), 0.00001,
                "diff2 as expected " + (" with fixed Dim" if fixed else ""))
            if fixed:
                bumpedFixedPoint = fixedPoint * 1.01
                bump3 = numpy.sum(
                    iisignature.sigjoin(joinee, extra, level,
                                        bumpedFixedPoint))
                diff3 = (bump3 - base -
                         numpy.sum(calculated[2] *
                                   (bumpedFixedPoint - fixedPoint)))
                #print("\n",bump3,base, fixedPoint, bumpedFixedPoint, calculated[2])
                self.assertLess(numpy.abs(diff3), 0.00001, "diff3")
Beispiel #52
0
 def transform(self, X):
     Xfit = np.where(X >= self.limit, self.limit * np.ones(X.shape), X)
     return Xfit
Beispiel #53
0
 def test_slicing_fails_non_namedtuple(self):
     v = value_impl.to_value(np.ones([10, 10, 10], dtype=np.float32), None,
                             context_stack_impl.context_stack)
     with self.assertRaisesRegex(TypeError,
                                 'only supported for named tuples'):
         _ = v[:1]
def moving_average(interval, window_size):
    window_size = int(window_size)
    window = np.ones(window_size) / float(window_size)
    averages = np.convolve(interval, window, 'same')
    # Drop the leading samples where the window was only partially filled
    return averages[window_size - 1:averages.size]
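
# Quick usage sketch (added for illustration): smoothing a noisy ramp with a
# 5-sample window; the output is window_size - 1 samples shorter than the input.
import numpy as np
signal = np.arange(20, dtype=float) + np.random.randn(20)
print(moving_average(signal, 5).shape)  # (16,)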
Beispiel #55
0
print(lst)
mtx = np.arange(0, 10)
mtx2 = np.arange(0, 15, 3)

print(mtx)
print(mtx2)

n1, n2 = 1, 9

# create coordinate matrices, like a range but split over rows and columns
n1, n2 = np.mgrid[2:5, 4:7]

# vector of zeros
z = np.zeros(7)
# vector of ones
one = np.ones(8)

# matrix of zeros
zz = np.zeros((3, 4))

# matrix of ones
ones = np.ones((3, 4))

# matrix filled with a number
fl = np.full((6, 7), 34)

# range vector from 0 to 15 with a step of 0.1
mtx2 = np.arange(0, 15, 0.1)
mtx2
# returns a vector where you just say how many numbers you want,
# with no need to worry about the step
Beispiel #56
0
print(np.array(signal_label))
print(label)
print(signal_label * label)
print(np.matmul(signal_label, label))
print(np.transpose(signal_label * label))
ans = np.divide(np.transpose(signal_label * label),
                np.matmul(signal_label, label))
print(ans)
print(ans[:, 1].tolist())
label_signal = np.array([[.444444444, .34782609], [0.55555555, .65217391]])
print(np.matmul(signal_label, label_signal))
print(np.matmul(signal_label, ans[:, 1]))

matrix = []
p = 1 / 5
prob_vec = (p * np.ones(5)).tolist()
target_label_space = {1, 2, 3, 4}
for label in target_label_space:
    matrix.append(prob_vec)


def generate_item_conditional_signal_priors(item_list, target_label_space,
                                            signal_space):
    item_conditional_signal_priors = {}
    signal_space_length = len(signal_space)

    for item in item_list:
        matrix = []
        for label in target_label_space:
            prob_vec = np.random.random(signal_space_length)
            prob_vec /= prob_vec.sum()
            matrix.append(prob_vec)
        item_conditional_signal_priors[item] = matrix

    return item_conditional_signal_priors
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""

#Fitting Linear Regression to Set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,y_train)

#Predicting Test result
y_pred = regressor.predict(X_test)

#Building optimal model using Backward Elimination
import statsmodels.api as sm
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)

#Option 1 - automatic - with function
def backwardElimination(x, sl):
    numVars = len(x[0])
    for i in range(0, numVars):
        regressor_OLS = sm.OLS(y, x).fit()
        maxVar = max(regressor_OLS.pvalues).astype(float)
        if maxVar > sl:
            for j in range(0, numVars - i):
                if (regressor_OLS.pvalues[j].astype(float) == maxVar):
                    x = np.delete(x, j, 1)
    regressor_OLS.summary()
    return x
 
SL = 0.05
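
# Hypothetical usage sketch (the original snippet stops here; it assumes X is
# the design matrix with the intercept column prepended above and y is the
# target from the preceding split):
X_modeled = backwardElimination(X, SL)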
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state : int, RandomState instance or None (default)
        Determines random number generation for rotation matrix initialization.
        Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element.  This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the unit
    # hypersphere centered at the origin.  This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
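
# Minimal usage sketch (added for illustration; assumes numpy as np and the
# sklearn helpers check_random_state / as_float_array imported by the
# enclosing module): discretize a random two-column embedding.
rng = np.random.RandomState(0)
emb = np.abs(rng.randn(100, 2)) + 1e-3  # keep rows away from the origin
labels = discretize(emb, random_state=0)
print(labels.shape)  # (100,)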
Beispiel #59
0
def run():
    train_data, valid_data, tests_data = load_data_dicts(
        N_train, N_valid, N_tests)
    parser, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes)
    N_weight_types = len(parser.names)
    hyperparams = VectorParser()
    hyperparams['log_param_scale'] = np.full(N_weight_types,
                                             init_log_param_scale)
    hyperparams['log_alphas'] = np.full((N_iters, N_weight_types),
                                        init_log_alphas)
    hyperparams['invlogit_betas'] = np.full((N_iters, N_weight_types),
                                            init_invlogit_betas)
    for name in parser.names:
        hyperparams[('rescale', name)] = np.full(N_iters, init_rescales)
    fixed_hyperparams = VectorParser()
    fixed_hyperparams['log_L2_reg'] = np.full(N_weight_types, init_log_L2_reg)

    def primal_optimizer(hyperparam_vect, i_hyper):
        def indexed_loss_fun(w, L2_vect, i_iter):
            rs = RandomState(
                (seed, i_hyper,
                 i_iter))  # Deterministic seed needed for backwards pass.
            idxs = rs.randint(N_train, size=batch_size)
            return loss_fun(w, train_data['X'][idxs], train_data['T'][idxs],
                            L2_vect)

        learning_curve_dict = defaultdict(list)

        def callback(x, v, g, i_iter):
            if i_iter % thin == 0:
                learning_curve_dict['learning_curve'].append(
                    loss_fun(x, **train_data))
                learning_curve_dict['grad_norm'].append(np.linalg.norm(g))
                learning_curve_dict['weight_norm'].append(np.linalg.norm(x))
                learning_curve_dict['velocity_norm'].append(np.linalg.norm(v))

        cur_hyperparams = hyperparams.new_vect(hyperparam_vect)
        rs = RandomState((seed, i_hyper))
        W0 = fill_parser(parser, np.exp(cur_hyperparams['log_param_scale']))
        W0 *= rs.randn(W0.size)
        alphas = np.exp(cur_hyperparams['log_alphas'])
        betas = logit(cur_hyperparams['invlogit_betas'])
        L2_reg = fill_parser(parser, np.exp(fixed_hyperparams['log_L2_reg']))
        W_opt = sgd_parsed(grad(indexed_loss_fun),
                           kylist(W0, alphas, betas, L2_reg),
                           parser,
                           callback=callback)
        return W_opt, learning_curve_dict

    def hyperloss(hyperparam_vect, i_hyper):
        W_opt, _ = primal_optimizer(hyperparam_vect, i_hyper)
        return loss_fun(W_opt, **train_data)

    hyperloss_grad = grad(hyperloss)

    meta_results = defaultdict(list)
    old_metagrad = [np.ones(hyperparams.vect.size)]

    def meta_callback(hyperparam_vect, i_hyper, metagrad=None):
        x, learning_curve_dict = primal_optimizer(hyperparam_vect, i_hyper)
        cur_hyperparams = hyperparams.new_vect(hyperparam_vect.copy())
        for field in cur_hyperparams.names:
            meta_results[field].append(cur_hyperparams[field])
        meta_results['train_loss'].append(loss_fun(x, **train_data))
        meta_results['valid_loss'].append(loss_fun(x, **valid_data))
        meta_results['tests_loss'].append(loss_fun(x, **tests_data))
        meta_results['test_err'].append(frac_err(x, **tests_data))
        meta_results['learning_curves'].append(learning_curve_dict)
        if metagrad is not None:
            meta_results['meta_grad_magnitude'].append(
                np.linalg.norm(metagrad))
            meta_results['meta_grad_angle'].append(np.dot(old_metagrad[0], metagrad) \
                                                   / (np.linalg.norm(metagrad)*
                                                      np.linalg.norm(old_metagrad[0])))
        old_metagrad[0] = metagrad
        print "Meta Epoch {0} Train loss {1:2.4f} Valid Loss {2:2.4f}" \
              " Test Loss {3:2.4f} Test Err {4:2.4f}".format(
            i_hyper, meta_results['train_loss'][-1], meta_results['valid_loss'][-1],
            meta_results['train_loss'][-1], meta_results['test_err'][-1])

    initial_hypergrad = hyperloss_grad(hyperparams.vect, 0)
    parsed_init_hypergrad = hyperparams.new_vect(initial_hypergrad.copy())
    final_result = adam(hyperloss_grad, hyperparams.vect, meta_callback,
                        N_meta_iter, meta_alpha)
    meta_callback(final_result, N_meta_iter)
    parser.vect = None  # No need to pickle zeros
    return meta_results, parser, parsed_init_hypergrad
Beispiel #60
0
def ap(S, maxits, convits, dampfact):
    n = S.shape[0]

    # Create empty availability (A) and responsibility (R) matrices
    # and an empty exemplars list
    A = np.zeros((n, n))
    R = np.zeros((n, n))
    exemplars = []
    count = 0

    # Start iterations
    for m in range(0, maxits):
        # Compute responsibilities
        Rold = R
        AS = A + S
        Y = AS.max(1)
        I = AS.argmax(1)
        for i in range(n):
            AS[i, I[i]] = -1000000
        Y2 = AS.max(1)
        temp = np.repeat(Y, n).reshape(n, n)
        R = S - temp
        for i in range(n):
            R[i, I[i]] = S[i, I[i]] - Y2[i]
        R = (1 - dampfact) * R + dampfact * Rold

        # Compute availabilities
        Aold = A
        Rp = np.maximum(R, 0)
        for i in range(n):
            Rp[i, i] = R[i, i]
        temp2 = np.ones((n, 1))
        temp3 = Rp.sum(0)
        A = np.kron(temp2, temp3)
        A = A - Rp
        diag = np.diag(A)
        A = np.minimum(A, 0)
        for i in range(n):
            A[i, i] = diag[i]
        A = (1 - dampfact) * A + dampfact * Aold

        # Points with positive responsibility plus availability on the
        # diagonal are the current exemplars
        tempexemplars = []
        for i in range(0, n):
            if (R[i, i] + A[i, i]) > 0:
                tempexemplars.append(i)

        # Stop once the exemplar set has stayed unchanged for convits iterations
        if tempexemplars == exemplars:
            count = count + 1
            if count == convits:
                break
        else:
            count = 0
            exemplars = list(tempexemplars)

    # Assign each data point to its most similar exemplar
    assignment = np.zeros(n)

    for i in range(0, n):
        closest = 0
        currentbest = -1000000
        for j in range(0, len(exemplars)):
            if S[i, exemplars[j]] > currentbest:
                currentbest = S[i, exemplars[j]]
                closest = exemplars[j]
            if i == exemplars[j]:
                closest = exemplars[j]
                break
        assignment[i] = closest

    return assignment
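
# Usage sketch (added for illustration): similarity as negative squared
# Euclidean distance, with the median similarity on the diagonal as the
# shared preference value.
import numpy as np
pts = np.concatenate([np.random.randn(5, 2), np.random.randn(5, 2) + 5])
S_demo = -((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1)
np.fill_diagonal(S_demo, np.median(S_demo))
print(ap(S_demo, maxits=200, convits=20, dampfact=0.9))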