def testRsvd(self): 
     n = 100 
     m = 80
     A = sppy.rand((m, n), 0.5)
     
     ks = [10, 20, 30, 40] 
     q = 2 
     
     lastError = numpy.linalg.norm(A.toarray())        
     
     for k in ks: 
         U, s, V = sppy.linalg.rsvd(A, k, q)
         
         nptst.assert_array_almost_equal(U.T.dot(U), numpy.eye(k))
         nptst.assert_array_almost_equal(V.T.dot(V), numpy.eye(k))
         A2 = (U*s).dot(V.T)
         
         error = numpy.linalg.norm(A.toarray() - A2)
         self.assertTrue(error <= lastError)
         lastError = error 
         
         #Compare versus exact svd 
         U, s, V = numpy.linalg.svd(numpy.array(A.toarray()))
         inds = numpy.flipud(numpy.argsort(s))[0:k*2]
         U, s, V = Util.indSvd(U, s, V, inds)
         
         Ak = (U*s).dot(V.T)
         
         error2 = numpy.linalg.norm(A.toarray() - Ak)
         self.assertTrue(error2 <= error)
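For context, the test above exercises a randomized SVD. Below is a minimal NumPy sketch of that kind of algorithm (a Halko-style range finder with q power iterations and a small oversampling p); it is an illustrative stand-in for a dense ndarray A, not the actual sppy.linalg.rsvd implementation.

import numpy

def rsvd_sketch(A, k, q=2, p=10):
    # Randomized SVD sketch: sample the range of A, apply q power iterations,
    # then take an exact SVD in the reduced space.
    m, n = A.shape
    Omega = numpy.random.randn(n, k + p)          # random test matrix
    Y = A.dot(Omega)                              # sampled range of A
    for _ in range(q):
        Y = A.dot(A.T.dot(Y))                     # power iterations sharpen the spectrum
    Q, _ = numpy.linalg.qr(Y)                     # orthonormal basis for the range
    B = Q.T.dot(A)                                # project A into the small subspace
    Uhat, s, Vt = numpy.linalg.svd(B, full_matrices=False)
    U = Q.dot(Uhat)
    return U[:, :k], s[:k], Vt[:k].T              # same (U, s, V) convention as the test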
Example #2
def eGs(GAF, GFA, kA, kF, expQFF):
    """
    Calculate eGAF, the probabilities of transitions from apparently open to
    shut states regardless of when the transition occurs. These are the Laplace
    transform of eGAF(t) at s=0. Used to calculate initial HJC vectors (HJC92).
    eGAF*(s=0) = (I - GAF * (I - expQFF) * GFA)^-1 * GAF * expQFF
    To calculate eGFA, exchange A and F in the function call.

    Parameters
    ----------
    GAF : array_like, shape (kA, kF)
    GFA : array_like, shape (kF, kA)
    kA : int
        Number of open states in the kinetic scheme.
    kF : int
        Number of shut states in the kinetic scheme.
    expQFF : array_like, shape (kF, kF)

    Returns
    -------
    eGAF : array_like, shape (kA, kF)
    """

    temp = np.eye(kA) - np.dot(np.dot(GAF, np.eye(kF) - expQFF), GFA)
    eGAF = np.dot(np.dot(nplin.inv(temp), GAF), expQFF)
    return eGAF
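A minimal usage sketch of the swap the docstring describes (exchange A and F to obtain eGFA), assuming eGs above and its numpy / numpy.linalg imports are in scope. The dimensions, matrices, and expQAA/expQFF values below are made up for illustration only; they do not come from any real kinetic scheme.

import numpy as np

kA, kF = 2, 3
GAF = np.full((kA, kF), 1.0 / kF)       # hypothetical open -> shut transition probabilities
GFA = np.full((kF, kA), 1.0 / kA)       # hypothetical shut -> open transition probabilities
expQFF = 0.5 * np.eye(kF)               # placeholder for the shut-shut matrix exponential
expQAA = 0.5 * np.eye(kA)               # placeholder for the open-open matrix exponential

eGAF = eGs(GAF, GFA, kA, kF, expQFF)    # apparently open -> shut
eGFA = eGs(GFA, GAF, kF, kA, expQAA)    # apparently shut -> open: A and F swapped everywhere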
  def __init__(self, left_low=True, right_low=True, name="VelocityDrivetrainModel"):
    super(VelocityDrivetrainModel, self).__init__(name)
    self._drivetrain = drivetrain.Drivetrain(left_low=left_low,
                                             right_low=right_low)
    self.dt = 0.005
    self.A_continuous = numpy.matrix(
        [[self._drivetrain.A_continuous[1, 1], self._drivetrain.A_continuous[1, 3]],
         [self._drivetrain.A_continuous[3, 1], self._drivetrain.A_continuous[3, 3]]])

    self.B_continuous = numpy.matrix(
        [[self._drivetrain.B_continuous[1, 0], self._drivetrain.B_continuous[1, 1]],
         [self._drivetrain.B_continuous[3, 0], self._drivetrain.B_continuous[3, 1]]])
    self.C = numpy.matrix(numpy.eye(2))
    self.D = numpy.matrix(numpy.zeros((2, 2)))

    self.A, self.B = self.ContinuousToDiscrete(self.A_continuous,
                                               self.B_continuous, self.dt)

    # FF * X = U (steady state)
    self.FF = self.B.I * (numpy.eye(2) - self.A)

    self.PlaceControllerPoles([0.67, 0.67])
    self.PlaceObserverPoles([0.02, 0.02])

    self.G_high = self._drivetrain.G_high
    self.G_low = self._drivetrain.G_low
    self.resistance = self._drivetrain.resistance
    self.r = self._drivetrain.r
    self.Kv = self._drivetrain.Kv
    self.Kt = self._drivetrain.Kt

    self.U_max = self._drivetrain.U_max
    self.U_min = self._drivetrain.U_min
Example #4
def main():
    # create centers
    means = [[1.0, 1.0], [2.5, 2.5]]
    data = []
    rect_start_x = 2.0
    rect_start_y = 2.0
    rect_width = 1.0
    rec_height = 1.0
    for i, mean in enumerate(means):
        class_centers = mvn(mean, np.eye(2), 10)
        class_data = [np.hstack((mvn(class_center, np.eye(2), 10), np.zeros((10, 1)) + i))
                               for class_center in class_centers]
        data.append(class_data)
    data = np.array(data).reshape(200, 3)
    rectangle = np.array([[rect_start_x, rect_start_y],
                          [rect_start_x + rect_width, rect_start_y],
                          [rect_start_x + rect_width, rect_start_y + rec_height],
                          [rect_start_x, rect_start_y + rec_height],
                          [rect_start_x, rect_start_y]]).reshape(5, 2)

    pts = get_points_in_rectangle(rectangle, data)
    posterior = calc_posterior(means, pts)
    print posterior
    for label in np.unique(data[:, 2]):
        # plot each class's points separately (data rows are [x, y, class label])
        class_data = data[data[:, 2] == label]
        plt.plot(class_data[:, 0], class_data[:, 1], 'o', markersize=8)
    plt.plot(rectangle[:, 0], rectangle[:, 1])
    plt.show()
Example #5
def _make_eye(shape):
    if len(shape) == 2:
        n = shape[0]
        return numpy.eye(n, dtype=numpy.float32)
    m = shape[0]
    n = shape[1]
    return numpy.array([numpy.eye(n, dtype=numpy.float32)] * m)
Example #6
    def fit(self, X, y, **params):
        """
        Fit Ridge regression model

        Parameters
        ----------
        X : numpy array of shape [n_samples,n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        self._set_params(**params)

        X = np.asanyarray(X, dtype=np.float)
        y = np.asanyarray(y, dtype=np.float)

        n_samples, n_features = X.shape

        X, y, Xmean, ymean = self._center_data(X, y)

        if n_samples > n_features:
            # w = inv(X^t X + alpha*Id) * X.T y
            self.coef_ = linalg.solve(np.dot(X.T, X) + self.alpha * np.eye(n_features), np.dot(X.T, y))
        else:
            # w = X.T * inv(X X^t + alpha*Id) y
            self.coef_ = np.dot(X.T, linalg.solve(np.dot(X, X.T) + self.alpha * np.eye(n_samples), y))

        self._set_intercept(Xmean, ymean)
        return self
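The two branches above rely on the standard ridge identity (X^T X + alpha*I)^-1 X^T y = X^T (X X^T + alpha*I)^-1 y, choosing whichever system is smaller to solve. A small self-contained NumPy check of that equivalence (shapes and data here are arbitrary):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(10, 4)
y = rng.randn(10)
alpha = 0.5

# primal form: solve an n_features x n_features system
w_primal = np.linalg.solve(np.dot(X.T, X) + alpha * np.eye(X.shape[1]), np.dot(X.T, y))
# dual form: solve an n_samples x n_samples system, then map back with X.T
w_dual = np.dot(X.T, np.linalg.solve(np.dot(X, X.T) + alpha * np.eye(X.shape[0]), y))

assert np.allclose(w_primal, w_dual)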
Example #7
def cluster(dist_mat):
  """ Cluster based on difference matrix """

  num_ts = dist_mat.shape[0]
  # Large values on the diagonal so an element is never paired with itself;
  # mask the lower triangle so each pair is only considered once.
  DM = np.ma.masked_array(dist_mat + np.eye(num_ts) * 1e8)
  DM.mask = np.tril(np.ones((num_ts, num_ts), dtype=bool))
  cluster_levels = []

  # start with each time series in its own singleton set
  ts = [{i} for i in range(num_ts)]

  # Find the minimum in DM, join the corresponding sets, then exclude that
  # pair using the mask; stop once every pair has been excluded.
  while not DM.mask.all():
    indexes = np.ma.where(DM == DM.min())
    i_set = set.union(*[ts[i] for i in indexes[0]])
    j_set = set.union(*[ts[j] for j in indexes[1]])
    cluster_levels.append(set.union(i_set, j_set))
    # exclude using mask
    DM.mask[indexes] = 1

  return cluster_levels
Example #8
def test_modelgen_spm_concat():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename2)
    # Test case when only one duration is passed, as being the same for all onsets.
    s = SpecifySPMModel()
    s.inputs.input_units = 'secs'
    s.inputs.concatenate_runs = True
    setattr(s.inputs, 'output_units', 'secs')
    yield assert_equal, s.inputs.output_units, 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    s.inputs.high_pass_filter_cutoff = 128.
    info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 170]], durations=[[1]]),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 1
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, np.sum(res.outputs.session_info[0]['regress'][0]['val']), 30
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 1
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1., 1., 1., 1., 1., 1., 1.])
    # Test case of scans as output units instead of seconds
    setattr(s.inputs, 'output_units', 'scans')
    yield assert_equal, s.inputs.output_units, 'scans'
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6
    # Test case for no concatenation with seconds as output units
    s.inputs.concatenate_runs = False
    s.inputs.subject_info = deepcopy(info)
    s.inputs.output_units = 'secs'
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['onset']), np.array([2.0, 50.0, 100.0, 170.0])
    # Test case for variable number of events in separate runs, sometimes unique.
    filename3 = os.path.join(tempdir, 'test3.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 30), np.eye(4)).to_filename(filename3)
    s.inputs.functional_runs = [filename1, filename2, filename3]
    info = [Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2, 4]], durations=[[1, 1], [1, 1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][1]['duration']), np.array([1.,])
    yield assert_almost_equal, np.array(res.outputs.session_info[1]['cond'][1]['duration']), np.array([1., 1.])
    yield assert_almost_equal, np.array(res.outputs.session_info[2]['cond'][1]['duration']), np.array([1.,])
    # Test case for variable number of events in concatenated runs, sometimes unique.
    s.inputs.concatenate_runs = True
    info = [Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2, 4]], durations=[[1, 1], [1, 1]]),
            Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 3], [2]], durations=[[1, 1], [1]])]
    s.inputs.subject_info = deepcopy(info)
    res = s.run()
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][0]['duration']), np.array([1., 1., 1., 1., 1., 1.])
    yield assert_almost_equal, np.array(res.outputs.session_info[0]['cond'][1]['duration']), np.array([1., 1., 1., 1.])
    rmtree(tempdir)
def calc_acceleration(velocities):
    ## kalman
    
    data = velocities
    ss = 6 # state size
    os = 3 # observation size
    F = np.array([   [1,0,0,1,0,0], # process update
                     [0,1,0,0,1,0],
                     [0,0,1,0,0,1],
                     [0,0,0,1,0,0],
                     [0,0,0,0,1,0],
                     [0,0,0,0,0,1]],                  
                    dtype=np.float)
    H = np.array([   [1,0,0,0,0,0], [0,1,0,0,0,0], [0,0,1,0,0,0]], # observation matrix
                    dtype=np.float)
    Q = 0.01*np.eye(ss) # process noise
    
    R = 1*np.eye(os) # observation noise
    
    initx = np.array([velocities[0,0], velocities[0,1], velocities[0,2], 0, 0, 0], dtype=np.float)
    initv = 0*np.eye(ss)
    xsmooth,Vsmooth = kalman_math.kalman_smoother(data, F, H, Q, R, initx, initv, plot=False)

    accel_smooth = xsmooth[:,3:]*100.
    
    return accel_smooth
Example #10
def second_order_solver(FF,GG,HH, eigmax=1.0+1e-6):

    # from scipy.linalg import qz
    from dolo.numeric.extern.qz import qzordered

    from numpy import array,mat,c_,r_,eye,zeros,real_if_close,diag,allclose,where,diagflat
    from numpy.linalg import solve

    Psi_mat = array(FF)
    Gamma_mat = array(-GG)
    Theta_mat = array(-HH)
    m_states = FF.shape[0]

    Xi_mat = r_[c_[Gamma_mat, Theta_mat],
                c_[eye(m_states), zeros((m_states, m_states))]]


    Delta_mat = r_[c_[Psi_mat, zeros((m_states, m_states))],
                   c_[zeros((m_states, m_states)), eye(m_states)]]

    [Delta_up,Xi_up,UUU,VVV,eigval] = qzordered(Delta_mat, Xi_mat,)

    VVVH = VVV.T
    VVV_2_1 = VVVH[m_states:2*m_states, :m_states]
    VVV_2_2 = VVVH[m_states:2*m_states, m_states:2*m_states]
    UUU_2_1 = UUU[m_states:2*m_states, :m_states]
    PP = - solve(VVV_2_1, VVV_2_2)

    # slightly different check than in the original toolkit:
    assert allclose(real_if_close(PP), PP.real)
    PP = PP.real

    return [eigval,PP]
Example #11
def test_modelgen_sparse():
    tempdir = mkdtemp()
    filename1 = os.path.join(tempdir, 'test1.nii')
    filename2 = os.path.join(tempdir, 'test2.nii')
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename1)
    Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4)).to_filename(filename2)
    s = SpecifySparseModel()
    s.inputs.input_units = 'secs'
    s.inputs.functional_runs = [filename1, filename2]
    s.inputs.time_repetition = 6
    info = [Bunch(conditions=['cond1'], onsets=[[0, 50, 100, 180]], durations=[[2]]),
            Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]])]
    s.inputs.subject_info = info
    s.inputs.volumes_in_cluster = 1
    s.inputs.time_acquisition = 2
    s.inputs.high_pass_filter_cutoff = np.inf
    res = s.run()
    yield assert_equal, len(res.outputs.session_info), 2
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    yield assert_equal, len(res.outputs.session_info[0]['cond']), 0
    s.inputs.stimuli_as_impulses = False
    res = s.run()
    yield assert_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 1.0
    s.inputs.model_hrf = True
    res = s.run()
    yield assert_almost_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 1
    s.inputs.use_temporal_deriv = True
    res = s.run()
    yield assert_equal, len(res.outputs.session_info[0]['regress']), 2
    yield assert_almost_equal, res.outputs.session_info[0]['regress'][0]['val'][0], 0.016675298129743384
    yield assert_almost_equal, res.outputs.session_info[1]['regress'][1]['val'][5], 0.007671459162258378
    rmtree(tempdir)
 def testImplicitLargeDiag(self):
   mu = np.array([[1., 2, 3],
                  [11, 22, 33]])      # shape: [b, k] = [2, 3]
   u = np.array([[[1., 2],
                  [3, 4],
                  [5, 6]],
                 [[0.5, 0.75],
                  [1, 0.25],
                  [1.5, 1.25]]])      # shape: [b, k, r] = [2, 3, 2]
   m = np.array([[0.1, 0.2],
                 [0.4, 0.5]])         # shape: [b, r] = [2, 2]
   scale = np.stack([
       np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
                             np.transpose(u[0])),
       np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
                             np.transpose(u[1])),
   ])
   cov = np.stack([np.matmul(scale[0], scale[0].T),
                   np.matmul(scale[1], scale[1].T)])
   logging.vlog(2, "expected_cov:\n{}".format(cov))
   with self.test_session():
     mvn = ds.MultivariateNormalDiagPlusLowRank(
         loc=mu,
         scale_perturb_factor=u,
         scale_perturb_diag=m)
     self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
    def update(self, r, g, r0, g0, p0):
        self.I = eye(len(self.atoms) * 3, dtype=int)
        if self.H is None:
            self.H = eye(3 * len(self.atoms))
            #self.B = np.linalg.inv(self.H)
            return
        else:
            dr = r - r0
            dg = g - g0 
            if not ((self.alpha_k > 0 and abs(np.dot(g,p0))-abs(np.dot(g0,p0)) < 0) \
                or self.replay):
                return
            if self.no_update == True:
                print 'skip update'
                return

            try: # this was handled in numeric, let it remain for more safety
                rhok = 1.0 / (np.dot(dg,dr))
            except ZeroDivisionError:
                rhok = 1000.0
                print "Divide-by-zero encountered: rhok assumed large"
            if isinf(rhok): # this is patch for np
                rhok = 1000.0
                print "Divide-by-zero encountered: rhok assumed large"
            A1 = self.I - dr[:, np.newaxis] * dg[np.newaxis, :] * rhok
            A2 = self.I - dg[:, np.newaxis] * dr[np.newaxis, :] * rhok
            H0 = self.H
            self.H = np.dot(A1, np.dot(self.H, A2)) + rhok * dr[:, np.newaxis] \
                     * dr[np.newaxis, :]
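The expression above is the standard BFGS update of the inverse Hessian, H_new = (I - rho*dr*dg^T) H (I - rho*dg*dr^T) + rho*dr*dr^T with rho = 1/(dg . dr). A small self-contained check (with arbitrary made-up vectors) that the updated matrix satisfies the secant condition H_new . dg = dr:

import numpy as np

rng = np.random.RandomState(0)
n = 6
H = np.eye(n)                      # current inverse-Hessian approximation
dr = rng.randn(n)                  # step r - r0
dg = rng.randn(n)                  # gradient change g - g0
rhok = 1.0 / np.dot(dg, dr)

I = np.eye(n)
A1 = I - np.outer(dr, dg) * rhok
A2 = I - np.outer(dg, dr) * rhok
H_new = np.dot(A1, np.dot(H, A2)) + rhok * np.outer(dr, dr)

# secant condition: the updated inverse Hessian maps the gradient change back onto the step
assert np.allclose(np.dot(H_new, dg), dr)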
Example #14
    def __init__(self, driver, simulation=False):

        self.driver = driver

        self.width = int(self.driver.canvas['width'])
        self.height = int(self.driver.canvas['height'])

        self.driver.root.bind("<Key>", self._check_quit)

        self.driver.root.title('IMU Telemetry')

        # Vehicle dimensions
        W = VEHICLE_SCALE
        D = VEHICLE_SCALE / 2
        L = VEHICLE_SCALE * 2

        #Let these be in World-coordinates (worldview-matrix already applied)
        ####In right-handed, counter-clockwise order
        self.vehicle_points, self.vehicle_faces, self.vehicle_face_colors = self._get_vehicle(W, D, L)

        # Assume no angles to start
        self.yaw_pitch_roll = None

        # Rotation matrices
        self.pitchrot = np.eye(3)
        self.yawrot = np.eye(3)
        self.rollrot = np.eye(3)

        self.simulation = simulation
        self.running = False
Example #15
def test_joint_feature_continuous():
    # FIXME
    # first make perfect prediction, including pairwise part
    X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]

    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1

    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10

    # create crf, assemble weight, make prediction
    for inference_method in get_installed(["lp", "ad3"]):
        crf = DirectionalGridCRF(inference_method=inference_method)
        crf.initialize(X, Y)
        w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
        y_pred = crf.inference(x, w, relaxed=True)

        # compute joint_feature for prediction
        joint_feature_y = crf.joint_feature(x, y_pred)
        assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
Example #16
def check_transforms(dataset):
    saved_datas = []
    for data in dataset:
        saved_datas.append(np.array(data))
    tr1 = np.eye(dataset.data_format.num_channels)
    tr1[0, 0] = 2
    tr1[0, 1] = -0.5
    dataset.transform(tr1)
    for saved_data, data in zip(saved_datas, dataset):
        assert np.allclose(np.dot(saved_data, tr1.T), data)

    dataset_copy = DataSet(dataset.data_format)
    for saved_data, copy_data in zip(saved_datas, dataset_copy):
        assert np.allclose(np.dot(saved_data, tr1.T), copy_data)

    tr2 = np.eye(dataset.data_format.num_channels)
    tr2[-1, -1] = -3
    tr2[1, 0] = 2.5
    dataset.transform(tr2)

    for saved_data, copy_data in zip(saved_datas, dataset_copy):
        assert np.allclose(np.dot(saved_data, tr1.T), copy_data)

    tr_both = np.dot(tr2, tr1)
    for saved_data, data in zip(saved_datas, dataset):
        assert np.allclose(np.dot(saved_data, tr_both.T), data)
Example #17
    def initialize_cov(self, cov=None, scales=None, scaling=20):
        """Define C_0, the initial jump distributioin covariance matrix.

        Return:
            - cov,  if cov != None
            - covariance matrix built from the scales dictionary if scales!=None
            - covariance matrix estimated from the stochastics trace
            - covariance matrix estimated from the stochastics value, scaled by
                scaling parameter.
        """
        if cov:
            return cov
        elif scales:
            ord_sc = self.order_scales(scales)
            return np.eye(self.dim) * ord_sc
        else:
            try:
                a = self.trace2array(-2000, -1)
                nz = a[:, 0] != 0
                return np.cov(a[nz, :], rowvar=0)
            except:
                ord_sc = []
                for s in self.stochastics:
                    this_value = abs(np.ravel(s.value))
                    for elem in this_value:
                        ord_sc.append(elem)
                # print len(ord_sc), self.dim
                return np.eye(self.dim) * ord_sc / scaling
Example #18
def test_t_contrast_add():
    mulm, n, p, q = ols_glm()
    c1, c2 = np.eye(q)[0], np.eye(q)[1]
    con = mulm.contrast(c1) + mulm.contrast(c2)
    z_vals = con.z_score()
    assert_almost_equal(z_vals.mean(), 0, 0)
    assert_almost_equal(z_vals.std(), 1, 0)
Example #19
def test_high_level_glm_with_data():
    shapes, rk = ((7, 6, 5, 20), (7, 6, 5, 19)), 3
    mask, fmri_data, design_matrices = write_fake_fmri_data(shapes, rk)

    # without mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask=None)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert_equal(np.sum(z_image.get_data() == 0), 0)

    # compute the mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices,
                                          m=0, M=.01, threshold=0.)
    multi_session_model.fit()
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert_true(z_image.get_data().std() < 3. )

    # with mask
    multi_session_model = FMRILinearModel(fmri_data, design_matrices, mask)
    multi_session_model.fit()
    z_image, effect_image, variance_image= multi_session_model.contrast(
        [np.eye(rk)[:2]] * 2, output_effects=True, output_variance=True)
    assert_array_equal(z_image.get_data() == 0., load(mask).get_data() == 0.)
    assert_true(
        (variance_image.get_data()[load(mask).get_data() > 0, 0] > .001).all())

    # without scaling
    multi_session_model.fit(do_scaling=False)
    z_image, = multi_session_model.contrast([np.eye(rk)[1]] * 2)
    assert_true(z_image.get_data().std() < 3. )
Example #20
    def test_comparisons(self):
        f, g, h = self.f, self.g, self.h
        self.fake_raw_data = {
            f: arange(25).reshape(5, 5),
            g: arange(25).reshape(5, 5) - eye(5),
            h: full((5, 5), 5),
        }
        f_data = self.fake_raw_data[f]
        g_data = self.fake_raw_data[g]

        cases = [
            # Sanity Check with hand-computed values.
            (f, g, eye(5), zeros((5, 5))),
            (f, 10, f_data, 10),
            (10, f, 10, f_data),
            (f, f, f_data, f_data),
            (f + 1, f, f_data + 1, f_data),
            (1 + f, f, 1 + f_data, f_data),
            (f, g, f_data, g_data),
            (f + 1, g, f_data + 1, g_data),
            (f, g + 1, f_data, g_data + 1),
            (f + 1, g + 1, f_data + 1, g_data + 1),
            ((f + g) / 2, f ** 2, (f_data + g_data) / 2, f_data ** 2),
        ]
        for op in (gt, ge, lt, le, ne):
            for expr_lhs, expr_rhs, expected_lhs, expected_rhs in cases:
                self.check_output(
                    op(expr_lhs, expr_rhs),
                    op(expected_lhs, expected_rhs),
                )
Example #21
    def test_boolean_binops(self):
        f, g, h = self.f, self.g, self.h
        self.fake_raw_data = {
            f: arange(25).reshape(5, 5),
            g: arange(25).reshape(5, 5) - eye(5),
            h: full((5, 5), 5),
        }

        # Should be True on the diagonal.
        eye_filter = f > g
        # Should be True in the first row only.
        first_row_filter = f < h

        eye_mask = eye(5, dtype=bool)
        first_row_mask = zeros((5, 5), dtype=bool)
        first_row_mask[0] = 1

        self.check_output(eye_filter, eye_mask)
        self.check_output(first_row_filter, first_row_mask)

        for op in (and_, or_):  # NumExpr doesn't support xor.
            self.check_output(
                op(eye_filter, first_row_filter),
                op(eye_mask, first_row_mask),
            )
Example #22
    def test_joints_api(self):
        # Verify construction from both Isometry3d and 4x4 arrays,
        # and sanity-check that the accessors function.
        name = "z"
        prismatic_joint_np = PrismaticJoint(name, np.eye(4),
                                            np.array([0., 0., 1.]))
        prismatic_joint_isom = PrismaticJoint(name, Isometry3.Identity(),
                                              np.array([0., 0., 1.]))
        self.assertEqual(prismatic_joint_isom.get_num_positions(), 1)
        self.assertEqual(prismatic_joint_isom.get_name(), name)

        name = "theta"
        revolute_joint_np = RevoluteJoint(name, np.eye(4),
                                          np.array([0., 0., 1.]))
        revolute_joint_isom = RevoluteJoint(name, Isometry3.Identity(),
                                            np.array([0., 0., 1.]))
        self.assertEqual(revolute_joint_isom.get_num_positions(), 1)
        self.assertEqual(revolute_joint_isom.get_name(), name)

        name = "fixed"
        fixed_joint_np = FixedJoint(name, np.eye(4))
        fixed_joint_isom = FixedJoint(name, Isometry3.Identity())
        self.assertEqual(fixed_joint_isom.get_num_positions(), 0)
        self.assertEqual(fixed_joint_isom.get_name(), name)

        name = "rpy"
        rpy_floating_joint_np = RollPitchYawFloatingJoint(
            name, np.eye(4))
        rpy_floating_joint_isom = RollPitchYawFloatingJoint(
            name, Isometry3.Identity())
        self.assertEqual(rpy_floating_joint_isom.get_num_positions(), 6)
        self.assertEqual(rpy_floating_joint_isom.get_name(), name)
Example #23
def test_dkln3():
    dim, scale = 3, 4
    m1, m2 = np.zeros(dim), np.zeros(dim)
    P1, P2 = np.eye(dim), scale * np.eye(dim)
    test1 = .5 * (dim * np.log(scale) + dim * (1. / scale - 1))
    test2 = .5 * (-dim * np.log(scale) + dim * (scale - 1)) 
    assert dkl_gaussian(m1, P1, m2, P2) == test2
def LQR_control(A, B, x):

    Kopt, X, ev = dlqr(A, B, np.eye(2), np.eye(1))

    u = -Kopt * x

    return u
Example #25
def test_dkln2():
    dim, offset = 3, 4.
    m1 = np.zeros(dim)
    P1 = np.eye(dim)
    m2 = offset * np.ones(dim)
    P2 = np.eye(dim)
    assert dkl_gaussian(m1, P1, m2, P2) == .5 * dim * offset ** 2
Example #26
    def __init__(self, dim_x, dim_z, plant, kappa=0):

        self.plant = plant
        self.fx = plant.fx
        self.hx = plant.hx

        self._dim_x = dim_x
        self._dim_z = dim_z

        self.x = zeros(dim_x)
        self.P = eye(dim_x)

        self.R = 1.0e-03 * array([
            [0.000003549299086,-0.000002442814972,-0.000004480024840,0.000267707847733,-0.000144518246735,-0.000212282673978],
            [-0.000002442814972,0.000005899512446,0.000006498387107,-0.000138622536892,0.000440883366233,0.000388550687603],
            [-0.000004480024840,0.000006498387107,0.000014749347917,-0.000218834499062,0.000402004146826,0.000932091499876],
            [0.000267707847733,-0.000138622536892,-0.000218834499062,0.042452413803684,-0.022718840083072,-0.034590131072346],
            [-0.000144518246735,0.000440883366233,0.000402004146826,-0.022718840083072,0.071342980281184,0.064549199777213],
            [-0.000212282673978,0.000388550687603,0.000932091499876,-0.034590131072346,0.064549199777213,0.149298685351403],
            ])

        self.Q = 1.0e-09*eye(dim_x) 

        self._num_sigmas = 2*dim_x + 1
        self.kappa = kappa

        # weights for the sigma points
        self.W = self.weights(dim_x, kappa)
        
        # sigma points transformed through f(x) and h(x)
        # variables for efficiency so we don't recreate every update
        self.sigmas_f = zeros((self._num_sigmas, self._dim_x))
Example #27
 def test_f_test(self):
     m = self.m
     kvars = self.kvars
     f_unreg = self.res_unreg.f_test(np.eye(m))
     f_reg = self.res_reg.f_test(np.eye(kvars)[:m])
     assert_almost_equal(f_unreg.fvalue, f_reg.fvalue, DECIMAL_2)
     assert_almost_equal(f_unreg.pvalue, f_reg.pvalue, DECIMAL_3)
Example #28
    def test_rigid_body_api(self):
        # Tests as much of the RigidBody API as is possible in isolation.
        # Adding collision geometry is *not* tested here, as it needs to
        # be done in the context of the RigidBodyTree.
        body = RigidBody()
        name = "body"
        body.set_name(name)
        self.assertEqual(body.get_name(), name)
        inertia = np.eye(6)
        body.set_spatial_inertia(inertia)
        self.assertTrue(np.allclose(inertia, body.get_spatial_inertia()))

        # Try adding a joint to a dummy body.
        body_joint = PrismaticJoint("z", np.eye(4),
                                    np.array([0., 0., 1.]))
        self.assertFalse(body.has_joint())
        dummy_body = RigidBody()
        body.add_joint(dummy_body, body_joint)
        self.assertEqual(body.getJoint(), body_joint)
        self.assertTrue(body.has_joint())

        # Try adding visual geometry.
        box_element = shapes.Box([1.0, 1.0, 1.0])
        box_visual_element = shapes.VisualElement(
            box_element, np.eye(4), [1., 0., 0., 1.])
        body.AddVisualElement(box_visual_element)
        body_visual_elements = body.get_visual_elements()
        self.assertEqual(len(body_visual_elements), 1)
        self.assertEqual(body_visual_elements[0].getGeometry().getShape(),
                         box_visual_element.getGeometry().getShape())

        # Test collision-related methods.
        self.assertEqual(body.get_num_collision_elements(), 0)
        self.assertEqual(len(body.get_collision_element_ids()), 0)
Example #29
def bench_load_save():
    rng = np.random.RandomState(20111001)
    repeat = 4
    img_shape = (128, 128, 64)
    arr = rng.normal(size=img_shape)
    img = Nifti1Image(arr, np.eye(4))
    sio = BytesIO()
    img.file_map['image'].fileobj = sio
    hdr = img.get_header()
    sys.stdout.flush()
    print("\nImage load save")
    print("----------------")
    hdr.set_data_dtype(np.float32)
    mtime = measure('img.to_file_map()', repeat)
    print('%30s %6.2f' % ('Save float64 to float32', mtime))
    mtime = measure('img.from_file_map(img.file_map)', repeat)
    print('%30s %6.2f' % ('Load from float32', mtime))
    hdr.set_data_dtype(np.int16)
    mtime = measure('img.to_file_map()', repeat)
    print('%30s %6.2f' % ('Save float64 to int16', mtime))
    mtime = measure('img.from_file_map(img.file_map)', repeat)
    print('%30s %6.2f' % ('Load from int16', mtime))
    arr = np.random.random_integers(low=-1000, high=1000, size=img_shape)
    arr = arr.astype(np.int16)
    img = Nifti1Image(arr, np.eye(4))
    sio = BytesIO()
    img.file_map['image'].fileobj = sio
    hdr = img.get_header()
    hdr.set_data_dtype(np.float32)
    mtime = measure('img.to_file_map()', repeat)
    print('%30s %6.2f' % ('Save Int16 to float32', mtime))
    sys.stdout.flush()
Example #30
    def init_local_transformation(self):
        """
        compute the local transformation for the first time (compute everything)
        """
        # rotation part
        self.localTransformation = numpy.eye(4)
        # print type(self), id(self), self.rotation
        try:
            angle = self.rotation[3]
        except IndexError:
            logger.exception("Failed on {0}, rotation={1}".format(type(self),self.rotation))
            raise
        direction = self.rotation[:3]
        self.localTransformation[0:3,0:3] = tf.rotation_matrix(angle, direction)[:3,:3]
        self.rpy = tf.euler_from_matrix(self.localTransformation)

        # last column
        scale = [1,1,1]
        if self.parent:
            scale = self.cumul_scale()
        scale_translation = [self.translation[i]*scale[i] for i in range(3)]

        self.localTransformation[0:3,3] = numpy.array(scale_translation)+\
            numpy.dot(numpy.eye(3)-self.localTransformation[:3,:3],
                      numpy.array(self.center))

        # last line
        self.localTransformation[3,0:4]=[0,0,0,1]
Example #31
def generate_E(P=15):
    return wishart.rvs(df=P, scale=(1.0 / P) * np.eye(P))
def make_tilespec_from_sbemimage(rootdir,
                                 outputProject,
                                 outputOwner,
                                 outputStack,
                                 minval=0,
                                 maxval=50000):

    mipmap_args = []
    tilespecpaths = []

    if not os.path.exists('meta'):
        print('Change to proper directory!')
        exit()

    mfile0 = os.path.join('meta', 'logs', 'imagelist_')

    mfiles = glob.glob(mfile0 + '*')

    tiles = list()
    views = list()

    idx = 0

    for mfile in mfiles:

        with open(mfile) as mf:
            ml = mf.read().splitlines()

        mdfile = os.path.join('meta', 'logs',
                              'metadata' + mfile[mfile.rfind('_'):])

        with open(mdfile) as mdf:
            mdl = mdf.read().splitlines()

        conffile = os.path.join('meta', 'logs',
                                'config' + mfile[mfile.rfind('_'):])

        with open(conffile) as cf:
            cl = cf.read().splitlines()

        config = parse_adoc(cl)

        pxs = float(config['grab_frame_pixel_size'][0])  #/1000  # in um
        z_thick = float(config['slice_thickness'][0])  #/1000  # in um

        # generate the individual transformation matrices
        # 1)  The scale and rotation information form the map item
        mat = np.diag((pxs, pxs, z_thick))

        mat_s = np.concatenate((mat, [[0], [0], [0]]), axis=1)
        mat_s = np.concatenate((mat_s, [[0, 0, 0, 1]]))

        tilespeclist = []
        z = 0

        for line in mdl:
            if line.startswith('TILE: '):
                tile = bdv.str2dict(line[line.find('{'):])
                tiles.append(tile)

                # 2) The translation matrix to position the object in space (lower left corner)
                mat_t = np.concatenate(
                    (np.eye(3), [[tile['glob_x']], [tile['glob_y']],
                                 [tile['glob_z']]]),
                    axis=1)
                mat_t = np.concatenate((mat_t, [[0, 0, 0, 1]]))

                f1 = os.path.realpath(tile['filename'])

                fbase = os.path.splitext(os.path.basename(f1))[0]

                tilespecdir = os.path.join('processed', 'tilespec')

                filepath = groupsharepath(f1)

                #print tilespecdir
                if not os.path.isdir(tilespecdir):
                    os.makedirs(tilespecdir)

                downdir = os.path.join("processed", "downsamp_images")
                #print "This is the Down Sampled Directory: %s"%downdir

                if not os.path.exists(downdir):
                    os.makedirs(downdir)

                downdir1 = groupsharepath(os.path.realpath(downdir))

                #construct command for creating mipmaps for this tilespec
                #downcmd = ['python','create_mipmaps.py','--inputImage',filepath,'--outputDirectory',downdir,'--mipmaplevels','1','2','3']
                #cmds.append(downcmd)
                mipmap_args.append((f1, os.path.realpath(downdir)))
                layout = Layout(sectionId=tile['slice_counter'],
                                scopeId='3View',
                                cameraId='3View',
                                imageRow=0,
                                imageCol=0,
                                stageX=tile['glob_x'] / 10,
                                stageY=tile['glob_y'] / 10,
                                rotation=0.0,
                                pixelsize=pxs)

                mipmap0 = MipMapLevel(level=0, imageUrl='file://' + filepath)
                mipmaplevels = [mipmap0]
                filename = tile['tileid']

                for i in range(1, 4):
                    scUrl = 'file://' + os.path.join(downdir1,
                                                     fbase) + '_mip0%d.jpg' % i
                    mml = MipMapLevel(level=i, imageUrl=scUrl)
                    mipmaplevels.append(mml)

                tform = AffineModel(M00=1,
                                    M01=0,
                                    M10=0,
                                    M11=1,
                                    B0=tile['glob_x'] / 10,
                                    B1=tile['glob_y'] / 10)

                tilespeclist.append(
                    TileSpec(tileId=tile['tileid'],
                             frameId=tile['tileid'][:tile['tileid'].find('.')],
                             z=tile['glob_z'],
                             width=tile['tile_width'],
                             height=tile['tile_height'],
                             mipMapLevels=mipmaplevels,
                             tforms=[tform],
                             minint=minval,
                             maxint=maxval,
                             layout=layout))
                z = tile['glob_z']

                json_file = os.path.realpath(
                    os.path.join(
                        tilespecdir, outputProject + '_' + outputOwner + '_' +
                        outputStack + '_%04d.json' % z))
                fd = open(json_file, "w")
                renderapi.utils.renderdump(tilespeclist,
                                           fd,
                                           sort_keys=True,
                                           indent=4,
                                           separators=(',', ': '))
                fd.close()
                tilespecpaths.append(json_file)
    return tilespecpaths, mipmap_args
def active_contour(image, snake, alpha=0.01, beta=0.1,
                   w_line=0, w_edge=1, gamma=0.01,
                   max_px_move=1.0,
                   max_num_iter=2500, convergence=0.1,
                   *,
                   boundary_condition='periodic',
                   coordinates='rc'):
    """Active contour model.

    Active contours by fitting snakes to features of images. Supports single
    and multichannel 2D images. Snakes can be periodic (for segmentation) or
    have fixed and/or free ends.
    The output snake has the same length as the input boundary.
    As the number of points is constant, make sure that the initial snake
    has enough points to capture the details of the final contour.

    Parameters
    ----------
    image : (N, M) or (N, M, 3) ndarray
        Input image.
    snake : (N, 2) ndarray
        Initial snake coordinates. For periodic boundary conditions, endpoints
        must not be duplicated.
    alpha : float, optional
        Snake length shape parameter. Higher values make the snake contract
        faster.
    beta : float, optional
        Snake smoothness shape parameter. Higher values make the snake smoother.
    w_line : float, optional
        Controls attraction to brightness. Use negative values to attract
        toward dark regions.
    w_edge : float, optional
        Controls attraction to edges. Use negative values to repel snake from
        edges.
    gamma : float, optional
        Explicit time stepping parameter.
    max_px_move : float, optional
        Maximum pixel distance to move per iteration.
    max_num_iter : int, optional
        Maximum iterations to optimize snake shape.
    convergence : float, optional
        Convergence criteria.
    boundary_condition : string, optional
        Boundary conditions for the contour. Can be one of 'periodic',
        'free', 'fixed', 'free-fixed', or 'fixed-free'. 'periodic' attaches
        the two ends of the snake, 'fixed' holds the end-points in place,
        and 'free' allows free movement of the ends. 'fixed' and 'free' can
        be combined by passing 'fixed-free' or 'free-fixed'. Passing
        'fixed-fixed' or 'free-free' yields the same behaviour as 'fixed' and
        'free', respectively.
    coordinates : {'rc'}, optional
        This option remains for compatibility purpose only and has no effect.
        It was introduced in 0.16 with the ``'xy'`` option, but since 0.18,
        only the ``'rc'`` option is valid.
        Coordinates must be set in a row-column format.

    Returns
    -------
    snake : (N, 2) ndarray
        Optimised snake, same shape as input parameter.

    References
    ----------
    .. [1]  Kass, M.; Witkin, A.; Terzopoulos, D. "Snakes: Active contour
            models". International Journal of Computer Vision 1 (4): 321
            (1988). :DOI:`10.1007/BF00133570`

    Examples
    --------
    >>> from skimage.draw import circle_perimeter
    >>> from skimage.filters import gaussian

    Create and smooth image:

    >>> img = np.zeros((100, 100))
    >>> rr, cc = circle_perimeter(35, 45, 25)
    >>> img[rr, cc] = 1
    >>> img = gaussian(img, 2, preserve_range=False)

    Initialize spline:

    >>> s = np.linspace(0, 2*np.pi, 100)
    >>> init = 50 * np.array([np.sin(s), np.cos(s)]).T + 50

    Fit spline to image:

    >>> snake = active_contour(img, init, w_edge=0, w_line=1, coordinates='rc')  # doctest: +SKIP
    >>> dist = np.sqrt((45-snake[:, 0])**2 + (35-snake[:, 1])**2)  # doctest: +SKIP
    >>> int(np.mean(dist))  # doctest: +SKIP
    25

    """
    if coordinates != 'rc':
        raise ValueError('Coordinate values must be set in a row column '
                         'format. `coordinates` must be set to "rc".')
    max_num_iter = int(max_num_iter)
    if max_num_iter <= 0:
        raise ValueError("max_num_iter should be >0.")
    convergence_order = 10
    valid_bcs = ['periodic', 'free', 'fixed', 'free-fixed',
                 'fixed-free', 'fixed-fixed', 'free-free']
    if boundary_condition not in valid_bcs:
        raise ValueError("Invalid boundary condition.\n" +
                         "Should be one of: "+", ".join(valid_bcs)+'.')

    img = img_as_float(image)
    float_dtype = _supported_float_type(image.dtype)
    img = img.astype(float_dtype, copy=False)

    RGB = img.ndim == 3

    # Find edges using sobel:
    if w_edge != 0:
        if RGB:
            edge = [sobel(img[:, :, 0]), sobel(img[:, :, 1]),
                    sobel(img[:, :, 2])]
        else:
            edge = [sobel(img)]
    else:
        edge = [0]

    # Superimpose intensity and edge images:
    if RGB:
        img = w_line*np.sum(img, axis=2) \
            + w_edge*sum(edge)
    else:
        img = w_line*img + w_edge*edge[0]

    # Interpolate for smoothness:
    intp = RectBivariateSpline(np.arange(img.shape[1]),
                               np.arange(img.shape[0]),
                               img.T, kx=2, ky=2, s=0)

    snake_xy = snake[:, ::-1]
    x = snake_xy[:, 0].astype(float_dtype)
    y = snake_xy[:, 1].astype(float_dtype)
    n = len(x)
    xsave = np.empty((convergence_order, n), dtype=float_dtype)
    ysave = np.empty((convergence_order, n), dtype=float_dtype)

    # Build snake shape matrix for Euler equation in double precision
    eye_n = np.eye(n, dtype=float)
    a = (np.roll(eye_n, -1, axis=0)
         + np.roll(eye_n, -1, axis=1)
         - 2 * eye_n)  # second order derivative, central difference
    b = (np.roll(eye_n, -2, axis=0)
         + np.roll(eye_n, -2, axis=1)
         - 4 * np.roll(eye_n, -1, axis=0)
         - 4 * np.roll(eye_n, -1, axis=1)
         + 6 * eye_n)  # fourth order derivative, central difference
    A = -alpha * a + beta * b

    # Impose boundary conditions different from periodic:
    sfixed = False
    if boundary_condition.startswith('fixed'):
        A[0, :] = 0
        A[1, :] = 0
        A[1, :3] = [1, -2, 1]
        sfixed = True
    efixed = False
    if boundary_condition.endswith('fixed'):
        A[-1, :] = 0
        A[-2, :] = 0
        A[-2, -3:] = [1, -2, 1]
        efixed = True
    sfree = False
    if boundary_condition.startswith('free'):
        A[0, :] = 0
        A[0, :3] = [1, -2, 1]
        A[1, :] = 0
        A[1, :4] = [-1, 3, -3, 1]
        sfree = True
    efree = False
    if boundary_condition.endswith('free'):
        A[-1, :] = 0
        A[-1, -3:] = [1, -2, 1]
        A[-2, :] = 0
        A[-2, -4:] = [-1, 3, -3, 1]
        efree = True

    # Only one inversion is needed for implicit spline energy minimization:
    inv = np.linalg.inv(A + gamma * eye_n)
    # can use float_dtype once we have computed the inverse in double precision
    inv = inv.astype(float_dtype, copy=False)

    # Explicit time stepping for image energy minimization:
    for i in range(max_num_iter):
        # RectBivariateSpline always returns float64, so call astype here
        fx = intp(x, y, dx=1, grid=False).astype(float_dtype, copy=False)
        fy = intp(x, y, dy=1, grid=False).astype(float_dtype, copy=False)

        if sfixed:
            fx[0] = 0
            fy[0] = 0
        if efixed:
            fx[-1] = 0
            fy[-1] = 0
        if sfree:
            fx[0] *= 2
            fy[0] *= 2
        if efree:
            fx[-1] *= 2
            fy[-1] *= 2
        xn = inv @ (gamma*x + fx)
        yn = inv @ (gamma*y + fy)

        # Movements are capped to max_px_move per iteration:
        dx = max_px_move * np.tanh(xn - x)
        dy = max_px_move * np.tanh(yn - y)
        if sfixed:
            dx[0] = 0
            dy[0] = 0
        if efixed:
            dx[-1] = 0
            dy[-1] = 0
        x += dx
        y += dy

        # Convergence criteria needs to compare to a number of previous
        # configurations since oscillations can occur.
        j = i % (convergence_order + 1)
        if j < convergence_order:
            xsave[j, :] = x
            ysave[j, :] = y
        else:
            dist = np.min(np.max(np.abs(xsave - x[None, :])
                                 + np.abs(ysave - y[None, :]), 1))
            if dist < convergence:
                break

    return np.stack([y, x], axis=1)
Example #34
def case_identity(n: int) -> Tuple[pn.linops.LinearOperator, np.ndarray]:
    return pn.linops.Identity(shape=n), np.eye(n)
Example #35
from tflearn.data_utils import shuffle

hm_epochs = 5
batch_size = 100
embedding_size = 128
rnn_size = 128
max_features = 20000
max_len = 200

(X_train, y_train), (X_test,
                     y_test) = reuters.load_data(num_words=max_features)
X_train = sequence.pad_sequences(X_train, maxlen=max_len)
X_test = sequence.pad_sequences(X_test, maxlen=max_len)

n_classes = len(np.unique(y_train))
y_train = np.eye(n_classes)[y_train]
y_test = np.eye(n_classes)[y_test]

graph = tf.Graph()

with graph.as_default():
    x = tf.placeholder('int32', [None, max_len])
    y = tf.placeholder('int32')
    keep_prob = tf.placeholder('float32')

    layer = {
        'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    embeddings = tf.Variable(
Example #36
    def stability_analysis(self, bottom_friction=False):
        """ Performs the baroclinic linear instability analysis given
                given the base state velocity :math: `(U, V)` and
                the stretching matrix  :math: `S`:

        .. math:: A \Phi = \omega B \Phi,

        where

        .. math:: A = B (U k + V l) + I (k Q_y - l Q_x) +
                        1j \delta_{N N} r_{ek} I  \kappa^2

        where :math:`\delta_{N N} = [0,0,\dots,0,1] ,`

        and

        .. math:: B =  S - I \kappa^2 .


        The eigenstructure is

        .. math:: \Phi

        and the eigenvalue is

        .. math:: \omega

        The growth rate is Im\ :math:`\{\omega\}`.


        Parameters
        ----------
        bottom_friction: optional inclusion of linear bottom drag
                         in the linear stability calculation
                         (default is False, as if :math:`r_{ek} = 0`)

        Returns
        -------
        omega: complex array
             The eigenvalues with largest imaginary part (units: inverse model time)
        phi: complex array
             The eigenvectors associated with \omega (unitless)

        """

        omega = np.zeros_like(self.wv) + 0.j
        phi = np.zeros_like(self.qh)

        I = np.eye(self.nz)

        L2 = self.S[:, :, np.newaxis,
                    np.newaxis] - self.wv2 * I[:, :, np.newaxis, np.newaxis]

        Q = I[:, :, np.newaxis, np.newaxis] * (self.ikQy - self.ilQx).imag

        Uk = (self.Ubg * I)[:, :, np.newaxis, np.newaxis] * self.k
        Vl = (self.Vbg * I)[:, :, np.newaxis, np.newaxis] * self.l
        L3 = np.einsum('ij...,jk...->ik...', L2, Uk + Vl) + 0j

        if bottom_friction:
            L3[-1, -1, :, :] += 1j * self.rek * self.wv2

        L4 = self.a.T

        M = np.einsum('...ij,...jk->...ik', L4, (L3 + Q).T)

        evals, evecs = np.linalg.eig(M)
        evals, evecs = evals.T, evecs.T

        # sorting things this way proved much faster
        # than using numpy's argsort()!
        imax = evals.imag.argmax(axis=0)
        for i in range(self.nl):
            for j in range(self.nk):
                omega[i, j] = evals[imax[i, j], i, j]
                phi[:, i, j] = evecs[imax[i, j], :, i, j]

        return omega, phi
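The docstring frames the computation as a generalized eigenproblem A Phi = omega B Phi. At a single wavenumber this could equally be posed directly with scipy.linalg.eig; the sketch below uses made-up 2-layer matrices purely for illustration and is not the vectorized einsum route the method itself takes.

import numpy as np
from scipy.linalg import eig

# hypothetical 2-layer matrices at one wavenumber
A = np.array([[1.0, 0.2],
              [0.1, 0.5]])
B = np.array([[2.0, 0.0],
              [0.0, 1.5]])

omega, phi = eig(A, B)            # generalized eigenvalues and eigenvectors
growth_rate = omega.imag.max()    # the growth rate is the largest imaginary part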
    #print('state = {}, location = {}'.format(state, loc))
    while True:
        loc = np.unravel_index(state, env.shape)
        action = np.random.choice(env.nA, p=policy[state])
        next_state, reward, done, _ = env.step(action)
        print('state = {}, location = {}, reward = {}, action = {}, next state = {}'.format(state, loc, reward, action, next_state))
        total_reward += reward
        if done:
            break
        state = next_state
    return total_reward

actions = np.ones(env.shape, dtype=int)
actions[-1, :] = 0
actions[:, -1] = 2
optimal_policy = np.eye(4)[actions.reshape(-1)]

total_reward = play_once(env, optimal_policy)







def calculate_bellman(env, policy, gamma = 1):
    equation_part_one, equation_part_two = np.eye(env.nS), np.zeros(env.nS)
    for state in range(env.nS - 1):
        for action in range(env.nA):
            pi = policy[state][action]
            for p, next_state, reward, done in env.P[state][action]:
Example #38
        sys.stdout.flush()

    return x_kp1, X


if __name__ == '__main__':
    import matplotlib.pyplot as plt
    np.random.seed(0)
    dim = 3
    x0 = 10 * np.random.randn(dim)
    # function
    f = lambda x: x @ x
    # gradient
    grad = lambda x: 2 * x
    # hessian
    H = 2.0 * np.eye(dim)
    # covariance
    C = 2.0 * np.eye(dim)

    # optimize
    x_best,X = sgdm_cv(grad, x0,C,max_iter=100,batch_size=100,\
               eta=0.01,gamma=1.0,theta=0.9,func =f, verbose=True)
    # risk function
    rn = lambda x: x @ x + 0.5 * np.trace(H @ C)

    fX = [f(x) for x in X]
    rnX = [rn(x) for x in X]
    print(x_best)

    plt.plot(fX, linewidth=3, label='f')
    plt.plot(rnX, linewidth=3, label='risk neutral')
def computeHomography(f1, f2, matches, A_out=None):
    '''
    Input:
        f1 -- list of cv2.KeyPoint objects in the first image
        f2 -- list of cv2.KeyPoint objects in the second image
        matches -- list of cv2.DMatch objects
            DMatch.queryIdx: The index of the feature in the first image
            DMatch.trainIdx: The index of the feature in the second image
            DMatch.distance: The distance between the two features
        A_out -- ignore this parameter. If computeHomography is needed
                 in other TODOs, call computeHomography(f1,f2,matches)
    Output:
        H -- 2D homography (3x3 matrix)
        Takes two lists of features, f1 and f2, and a list of feature
        matches, and estimates a homography from image 1 to image 2 from the matches.
    '''
    num_matches = len(matches)

    # Dimensions of the A matrix in the homogenous linear
    # equation Ah = 0
    num_rows = 2 * num_matches
    num_cols = 9
    A_matrix_shape = (num_rows, num_cols)
    A = np.zeros(A_matrix_shape)

    for i in range(len(matches)):
        m = matches[i]
        (a_x, a_y) = f1[m.queryIdx].pt
        (b_x, b_y) = f2[m.trainIdx].pt

        # BEGIN TODO 2
        # Fill in the matrix A in this loop.
        # Access elements using square brackets. e.g. A[0,0]
        # TODO-BLOCK-BEGIN
        A[2 * i, :] = [a_x, a_y, 1, 0, 0, 0, -b_x * a_x, -b_x * a_y, -b_x]
        A[2 * i + 1, :] = [0, 0, 0, a_x, a_y, 1, -b_y * a_x, -b_y * a_y, -b_y]
        # raise Exception("TODO in alignment.py not implemented")
        # TODO-BLOCK-END
        # END TODO

    U, s, Vt = np.linalg.svd(A)

    if A_out is not None:
        A_out[:] = A

    # s is a 1-D array of singular values sorted in descending order
    # U, Vt are unitary matrices
    # Rows of Vt are the eigenvectors of A^TA.
    # Columns of U are the eigenvectors of AA^T.

    # Homography to be calculated
    H = np.eye(3)

    # BEGIN TODO 3
    # Fill the homography H with the appropriate elements of the SVD
    # TODO-BLOCK-BEGIN
    # H = np.reshape(Vt[len(s) - 1, :], (3, 3))  # why does it pass even without subtracting one from len?
    H = np.reshape(Vt[-1, :], (3, 3))
    # raise Exception("TODO in alignment.py not implemented")
    # TODO-BLOCK-END
    # END TODO

    return H
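As a brief follow-up, the returned 3x3 H maps points from image 1 into image 2 in homogeneous coordinates, followed by a perspective divide. The helper below is hypothetical and only illustrates that use; it is not part of the original module.

import numpy as np

def apply_homography(H, x, y):
    # multiply in homogeneous coordinates, then divide by the third component
    p = np.dot(H, np.array([x, y, 1.0]))
    return p[0] / p[2], p[1] / p[2]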
Example #40
def sgdm_cv(grad,
            x0,
            C,
            max_iter=100,
            batch_size=2,
            eta=1.0,
            gamma=1.0,
            theta=0.9,
            func=None,
            verbose=False):
    """
  Momentum stochastic gradient with decreasing learning rate. Optimizes
  a risk-neutral objective E[f(x+u)] using a Monte Carlo
  gradient estimate:
  \\nabla E[f(x+u)] = E[\\nabla f(x+u)]
  \\approx \\frac{1}{N} \\sum_{i=1}^N \\nabla f(x+u_i)

  grad: returns gradient array
  x0: initial guess
  C: covariance matrix for perturbations
  max_iter: maximum number of iterations
  batch_size: number of points used to approximate gradient
  eta: initial learning rate;
       large values start iteration with a bigger step
  eta: float > 0
  gamma: learning rate growth parameter; 
         large values shrink step size faster
  gamma: float > 0
  theta: momentum parameter
  theta: float in (0,1)
  func: objective function handle


  return:
  x_k: 1D-array, the last iterate
  X: 2D-array, all points evaluated
  """
    if func is not None:
        f0 = func(x0)

    if verbose:
        print("")
        print("SGD Optimization")
        print(f"max_iter   = {max_iter}")
        print(f"batch_size = {batch_size}")
        print(f"eta        = {eta}")
        print(f"gamma      = {gamma}")
        print(f"theta      = {theta}")
        if func is not None:
            print(f"f(x0)      = {f0}")
        print("")
        sys.stdout.flush()

    # initial guess
    x_k = x0
    v_km1 = 0 * x0

    # dimension
    dim = len(x0)

    # storage
    X = np.zeros((max_iter + 1, dim))
    X[0] = copy(x0)

    # cholesky of covariance
    Q = np.linalg.cholesky(C)

    # BFGS Hessian
    H_k = np.eye(dim)

    # start timer
    t0 = time()

    # stop after number of iterations
    for kk in range(1, max_iter + 1):

        # print stuff
        if verbose and kk > 1:
            if func is not None:
                print(
                    f"{kk}/{max_iter};f(x) = {f_kp1 :.10f}; {(time()-t0)/(kk-1)}sec per iteration"
                )
            else:
                print(
                    f"{kk}/{max_iter}; {(time()-t0)/(kk-1)}sec per iteration")
            sys.stdout.flush()

        # generate Gaussian Correlated perturbations
        x_batch = x_k + np.random.randn(batch_size, dim) @ Q.T

        # compute gradient of f
        nablaf = np.array([grad(x) for x in x_batch])

        # finite difference hessian
        #fk = func(x_k)
        #E  = np.eye(dim)
        #h  = 1e-6
        #H_k  = np.diag([(func(x_k+h*E[i])/h - 2*fk/h + func(x_k-h*E[i])/h)/h for i in range(dim)])

        # control variate
        mu_ghat = grad(x_k)  # mean of control variate
        ghat = mu_ghat + (x_batch - x_k) @ H_k.T

        # compute A
        cov_ghat = H_k @ C @ H_k.T  # covariance cov(ghat)
        cov_ghat_nf = np.cov(nablaf.T, (ghat - mu_ghat).T)
        cov_ghat_nf = cov_ghat_nf[dim:, :dim]

        #A  = -np.linalg.solve(H_k.T,np.linalg.solve(Q.T,np.linalg.solve(Q,np.linalg.solve(H_k,cov_ghat_nf))))
        # A  = -np.trace(cov_ghat_nf)/np.trace(cov_ghat)
        A = -np.diag(cov_ghat_nf) / np.diag(cov_ghat)

        # take sample average to get gradient estimator
        g_k = np.mean(nablaf + A * (ghat - mu_ghat), axis=0)

        if verbose:
            print(f"    Variance Reduction:\
      {np.sum(np.var(nablaf + A*(ghat - mu_ghat),axis=0))/np.sum(np.var(nablaf,axis=0))}"
                  )
            sys.stdout.flush()

        # compute the step direction
        v_k = theta * v_km1 + g_k

        # compute step using decreasing step size
        s_k = -(eta / (1.0 + gamma * kk)) * v_k
        # 1/k step size decrease
        # s_k   = -(eta/(1.0 + gamma*np.sqrt(kk)))*v_k; # 1/sqrt(k) step size decrease
        x_kp1 = x_k + s_k

        if func is not None:
            # compute function value
            f_kp1 = func(x_kp1)

        # update BFGS Hessian
        y_k = grad(x_kp1) - mu_ghat
        r_k = H_k @ s_k
        H_kp1 = H_k + np.outer(y_k,y_k)/(y_k @ s_k)\
                - np.outer(r_k, r_k)/(s_k@ r_k)
        H_k = copy(H_kp1)

        # reset for next iteration
        x_k = copy(x_kp1)
        v_km1 = copy(v_k)

        # save x_k
        X[kk] = copy(x_k)

    if verbose:
        print("Done!")
        sys.stdout.flush()

    return x_kp1, X
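# A self-contained sketch (toy 1-D problem, added for illustration and independent of the
# routine above) of the control-variate idea used in the loop: pairing the sampled gradient
# with a linear surrogate whose mean is known reduces the variance of the Monte Carlo estimate.
import numpy as np

rng = np.random.default_rng(0)
grad_f = lambda x: np.sin(5 * x) + 2 * x          # stand-in for an expensive gradient
hess_approx = 2.0                                 # crude slope / Hessian surrogate

x_k, sigma = 0.3, 0.1
u = sigma * rng.standard_normal(10_000)           # Gaussian perturbations

nablaf = grad_f(x_k + u)                          # plain Monte Carlo samples
ghat = grad_f(x_k) + hess_approx * u              # control variate with known mean grad_f(x_k)
A = -np.cov(nablaf, ghat)[0, 1] / np.var(ghat)    # near-optimal control-variate coefficient
cv = nablaf + A * (ghat - grad_f(x_k))

print(np.var(nablaf), np.var(cv))                 # the control-variate samples have lower variance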
Example #41
0
    def _test_percentile_between(self):

        quintiles = range(5)
        filter_names = ['pct_' + str(q) for q in quintiles]
        iter_quintiles = list(zip(filter_names, quintiles))
        terms = {
            name: self.f.percentile_between(q * 20.0, (q + 1) * 20.0)
            for name, q in iter_quintiles
        }

        # Test with 5 columns and no NaNs.
        eye5 = eye(5, dtype=float64)
        expected = {}
        for name, quintile in iter_quintiles:
            if quintile < 4:
                # There are four 0s and one 1 in each row, so the first 4
                # quintiles should be all the locations with zeros in the input
                # array.
                expected[name] = ~eye5.astype(bool)
            else:
                # The top quintile should match the sole 1 in each row.
                expected[name] = eye5.astype(bool)

        self.check_terms(
            terms=terms,
            expected=expected,
            initial_workspace={self.f: eye5},
            mask=self.build_mask(ones((5, 5))),
        )

        # Test with 6 columns, no NaNs, and one masked entry per day.
        eye6 = eye(6, dtype=float64)
        mask = array(
            [[1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1],
             [1, 1, 0, 1, 1, 1], [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 1]],
            dtype=bool)
        expected = {}
        for name, quintile in iter_quintiles:
            if quintile < 4:
                # Should keep all values that were 0 in the base data and were
                # 1 in the mask.
                expected[name] = mask & ~eye6.astype(bool)
            else:
                # The top quintile should match the sole 1 in each row.
                expected[name] = eye6.astype(bool)

        self.check_terms(
            terms=terms,
            expected=expected,
            initial_workspace={self.f: eye6},
            mask=self.build_mask(mask),
        )

        # Test with 6 columns, no mask, and one NaN per day.  Should have the
        # same outcome as if we had masked the NaNs.
        # In particular, the NaNs should never pass any filters.
        eye6_withnans = eye6.copy()
        putmask(eye6_withnans, ~mask, nan)
        expected = {}
        for name, quintile in iter_quintiles:
            if quintile < 4:
                # Should keep all values that were 0 in the base data and were
                # 1 in the mask.
                expected[name] = mask & (~eye6.astype(bool))
            else:
                # Should keep all the 1s in the base data.
                expected[name] = eye6.astype(bool)

        self.check_terms(
            terms,
            expected,
            initial_workspace={self.f: eye6_withnans},
            mask=self.build_mask(ones((6, 6))),
        )
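# A plain-numpy sanity check (not the factor API exercised above) of the reasoning in the
# comments: in a row of eye(5) the four zeros sit below the 80th percentile and the single 1
# at or above it, so only the 1 falls in the top quintile.
import numpy as np

row = np.eye(5)[0]                  # array([1., 0., 0., 0., 0.])
p80 = np.percentile(row, 80)        # 0.2 with linear interpolation
print(row < p80)                    # [False  True  True  True  True]  -> the zeros
print(row >= p80)                   # [ True False False False False]  -> the single 1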
def leastSquaresFit(f1, f2, matches, m, inlier_indices):
    '''
    Input:
        f1 -- list of cv2.KeyPoint objects in the first image
        f2 -- list of cv2.KeyPoint objects in the second image
        matches -- list of cv2.DMatch objects
            DMatch.queryIdx: The index of the feature in the first image
            DMatch.trainIdx: The index of the feature in the second image
            DMatch.distance: The distance between the two features
        m -- MotionModel (eTranslate, eHomography)
        inlier_indices -- inlier match indices (indexes into 'matches')

    Output:
        M - transformation matrix

        Compute the transformation matrix from f1 to f2 using only the
        inliers and return it.
    '''

    # This function needs to handle two possible motion models,
    # pure translations (eTranslate)
    # and full homographies (eHomography).

    M = np.eye(3)

    if m == eTranslate:
        # For spherically warped images, the transformation is a
        # translation and only has two degrees of freedom.
        # Therefore, we simply compute the average translation vector
        # between the feature in f1 and its match in f2 for all inliers.

        u = 0.0
        v = 0.0

        for i in range(len(inlier_indices)):
            # BEGIN TODO 6
            # Use this loop to compute the average translation vector
            # over all inliers.
            # TODO-BLOCK-BEGIN
            x1, y1 = f1[matches[inlier_indices[i]].queryIdx].pt
            x2, y2 = f2[matches[inlier_indices[i]].trainIdx].pt
            u += x2 - x1
            v += y2 - y1
            # raise Exception("TODO in alignment.py not implemented")
            # TODO-BLOCK-END
            # END TODO

        u /= len(inlier_indices)
        v /= len(inlier_indices)

        M[0, 2] = u
        M[1, 2] = v

    elif m == eHomography:
        # BEGIN TODO 7
        # Compute a homography M using all inliers.
        # This should call computeHomography.
        # TODO-BLOCK-BEGIN
        pairs = []
        for i in range(len(inlier_indices)):
            pairs.append(matches[inlier_indices[i]])
        M = computeHomography(f1, f2, pairs)
        # raise Exception("TODO in alignment.py not implemented")
        # TODO-BLOCK-END
        # END TODO

    else:
        raise Exception("Error: Invalid motion model.")

    return M
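# A minimal sketch (hypothetical point data, not part of the assignment framework above) of
# the translation-only model: the average displacement of the matched inlier points fills the
# last column of a 3x3 identity matrix, exactly as M[0, 2] and M[1, 2] are set above.
import numpy as np

p1 = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 1.0]])   # inlier feature locations in image 1
p2 = p1 + np.array([5.0, -2.0])                        # the same features shifted in image 2

M = np.eye(3)
M[:2, 2] = (p2 - p1).mean(axis=0)                      # average translation (u, v)
print(M)   # [[1. 0. 5.] [0. 1. -2.] [0. 0. 1.]]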
    X0 = np.concatenate((X0, X1))
    Y0 = np.concatenate((Y0, Y1))
    if not regression:
        class_ind = [[Y0[j][0] == i for i in range(num_class)]
                     for j in range(len(Y0))]
        Y0 = np.asarray(class_ind, dtype=np.float32)
    index = [i for i in range(len(Y0))]
    np.random.shuffle(index)
    X, Y = X0[index], Y0[index]
    return X, Y


np.random.seed(10)
num_class = 2
mean = np.random.randn(num_class)
cov = np.eye(num_class)
X, Y = generate(1000, mean, cov, [3.0], True)
col = ['r' if i == 0 else 'b' for i in Y[:]]
plt.scatter(X[:, 0], X[:, 1], c=col)
plt.xlabel("Age (y)")
plt.ylabel("Size (cm)")
plt.show()

xx, yy = generate(100, mean, cov, [[3.0, 0], [3.0, 3.0], [0, 3.0]], True)
plt.scatter(xx[:, 0], xx[:, 1])
plt.show()

lab_dim = 1
inputs = 2
lr = 0.04
epoch = 50
Example #44
0
    def state_prediction(self, sa, u):
        # U is the matrix of the controls
        # u is the longitudinal speed
        A, B = create_dyn_state_matrices(u, self)
        return np.matmul(
            (A * self.dt + np.eye(2)), self.state) + B * sa * self.dt
Example #45
0
    def inverse(self):
        n = self.U().shape[0]
        I = np.eye(n)
        # Return the inverse solution in true matrix form
        return self.solve(I).T
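# A small illustration (plain numpy, independent of the class above) that solving A X = I
# column by column recovers the inverse of A, which is the idea behind calling solve(I).
import numpy as np

A = np.array([[4.0, 1.0],
              [2.0, 3.0]])
X = np.linalg.solve(A, np.eye(2))
print(np.allclose(X, np.linalg.inv(A)))   # True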
Example #46
0
    def _process_experience(self, experience):
        """Curiosity module updates the desired goal depending on experience.trajectory_over"""
        ag_buffer = self.replay_buffer.buffer.BUFF.buffer_ag

        if self.current_goals is None:
            self.current_goals = experience.reset_state['desired_goal']

        computed_reward = self.env.compute_reward(
            experience.next_state['achieved_goal'], self.current_goals, {
                's': experience.state['observation'],
                'a': experience.action,
                'ns': experience.next_state['observation']
            })
        close = computed_reward > -0.15

        # First, manage the episode resets & any special behavior that occurs on goal achievement, like go explore / resets / overshooting
        reset_idxs, overshooting_idxs, overshooting_proposals = self._manage_resets_and_success_behaviors(
            experience, close)

        if reset_idxs:
            self.train.reset_next(reset_idxs)

        if overshooting_idxs and len(ag_buffer):
            self._overshoot_goals(experience, overshooting_idxs,
                                  overshooting_proposals)

        # Now consider replacing the current goals with something else:
        if np.any(experience.trajectory_over) and len(ag_buffer):
            # sample some achieved goals
            sample_idxs = np.random.randint(len(ag_buffer),
                                            size=self.num_sampled_ags *
                                            self.n_envs)
            sampled_ags = ag_buffer.get_batch(sample_idxs)
            sampled_ags = sampled_ags.reshape(self.n_envs,
                                              self.num_sampled_ags, -1)

            # compute the q-values of both the sampled achieved goals and the current goals
            states = np.tile(experience.reset_state['observation'][:, None, :],
                             (1, self.num_sampled_ags, 1))
            states = np.concatenate(
                (states, sampled_ags),
                -1).reshape(self.num_sampled_ags * self.n_envs, -1)
            states_curr = np.concatenate(
                (experience.reset_state['observation'], self.current_goals),
                -1)
            states_cat = np.concatenate((states, states_curr), 0)

            bad_q_idxs, q_values = [], None
            if self.use_qcutoff:
                q_values = self.compute_q(states_cat)
                q_values, curr_q = np.split(
                    q_values, [self.num_sampled_ags * self.n_envs])
                q_values = q_values.reshape(self.n_envs, self.num_sampled_ags)

                # Set cutoff dynamically by using intrinsic_success_percent
                if len(self.successes_deque) == 10:
                    self.min_cutoff = max(
                        self.min_min_cutoff,
                        min(np.min(q_values), self.min_cutoff))
                    intrinsic_success_percent = np.mean(self.successes_deque)
                    if intrinsic_success_percent >= self.config.cutoff_success_threshold[
                            1]:
                        self.cutoff = max(self.min_cutoff, self.cutoff - 1.)
                        self.successes_deque.clear()
                    elif intrinsic_success_percent <= self.config.cutoff_success_threshold[
                            0]:
                        self.cutoff = max(
                            min(self.config.initial_cutoff, self.cutoff + 1.),
                            self.min_min_cutoff)
                        self.successes_deque.clear()

                # zero out the "bad" values. This practically eliminates them as candidates if any goals are viable.
                bad_q_idxs = q_values < self.cutoff
                q_values[bad_q_idxs] *= -1
                min_q_values = np.min(q_values, axis=1,
                                      keepdims=True)  # num_envs x1
                q_values[bad_q_idxs] *= -1

            # score the goals -- lower is better
            goal_values = self.score_goals(
                sampled_ags, AttrDict(q_values=q_values, states=states))

            if self.config.dg_score_multiplier > 1. and self.dg_kde.ready:
                dg_scores = self.dg_kde.evaluate_log_density(
                    sampled_ags.reshape(self.n_envs * self.num_sampled_ags,
                                        -1))
                dg_scores = dg_scores.reshape(self.n_envs,
                                              self.num_sampled_ags)
                goal_values[
                    dg_scores > -np.inf] *= self.config.dg_score_multiplier

            if q_values is not None:
                goal_values[bad_q_idxs] = q_values[bad_q_idxs] * -1e-8

            if self.randomize:  # sample proportional to the absolute score
                abs_goal_values = np.abs(goal_values)
                normalized_values = abs_goal_values / np.sum(
                    abs_goal_values, axis=1, keepdims=True)
                chosen_idx = (normalized_values.cumsum(1) > np.random.rand(
                    normalized_values.shape[0])[:, None]).argmax(1)
            else:  # take minimum
                chosen_idx = np.argmin(goal_values, axis=1)

            chosen_idx = np.eye(self.num_sampled_ags)[
                chosen_idx]  # shape(sampled_ags) = n_envs x num_sampled_ags
            if q_values is not None:
                chosen_q_val = (chosen_idx * q_values).sum(axis=1,
                                                           keepdims=True)
            chosen_ags = np.sum(sampled_ags * chosen_idx[:, :, None],
                                axis=1)  # n_envs x goal_feats

            # replace goal always when first_visit_succ (relying on the dg_score_multiplier to dg focus), otherwise
            # we are going to transition into the dgs using the ag_kde_tophat
            if hasattr(self, 'curiosity_alpha'):
                if self.use_qcutoff:
                    replace_goal = np.logical_or(
                        (np.random.random(
                            (self.n_envs, 1)) > self.curiosity_alpha.alpha),
                        curr_q < self.cutoff).astype(np.float32)
                else:
                    replace_goal = (np.random.random(
                        (self.n_envs, 1)) > self.curiosity_alpha.alpha).astype(
                            np.float32)

            else:
                replace_goal = np.ones((self.n_envs, 1), dtype=np.float32)

            # sometimes keep the desired goal anyways
            replace_goal *= (np.random.uniform(size=[self.n_envs, 1]) >
                             self.keep_dg_percent).astype(np.float32)

            new_goals = replace_goal * chosen_ags + (
                1 - replace_goal) * self.current_goals

            if hasattr(self, 'logger') and len(self.successes) > 50:
                if q_values is not None:
                    self.logger.add_histogram(
                        'Explore/Goal_q', replace_goal * chosen_q_val +
                        (1 - replace_goal) * curr_q)
                self.logger.add_scalar('Explore/Intrinsic_success_percent',
                                       np.mean(self.successes))
                self.logger.add_scalar('Explore/Cutoff', self.cutoff)
                self.successes = []

            replace_goal = replace_goal.reshape(-1)

            for i in range(self.n_envs):
                if experience.trajectory_over[i]:
                    self.successes.append(float(self.is_success[i, 0] >= 1.)
                                          )  # compromise due to exploration
                    self.successes_deque.append(
                        float(self.is_success[i, 0] >= 1.)
                    )  # compromise due to exploration
                    self.current_goals[i] = new_goals[i]
                    if replace_goal[i]:
                        self.replaced_goal[i] = 1.
                    self.go_explore[i] = 0.
                    self.is_success[i] = 0.
Example #47
0
    # What's the difference between range and arange? arange allows a fractional step and is provided by numpy
    li2 = list(range(10))
    print(li2)
    arr3 = np.arange(0, 4, 0.5)
    print(arr3)

    # Arithmetic and geometric progressions
    arr4 = np.linspace(0, 1, 11)
    print("linspace arr4:\n", arr4)
    arr6 = np.logspace(0, 1, 3)
    print("logspace arr6:\n", arr6)

    # Interfaces for generating special arrays
    arr7 = np.zeros((2, 3))
    print("zeros:\n", arr7)
    arr8 = np.eye(4)  # ones on the diagonal
    print("eye: \n", arr8)
    arr9 = np.diag([2, 3, 4, 6])
    print("diag:\n", arr9)

    # Random number generation

    # The random module only generates one random value at a time
    arr10 = random.random()  # a random float in [0, 1)
    arr11 = random.randint(1, 2)  # a random integer in [1, 2]
    print(arr10, arr11)

    # Generating arrays of random numbers: numpy provides the interface
    arr12 = np.random.random(4)  # a 1-D array of 4 floats in [0, 1)
    arr13 = np.random.rand(2, 3)  # a 2x3 array of floats
    arr14 = np.random.randn(3, 2)  # a 3x2 array from the standard normal distribution
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense


if __name__ == '__main__':
    np.random.seed(1234)

    '''
    Load data
    '''
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (x_train.reshape(-1, 784) / 255).astype(np.float32)
    x_test = (x_test.reshape(-1, 784) / 255).astype(np.float32)
    y_train = np.eye(10)[y_train].astype(np.float32)
    y_test = np.eye(10)[y_test].astype(np.float32)

    '''
    Build model
    '''
    model = Sequential([
        Dense(200, activation='relu'),
        Dense(10, activation='softmax')
    ])

    model.compile('adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])

    '''
    Train model
Example #49
0
# set the locations and labels of the xticks
plt.xticks(np.arange(4), (r'$x$', r'$y$', r'$\dot x$', r'$\dot y$'), fontsize=22)

plt.xlim([-0.5, 3.5])
plt.ylim([3.5, -0.5])

from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(im, cax=cax)

# ### Identity Matrix $I$

# In[12]:

I = np.eye(4)
print(I, I.shape)

# ## Measurements
#
# For example, we use some randomly generated measurement values

# In[13]:

m = 200  # Measurements
vx = 20  # in X
vy = 10  # in Y

mx = np.array(vx + np.random.randn(m))
my = np.array(vy + np.random.randn(m))
Example #50
0
def _compute_Fisher_entry(symbol_i, symbol_j, symbol_values, symbol_arrays,
                          thetavals, thetaarrs):
    '''
    Compute the Fisher information entry for one pair of model
    parameters, i.e. the partial derivatives of the ln-likelihood with
    respect to each of the two symbols.
    '''
    # compute partial expressions
    Krv_sym, a_sym, l_sym, G_sym, P_sym, s_sym = symbol_values
    dt_sym, phi_sym, rv_sym, erv_sym, y_sym, K_sym, deltafunc_sym = symbol_arrays
    dy_didj = sympy.lambdify([Krv_sym, phi_sym, rv_sym],
                             sympy.diff(y_sym, symbol_i, symbol_j), 'numpy')
    dy_di = sympy.lambdify([Krv_sym, phi_sym, rv_sym],
                           sympy.diff(y_sym, symbol_i), 'numpy')
    dy_dj = sympy.lambdify([Krv_sym, phi_sym, rv_sym],
                           sympy.diff(y_sym, symbol_j), 'numpy')
    dK_didj = sympy.lambdify([a_sym, l_sym, G_sym, P_sym, s_sym,
                              deltafunc_sym, dt_sym, erv_sym],
                             sympy.diff(K_sym, symbol_i, symbol_j), 'numpy')
    dK_di = sympy.lambdify([a_sym, l_sym, G_sym, P_sym, s_sym,
                            deltafunc_sym, dt_sym, erv_sym],
                           sympy.diff(K_sym, symbol_i), 'numpy')
    dK_dj = sympy.lambdify([a_sym, l_sym, G_sym, P_sym, s_sym,
                            deltafunc_sym, dt_sym, erv_sym],
                           sympy.diff(K_sym, symbol_j), 'numpy')

    # evaluate partials at input values
    K_val, a_val, l_val, G_val, P_val, s_val = thetavals
    t_arr, phi_arr, rv_arr, erv_arr, Kinv =  thetaarrs
    N = t_arr.size
    deltat_mat = np.tile(t_arr, (N,1)) - np.tile(t_arr, (N,1)).T
    deltafunc_mat = np.eye(N)
    erv_mat = np.eye(N)*erv_arr
    dy_didj = _intovector(dy_didj(K_val, phi_arr, rv_arr), N)
    dy_di = _intovector(dy_di(K_val, phi_arr, rv_arr), N)
    dy_dj = _intovector(dy_dj(K_val, phi_arr, rv_arr), N)
    dK_didj = _intomatrix(dK_didj(a_val, l_val, G_val, P_val, s_val,
                                  deltafunc_mat, deltat_mat, erv_mat), N)
    dK_di = _intomatrix(dK_di(a_val, l_val, G_val, P_val, s_val,
                              deltafunc_mat, deltat_mat, erv_mat), N)
    dK_dj = _intomatrix(dK_dj(a_val, l_val, G_val, P_val, s_val,
                              deltafunc_mat, deltat_mat, erv_mat), N)

    # get Fisher terms to sum
    y_arr = _intovector(rv_arr - (-K_val*np.sin(phi_arr)), N)
    terms = np.zeros(11)
    terms[0] = np.dot(dy_didj.T, np.dot(Kinv, y_arr))
    terms[1] = -np.dot(dy_di.T, np.dot(Kinv, np.dot(dK_dj, np.dot(Kinv, y_arr))))
    terms[2] = np.dot(dy_di.T, np.dot(Kinv, dy_dj))
    terms[3] = -np.dot(dy_dj.T,np.dot(Kinv, np.dot(dK_di, np.dot(Kinv, y_arr))))
    terms[4] = np.dot(y_arr.T, np.dot(Kinv,
                                     np.dot(dK_dj,
                                            np.dot(Kinv,
                                                   np.dot(dK_di,
                                                          np.dot(Kinv, y_arr))))))
    terms[5] = -np.dot(y_arr.T, np.dot(Kinv, np.dot(dK_didj, np.dot(Kinv, y_arr))))
    terms[6] = np.dot(y_arr.T, np.dot(Kinv,
                                     np.dot(dK_di,
                                            np.dot(Kinv,
                                                   np.dot(dK_dj,
                                                          np.dot(Kinv, y_arr))))))
    terms[7] = -np.dot(y_arr.T, np.dot(Kinv, np.dot(dK_di, np.dot(Kinv, dy_dj))))
    terms[8] = np.dot(dy_dj.T, np.dot(Kinv, dy_di))
    terms[9] = -np.dot(y_arr.T, np.dot(Kinv, np.dot(dK_dj, np.dot(Kinv, dy_di))))
    terms[10] = np.dot(y_arr.T, np.dot(Kinv, dy_didj))

    return .5 * np.sum(terms)
Example #51
0
    clients.append(client)

print 'All calculations queued, waiting for results.'

t_clientstart = time.time()

# wait for input queues to empty
for input_q in input_qs:
    input_q.join()

t_clientrun = time.time()

print 'Input queues drained. Shutting down clients.'

# stop the clients by sending them a calculation with zero atoms
dummy_at = Atoms(n=0, lattice=np.eye(3))
dummy_data = pack_atoms_to_reftraj_str(dummy_at, 0)
for input_q in input_qs:
    input_q.put(dummy_data)

# wait for them all to shutdown
for client in clients:
    client.wait()

print 'Clients terminated.'

for stdout in stdouts:
    stdout.flush()
    stdout.close()

print 'Client logs flushed.'
Example #52
0
    def initialize(self, Xf, init_labels={}):
        """
    Xf      --> r x n matrix of feature values for each point.
            where r is # features, n is # points.
    init_labels --> dictionary from index to label of initial labels.
    """
        self.reset()

        # Save Xf and initialize some of the variables which depend on Xf
        self.Xf = Xf
        self.r, self.n = Xf.shape

        self.labeled_idxs = list(init_labels.keys())
        self.unlabeled_idxs = list(set(range(self.n)) - set(self.labeled_idxs))

        self.labels = np.array([-1] * self.n)
        for lidx in self.labeled_idxs:
            self.labels[lidx] = init_labels[lidx]

        # Initialize some parameters and constants which are needed and not yet initialized
        if self.params.sparse:
            self.Ir = ss.eye(self.r)
        else:
            self.Ir = np.eye(self.r)

        self.l = (1 - self.params.eta) / self.params.eta
        if self.params.w0 is None:
            self.params.w0 = 1 / self.n

        # Set up some of the initial values of some matrices needed to compute D, BDinv, q and f
        B = np.where(self.labels == -1, 1 / (1 + self.params.w0),
                     self.l / (1 + self.l))
        # B[self.labeled_idxs] = self.l/(1+self.l)
        D = np.squeeze(Xf.T.dot(Xf.dot(np.ones((self.n, 1)))))
        if self.params.remove_self_degree:
            Ds = matrix_squeeze((Xf.multiply(Xf)).sum(0))
            D = D - Ds

        self.Dinv = 1. / D

        self.BDinv = np.squeeze(B * self.Dinv)
        if self.params.sparse:
            self.BDinv_ss = ss.diags([np.squeeze(B * self.Dinv)], [0]).tocsr()

        self.q = (1 - B) * np.where(
            self.labels == -1, self.params.pi,
            self.labels)  # Need to update q every iteration
        #self.q[self.labeled_idxs] *= np.array(init_labels.values())/self.params.pi

        # Constructing and inverting C
        if self.params.verbose:
            print("Constructing C")
            t1 = time.time()
        if self.params.sparse:
            C = (self.Ir - self.Xf.dot(self.BDinv_ss.dot(self.Xf.T)))
        else:
            C = (self.Ir - self.Xf.dot(self.BDinv[:, None] * self.Xf.T))
        if self.params.verbose:
            print("Time for constructing C:", time.time() - t1)

        if self.params.verbose:
            print("Inverting C")
            t1 = time.time()
        # Our matrix is around 40% sparse which makes ssl.inv run very slowly. We will just use the regular nlg.inv
        if self.params.sparse:
            self.Cinv = ss.csr_matrix(nlg.inv(
                C.todense()))  # Need to update Cinv every iteration
        else:
            self.Cinv = nlg.inv(C)

        if self.params.verbose:
            print("Time for inverse:", time.time() - t1)

        # Just keeping this around. Don't really need it.
        if self.params.sparse:
            self.f = self.q + self.BDinv_ss.dot(
                ((self.Xf.T.dot(self.Cinv.dot(self.Xf.dot(self.q))))))
        else:
            self.f = self.q + self.BDinv * (
                (self.Xf.T.dot(self.Cinv.dot(self.Xf.dot(self.q)))))

        # Impact factor calculations
        if self.params.alpha > 0:
            # 0. Some useful variables
            self.dP = (1. / self.l - self.params.w0) * D  # L - U
            self.dPpi = (1. / self.l -
                         self.params.pi * self.params.w0) * D  # L - pi*U

            # 1. Df_tilde
            # First, we need J = diag (X^T * Cinv * X): each element of J is x_i^T*Cinv*x_i
            if self.params.sparse:
                self.J = matrix_squeeze(
                    ((self.Cinv.dot(self.Xf)).multiply(self.Xf)).sum(0))
            else:
                self.J = np.squeeze(
                    ((self.Cinv.dot(self.Xf)) * self.Xf).sum(0))
            # Now we compute the entire diag
            diagMi = (1 + self.BDinv * self.J) * self.BDinv
            # Finally, Df_tilde
            dpf = (self.dPpi - self.dP * self.f)
            Df_tilde = dpf * diagMi / (1 + self.dP * diagMi)

            # 2. DF
            # z = (I-B)Pinv*u = B*Dinv*u (these are defined in Kernel AS notes)
            self.z = np.where(self.labels == -1, self.BDinv, 0)
            if self.params.sparse:
                Minv_u = self.z + self.BDinv_ss.dot(
                    self.Xf.T.dot(self.Cinv.dot(self.Xf.dot(self.z))))
            else:
                Minv_u = self.z + self.BDinv * (self.Xf.T.dot(
                    self.Cinv.dot(self.Xf.dot(self.z))))

            DF = (dpf - self.dP * Df_tilde) * Minv_u

            # 3. IM
            self.IM = self.f * (DF - Df_tilde)

            # Normalize IM
            self.IM = self.IM * self.f.mean() / self.IM.mean()

        # Setting iter/start_point
        # If batch initialization is done, then start_point is everything given
        if len(self.labeled_idxs) > 0:
            if len(self.labeled_idxs) == 1:
                self.start_point = self.labeled_idxs[0]
            else:
                self.start_point = [eid for eid in self.labeled_idxs]
            # Finding the next message to show -- get the current max element
            if self.params.alpha > 0:
                uidx = np.argmax(
                    (self.f +
                     self.params.alpha * self.IM)[self.unlabeled_idxs])
            else:
                uidx = np.argmax(self.f[self.unlabeled_idxs])
            self.next_message = self.unlabeled_idxs[uidx]
            # Now that a new message has been selected, mark it as unseen
            self.seen_next = False

            self.iter = 0
            self.hits = [sum(init_labels.values())]

        if self.params.verbose:
            print("Done with the initialization.")

        self.initialized = True
Example #53
0
    def run(self,
            static_files,
            moving_files,
            x0='affine',
            rm_small_clusters=50,
            qbx_thr=[40, 30, 20, 15],
            num_threads=None,
            greater_than=50,
            less_than=250,
            nb_pts=20,
            progressive=True,
            out_dir='',
            out_moved='moved.trk',
            out_affine='affine.txt',
            out_stat_centroids='static_centroids.trk',
            out_moving_centroids='moving_centroids.trk',
            out_moved_centroids='moved_centroids.trk'):
        """ Streamline-based linear registration.

        For efficiency we apply the registration on cluster centroids and
        remove small clusters.

        Parameters
        ----------
        static_files : string
        moving_files : string
        x0 : string, optional
            rigid, similarity or affine transformation model.
        rm_small_clusters : int, optional
            Remove clusters that have fewer than `rm_small_clusters`
            streamlines.
        qbx_thr : variable int, optional
            Thresholds for QuickBundlesX.
        num_threads : int, optional
            Number of threads to be used for OpenMP parallelization. If None
            (default) the value of OMP_NUM_THREADS environment variable is
            used if it is set, otherwise all available threads are used. If
            < 0 the maximal number of threads minus |num_threads + 1| is used
            (enter -1 to use as many threads as possible). 0 raises an error.
            Only metrics using OpenMP will use this variable.
        greater_than : int, optional
            Keep streamlines that have length greater than
            this value.
        less_than : int, optional
            Keep streamlines that have length less than this value.
        nb_pts : int, optional
            Number of points for discretizing each streamline.
        progressive : boolean, optional
        out_dir : string, optional
            Output directory. (default current directory)
        out_moved : string, optional
            Filename of moved tractogram.
        out_affine : string, optional
            Filename of affine for SLR transformation.
        out_stat_centroids : string, optional
            Filename of static centroids.
        out_moving_centroids : string, optional
            Filename of moving centroids.
        out_moved_centroids : string, optional
            Filename of moved centroids.

        Notes
        -----
        The order of operations is the following. First short or long
        streamlines are removed. Second the tractogram or a random selection
        of the tractogram is clustered with QuickBundlesX. Then SLR
        [Garyfallidis15]_ is applied.

        References
        ----------
        .. [Garyfallidis15] Garyfallidis et al. "Robust and efficient linear
        registration of white-matter fascicles in the space of
        streamlines", NeuroImage, 117, 124--140, 2015

        .. [Garyfallidis14] Garyfallidis et al., "Direct native-space fiber
        bundle alignment for group comparisons", ISMRM, 2014.

        .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
        bundles using local and global streamline-based registration
        and clustering, NeuroImage, 2017.
        """

        io_it = self.get_io_iterator()

        logging.info("QuickBundlesX clustering is in use")
        logging.info('QBX thresholds {0}'.format(qbx_thr))

        for static_file, moving_file, out_moved_file, out_affine_file, \
                static_centroids_file, moving_centroids_file, \
                moved_centroids_file in io_it:

            logging.info('Loading static file {0}'.format(static_file))
            logging.info('Loading moving file {0}'.format(moving_file))

            static_obj = nib.streamlines.load(static_file)
            moving_obj = nib.streamlines.load(moving_file)

            static, static_header = static_obj.streamlines, static_obj.header
            moving, moving_header = moving_obj.streamlines, moving_obj.header

            moved, affine, centroids_static, centroids_moving = \
                slr_with_qbx(
                    static, moving, x0, rm_small_clusters=rm_small_clusters,
                    greater_than=greater_than, less_than=less_than,
                    qbx_thr=qbx_thr)

            logging.info('Saving output file {0}'.format(out_moved_file))
            new_tractogram = nib.streamlines.Tractogram(
                moved, affine_to_rasmm=np.eye(4))
            nib.streamlines.save(new_tractogram,
                                 out_moved_file,
                                 header=moving_header)

            logging.info('Saving output file {0}'.format(out_affine_file))
            np.savetxt(out_affine_file, affine)

            logging.info(
                'Saving output file {0}'.format(static_centroids_file))
            new_tractogram = nib.streamlines.Tractogram(
                centroids_static, affine_to_rasmm=np.eye(4))
            nib.streamlines.save(new_tractogram,
                                 static_centroids_file,
                                 header=static_header)

            logging.info(
                'Saving output file {0}'.format(moving_centroids_file))
            new_tractogram = nib.streamlines.Tractogram(
                centroids_moving, affine_to_rasmm=np.eye(4))
            nib.streamlines.save(new_tractogram,
                                 moving_centroids_file,
                                 header=moving_header)

            centroids_moved = transform_streamlines(centroids_moving, affine)

            logging.info('Saving output file {0}'.format(moved_centroids_file))

            new_tractogram = nib.streamlines.Tractogram(
                centroids_moved, affine_to_rasmm=np.eye(4))
            nib.streamlines.save(new_tractogram,
                                 moved_centroids_file,
                                 header=moving_header)
Example #54
0
    cv2.destroyAllWindows()
    if args.show_img:
        pass
    else:
        print('Show image is disabled, add "--show_img" to enable it')

    path_pose_file = os.path.join(args.position_dir, '00.txt')
    path_scale_file = os.path.join(args.position_dir, 'scale_00.txt')
    with open(path_scale_file, 'r') as f_scale:
        scale = float(f_scale.readline().strip())
        print('[Debug]scale = {}'.format(scale))
    # position = _get_positions(path_pose_file, path_scale_file)
    # cam_poses = _get_cam_poses(path_pose_file, path_scale_file)
    fin_pose = open(path_pose_file)
    cam_poses = [np.hstack((np.eye(3), np.zeros((3, 1))))]  # origin pose
    landmarks = []

    match_npy_filelist = sorted(
        [file for file in os.listdir(inliers_match_dir) if '.npy' in file])
    if 0 < args.numIters < len(match_npy_filelist):
        match_npy_filelist = match_npy_filelist[:args.numIters]
    elif args.numIters > len(match_npy_filelist):
        print(
            'The folder only contains {} files, which is less than numIters({}) given'
            .format(len(match_npy_filelist), args.numIters))

    # initialize variables
    points0_prev, points1_prev = None, None
    depth1_curr = np.load(
        os.path.join(inliers_depth_dir, 'depth_seq_00_0000.npy'))
def modal_derivatives(V,
                      omega,
                      K_func,
                      M,
                      h=1.0,
                      verbose=True,
                      symmetric=True,
                      finite_diff='central'):
    r'''
    Compute the basis theta based on real modal derivatives.

    Parameters
    ----------
    V : ndarray
        array containing the linear basis
    omega : ndarray
        eigenfrequencies of the system in rad/s.
    K_func : function
        function returning the tangential stiffness matrix for a given
        displacement. Has to work like K = K_func(u).
    M : ndarray or sparse matrix
        Mass matrix of the system.
    h : float, optional
        step width for finite difference scheme. Default value is 1.0
    verbose : bool, optional
        flag for verbosity. Default value: True
    symmetric : bool, optional
        flag for making the modal derivative matrix theta symmetric. Default is
        `True`.
    finite_diff : str {'central', 'upwind'}
        Method for finite difference scheme. 'central' computes the finite difference
        based on a central difference scheme, 'upwind' based on an upwind scheme. Note
        that the upwind scheme can cause severe distortions of the modal derivative.

    Returns
    -------
    Theta : ndarray
        three dimensional array of modal derivatives. Theta[:,i,j] contains
        the modal derivative 1/2 * dx_i / dx_j. The basis Theta is made symmetric, so
        that `Theta[:,i,j] == Theta[:,j,i]` if `symmetric=True`.

    See Also
    --------
    static_derivatives : modal derivatives with the mass terms neglected, but much faster to compute.
    '''
    no_of_dofs = V.shape[0]
    no_of_modes = V.shape[1]
    Theta = np.zeros((no_of_dofs, no_of_modes, no_of_modes))

    # Check, if V is mass normalized:
    if not np.allclose(np.eye(no_of_modes), V.T @ M @ V, rtol=1E-5, atol=1E-8):
        raise Exception('The given modes are not mass normalized!')

    K = K_func(np.zeros(no_of_dofs))

    for i in range(no_of_modes):  # looping over the columns
        x_i = V[:, i]
        K_dyn_i = K - omega[i]**2 * M

        # fix the point with the maximum displacement of the vibration mode
        fix_idx = np.argmax(abs(x_i))
        K_dyn_i[:, fix_idx], K_dyn_i[fix_idx, :], K_dyn_i[fix_idx,
                                                          fix_idx] = 0, 0, 1

        # factorization of the dynamic stiffness matrix
        if verbose:
            print(
                'Factorizing the dynamic stiffness matrix for eigenfrequency',
                '{0:d} with {1:4.2f} rad/s.'.format(i, omega[i]))
        LU_object = SpSolve(K_dyn_i)

        for j in range(no_of_modes):  # looping over the rows
            x_j = V[:, j]
            # finite difference scheme
            if finite_diff == 'central':
                dK_dx_j = (K_func(h * x_j) - K_func(-h * x_j)) / (2 * h)
            elif finite_diff == 'upwind':
                dK_dx_j = (K_func(h * x_j) - K) / h
            else:
                raise ValueError('Finite difference scheme is not valid.')

            d_omega_2_d_x_i = x_i @ dK_dx_j @ x_i
            F_i = (d_omega_2_d_x_i * M - dK_dx_j) @ x_i
            F_i[fix_idx] = 0
            v_i = LU_object.solve(F_i)
            c_i = -v_i @ M @ x_i
            Theta[:, i, j] = v_i + c_i * x_i

    LU_object.clear()
    if symmetric:
        Theta = 1 / 2 * (Theta + Theta.transpose((0, 2, 1)))
    return Theta
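# A minimal sketch (scalar stand-in for K_func, added for illustration) of the two
# finite-difference schemes selectable via `finite_diff`: the central scheme cancels the
# even-order error terms, while the upwind scheme keeps an O(h) error.
import numpy as np

K_scalar = lambda u: 3.0 * u**2 + 2.0 * u + 1.0   # hypothetical 1-D "stiffness"
x_j, h = 1.0, 1e-4

dK_central = (K_scalar(h * x_j) - K_scalar(-h * x_j)) / (2 * h)   # central difference
dK_upwind = (K_scalar(h * x_j) - K_scalar(0.0)) / h               # upwind difference
print(dK_central, dK_upwind)   # both approximate dK/du at u = 0, which is 2.0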
Example #56
0
def define_ha(settings, usafe_r=None):
    '''make the hybrid automaton and return it'''

    ha = LinearHybridAutomaton()
    ha.variables = ["x1", "x2"]
    #
    loc1 = ha.new_mode('loc1')
    # a_matrix = np.array([[0.0, 2.0], [1.0, 0.0]], dtype=float)

    # exp 1 and 2
    a_matrix = np.array([[0.0, 2.0], [-1.5, 0.0]], dtype=float)

    # exp 1
    b_matrix = np.array([[1], [-1]], dtype=float)

    # # exp2
    # b_matrix = np.array([[1], [1]], dtype=float)

    print(a_matrix, b_matrix)
    R_mult_factor = 0.2

    Q_matrix = np.eye(len(a_matrix[0]), dtype=float)

    u_dim = len(b_matrix[0])
    R_matrix = R_mult_factor * np.eye(u_dim)

    print(a_matrix, b_matrix, Q_matrix, R_matrix)
    k_matrix = get_input(a_matrix, b_matrix, Q_matrix, R_matrix)

    print(k_matrix)
    # a_bk_matrix = a_matrix_ext - np.matmul(b_matrix_ext, k_matrix)
    a_bk_matrix = a_matrix - np.matmul(b_matrix, k_matrix)
    # print(a_bk_matrix)
    loc1.a_matrix = a_bk_matrix
    loc1.c_vector = np.array([0.0, 0.0], dtype=float)
    # print(a_bk_matrix)

    # loc1.a_matrix = np.array([[0.0, 2.0], [1.0, 0.0]], dtype=float)
    # loc1.c_vector = np.array([0.0, -9.81], dtype=float)

    error = ha.new_mode('_error')
    error.is_error = True

    usafe_set_constraint_list = []
    if usafe_r is None:
        # exp 1
        usafe_set_constraint_list.append(LinearConstraint([-1.0, 0.0], -2.0))

        # exp 2 - Significant diff across equivalent and non-equivalent runs for p_intersect reverse
        # usafe_set_constraint_list.append(LinearConstraint([0.0, 1.0], -0.85))

    else:
        usafe_star = init_hr_to_star(settings, usafe_r, ha.modes['_error'])
        for constraint in usafe_star.constraint_list:
            usafe_set_constraint_list.append(constraint)

    trans = ha.new_transition(loc1, error)
    for constraint in usafe_set_constraint_list:
        trans.condition_list.append(constraint)

    return ha, usafe_set_constraint_list
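# A small sketch (using scipy as one possible stand-in for the get_input helper above) of how
# an LQR gain K and the closed-loop matrix A - B K could be computed for these A, B, Q, R.
import numpy as np
from scipy.linalg import solve_continuous_are

A = np.array([[0.0, 2.0], [-1.5, 0.0]])
B = np.array([[1.0], [-1.0]])
Q = np.eye(2)
R = 0.2 * np.eye(1)

P = solve_continuous_are(A, B, Q, R)    # continuous-time algebraic Riccati equation
K = np.linalg.solve(R, B.T @ P)         # K = R^{-1} B^T P
print(np.linalg.eigvals(A - B @ K))     # closed-loop eigenvalues have negative real parts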
def triangulate_points(cameras,
                       filtered_applied,
                       stereo_triangulation,
                       min_likelihood=0.7):
    if len(cameras) < 2:
        raise Exception('Triangulation process needs at least two cameras')
    number_of_frames = len(cameras[0].frames)
    number_of_markers = len(cameras[0].frames[0].markers)
    triangulated_frames = []
    stereo_pair = None

    if stereo_triangulation:
        stereo_pair = get_best_pair(cameras)

    # set up filter values
    dt = 1.0 / 24
    transition_matrix = np.eye(9, dtype=np.float32)
    transition_matrix[0][3] = dt
    transition_matrix[0][6] = 0.5 * dt * dt
    transition_matrix[1][4] = dt
    transition_matrix[1][7] = 0.5 * dt * dt
    transition_matrix[2][5] = dt
    transition_matrix[2][8] = 0.5 * dt * dt
    measurement_matrix = np.array([(1, 0, 0, 0, 0, 0, 0, 0, 0),
                                   (0, 1, 0, 0, 0, 0, 0, 0, 0),
                                   (0, 0, 1, 0, 0, 0, 0, 0, 0)],
                                  dtype=np.float32)

    # init filters for all markers tracked
    filters = []
    for i in range(number_of_markers):
        kalman_filter = cv2.KalmanFilter(9, 3, 0)
        kalman_filter.transitionMatrix = transition_matrix
        kalman_filter.measurementMatrix = measurement_matrix
        filters.append(Filter(kalman_filter))
    for i in range(number_of_frames):
        triangulated_markers = []
        for j in range(number_of_markers):
            visible_counter = 0
            for camera in cameras:
                if camera.frames[i].markers[j].likelihood > 0 and \
                        ((stereo_triangulation and (camera in stereo_pair[0] or camera in stereo_pair[1]))
                         or not stereo_triangulation):
                    visible_counter += 1

            if visible_counter < 2:
                continue

            # check if old stereo triangulation method is used
            if stereo_triangulation:
                best_cameras = get_front_back_cameras_for_marker(
                    stereo_pair, i, j, 0.5)
                triangulated = triangulate_point(best_cameras, i, j,
                                                 best_cameras[0].image_size)
            else:
                # use n view triangulation method
                best_cameras = get_best_cameras(cameras, i, j, len(cameras),
                                                min_likelihood)
                system = MultiCameraSystem([cam.model for cam in best_cameras])
                points = [
                    (cam.model.name,
                     [cam.frames[i].markers[j].x, cam.frames[i].markers[j].y])
                    for cam in best_cameras
                ]
                triangulated = system.find3d(points)
            average_likelihood = np.mean(
                [cam.frames[i].markers[j].likelihood for cam in best_cameras])
            point_triangulated = triangulated is not None and average_likelihood > min_likelihood
            marker_key = best_cameras[0].frames[i].markers[j].marker_key

            if point_triangulated:
                # check if kalman filter is necessary
                if filtered_applied:
                    triangulated_ec_world_frame_formatted = np.array(
                        ([triangulated]), np.float32).T
                    # compensate for the initial state set to 0,0,0 in opencv kalman filter
                    if filters[j].first:
                        for l in range(100):
                            filters[j].filter.predict()
                            filters[j].filter.correct(
                                triangulated_ec_world_frame_formatted)
                        filters[j].first = False
                    filters[j].filter.predict()
                    estimated = filters[j].filter.correct(
                        triangulated_ec_world_frame_formatted)
                    # append triangulated point
                    triangulated_markers.append({
                        'point':
                        np.array([
                            estimated[0][0], estimated[1][0], estimated[2][0]
                        ]),
                        'marker':
                        marker_key,
                        'cameras':
                        "".join([str(cam.number) for cam in best_cameras]),
                        'likelihood':
                        str(average_likelihood)
                    })
                else:
                    # append triangulated point
                    triangulated_markers.append({
                        'point':
                        triangulated,
                        'marker':
                        marker_key,
                        'cameras':
                        "".join([str(cam.number) for cam in best_cameras]),
                        'likelihood':
                        str(average_likelihood)
                    })

        triangulated_frames.append(triangulated_markers)
    return triangulated_frames
Example #58
0
import numpy as np

def gamma_pseudoinverse(gmat, n):
    g = np.zeros_like(gmat)
    for i in range(n):
        g[i, i] = 1 / np.sqrt(1 + gmat[i, i])
        
    return g

if __name__ =='__main__':
    x = 2 ** 0.5 * np.array([[1.0,1.0,0.0],[-1.0,0.0, 1.0],[0.0,-1.0,-1.0]])

    H = np.eye(3)
    R = np.eye(3)

    xdelta = x - x.mean(axis=0, keepdims=True)

    B = 0.5 * xdelta.T @ xdelta

    #H is identity
    K = B @ np.linalg.inv(B + R)

    #again, H is identity
    C = (H - K) @ B

    print("Posterior covariance: ", C)

    A = xdelta.T / (2 ** 0.5)

    V = H @ A
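    # With H = I, the posterior covariance above reduces to the textbook Kalman form
    # C = (I - K) B = B - B (B + R)^{-1} B; a quick numerical check added for illustration:
    print(np.allclose(C, B - B @ np.linalg.inv(B + R) @ B))   # True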
Example #59
0
def generate_logistic_regression_data(num_users=100,
                                      rho=10,
                                      dim=40,
                                      lamb=1,
                                      noise_ratio=0.05):
    # For consistent results
    np.random.seed(0)

    # Sanity check
    assert (rho >= 1 and num_users > 0 and dim > 0)

    X_split = [[] for _ in range(num_users)]  # X for each user
    y_split = [[] for _ in range(num_users)]  # y for each user

    # Users' sample sizes (uniform here; a power law could be used for heterogeneity)
    samples_each_user = 10000
    samples_per_user = np.array(num_users * [samples_each_user])
    indices_per_user = np.insert(samples_per_user.cumsum(), 0, 0, 0)
    num_total_samples = indices_per_user[-1]

    # The feature mean for X is drawn from N(0, 1) and shared across users
    mean_X = np.random.randn(dim)

    # Covariance matrix for X
    Sigma = np.eye(dim)

    # L = 1, beta = LAMBDA
    #L = 100 if rho == 1 else 1 / (rho - 1)
    L = lamb / (rho - 1)
    # Keep all users' inputs and labels in one array,
    # indexed according to indices_per_user.
    #   (e.g. X_total[indices_per_user[n]:indices_per_user[n+1], :] = X_n)
    #   (e.g. y_total[indices_per_user[n]:indices_per_user[n+1]] = y_n)
    X_total = np.zeros((num_total_samples, dim))
    y_total = np.zeros(num_total_samples)

    for n in range(num_users):
        # Generate data
        X_n = np.random.multivariate_normal(mean_X, Sigma, samples_per_user[n])
        X_total[indices_per_user[n]:indices_per_user[n + 1], :] = X_n

    # Normalize all X's using LAMBDA
    norm = np.sqrt(
        np.linalg.norm(X_total.T.dot(X_total), 2) / num_total_samples)
    X_total /= norm + L

    # Generate weights and labels
    W = np.random.rand(dim)
    y_total = logit(X_total, W)
    y_predicted = y_total.copy()
    y_total = np.where(y_total >= 0.5, 1, 0)

    # Apply noise: randomly flip some of y_n with probability noise_ratio
    noise = np.random.binomial(1, noise_ratio, num_total_samples)
    y_total = np.multiply(noise - y_total, noise) + np.multiply(
        y_total, 1 - noise)

    loss = np.average(-y_total * np.log(y_predicted) -
                      (1 - y_total) * np.log(1 - y_predicted))

    # Save each user's data separately
    for n in range(num_users):
        X_n = X_total[indices_per_user[n]:indices_per_user[n + 1], :]
        y_n = y_total[indices_per_user[n]:indices_per_user[n + 1]]
        X_split[n] = X_n.tolist()
        y_split[n] = y_n.tolist()

        # print("User {} has {} samples.".format(n, samples_per_user[n]))

    # Save the optimal weights
    with open("data/optimal_weights.npy", "wb") as f:
        np.save(f, W)
        np.save(f, loss)
    # if not os.path.exists("data"):
    #     os.mkdir("data")
    # np.save("data/optimal_weights.npy", W)

    print("=" * 80)
    print("Generated synthetic data for logistic regression successfully.")
    print("Summary of the generated data:".format(rho))
    print("    Total # users       : {}".format(num_users))
    print("    Input dimension     : {}".format(dim))
    print("    rho                 : {}".format(rho))
    print("    Total # of samples  : {}".format(num_total_samples))
    print("    Minimum # of samples: {}".format(np.min(samples_per_user)))
    print("    Maximum # of samples: {}".format(np.max(samples_per_user)))
    print("=" * 80)

    return X_split, y_split
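# The generator above calls a helper `logit(X, W)` that is not shown in this excerpt; a
# minimal sketch consistent with how it is used (probabilities later thresholded at 0.5 and
# fed to a cross-entropy loss) would be the logistic sigmoid of the linear scores.
import numpy as np

def logit(X, W):
    return 1.0 / (1.0 + np.exp(-X.dot(W)))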
w = 2 * np.pi  # natural frequency
a = 0.25  # damping ratio
A = np.array([[0, 1], [-w**2, -2 * a * w]])
dt = 0.01  # time step
Tf = 10  # final simulation time
nsteps = int(Tf / dt)
# INITIAL CONDITION: at t = 0, x = 2, v = 0
u0 = np.array([2, 0])
#%% ITERATION: EXPLICIT EULER
Texp = np.zeros(nsteps)
Uexp = np.zeros((2, nsteps))
Texp[0] = 0.0
Uexp[:, 0] = u0
for k in range(nsteps - 1):
    Texp[k + 1] = Texp[k] + dt
    Uexp[:, k + 1] = np.dot((np.eye(2) + dt * A), Uexp[:, k])
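# A brief check (using scipy's matrix exponential; not part of the original script) that the
# explicit-Euler propagator (np.eye(2) + dt * A) used above matches the exact one-step
# propagator expm(dt * A) up to O(dt^2) for this small dt.
from scipy.linalg import expm
print(np.linalg.norm(expm(dt * A) - (np.eye(2) + dt * A)))   # small for dt = 0.01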

plt.figure(figsize=(10, 5))
# PLOT POSITION vs TIME
plt.suptitle(
    "Simulation of a damped free oscillator with integration step " +
    r"$ \Delta t= %.2f$" % dt,
    fontweight="bold")
plt.subplot(1, 2, 1)
plt.plot(Texp, Uexp[0, :], linewidth=2, color='k')
plt.xlabel("Time")
plt.ylabel("Position")
plt.title("Trajectory of the mass M (explicit Euler)")
# 2D PHASE DIAGRAM
plt.subplot(1, 2, 2)
plt.plot(Uexp[0, :], Uexp[1, :], linewidth=2, color='k')