Code example #1
# Presumed imports (Sverchok/Blender context; aliases inferred from usage):
from numpy import array, newaxis, all as np_all
from numpy.linalg import norm as np_norm
from mathutils import Vector as V

def compute_distances_np(line, pts, result, gates, tolerance):
    '''Calculate all point-to-line distances with NumPy.'''
    # Adapted from https://math.stackexchange.com/questions/1905533/find-perpendicular-distance-from-point-to-line-in-3d

    np_pts = array(pts)
    segment = V(line[-1]) - V(line[0])
    segment_length = segment.length
    line_direction = segment / segment_length
    vect = np_pts - line[0]
    vect_proy = vect.dot(line_direction)
    closest_point = line[0] + vect_proy[:, newaxis] * line_direction
    dif_v = closest_point - np_pts
    dist = np_norm(dif_v, axis=1)

    is_in_segment = []
    is_in_line = []
    closest_in_segment = []
    if gates[4] or gates[1]:
        closest_in_segment = np_all([vect_proy >= 0, vect_proy <= segment_length], axis=0)
    if gates[1] or gates[2]:
        np_tolerance = array(tolerance)
        is_in_line = dist < np_tolerance
        if gates[1]:
            is_in_segment = np_all([closest_in_segment, is_in_line], axis=0)

    local_result = [dist, is_in_segment, is_in_line, closest_point, closest_in_segment]

    for i, res in enumerate(result):
        if gates[i]:
            res.append(local_result[i].tolist() if not gates[5] else local_result[i])
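
A minimal, self-contained sketch of the same point-to-line projection technique in plain NumPy (no Blender mathutils); the function name and data here are illustrative, not part of the original API:

import numpy as np

def point_line_distances(line_start, line_end, pts):
    """Distance from each point to the infinite line through two points."""
    line_start = np.asarray(line_start, dtype=float)
    segment = np.asarray(line_end, dtype=float) - line_start
    direction = segment / np.linalg.norm(segment)   # unit vector along the line
    vect = np.asarray(pts, dtype=float) - line_start
    proj = vect.dot(direction)                      # scalar projection onto the line
    closest = line_start + proj[:, np.newaxis] * direction
    return np.linalg.norm(closest - pts, axis=1), closest

dists, closest = point_line_distances([0, 0, 0], [1, 0, 0], [[0.5, 1.0, 0.0]])
print(dists)  # [1.]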
Code example #2
# Presumed imports (pyleecan context): pi and np_all (= numpy.all) from numpy;
# set_FEMM_circuit_prop is a pyleecan helper assumed to be in scope.
from numpy import pi, all as np_all

def update_FEMM_simulation(
    femm,
    circuits,
    is_internal_rotor,
    is_sliding_band,
    angle_rotor,
    Is,
    Ir,
    ii,
):
    """Update the simulation by changing the rotor position and
    updating the currents


    Parameters
    ----------
    femm : FEMMHandler
        client to send command to a FEMM instance
    circuits :
        Output object
    is_internal_rotor: bool
        True if it is an internal rotor topology
    is_sliding_band: bool
        True if it is an internal rotor topology
    angle_rotor: ndarray
        Vector of rotor angular position over time [Nt,]
    Is : ndarray
        Stator currents function of phase and time [qs,Nt]
    Ir : ndarray
        Rotor currents function of phase and time [qr,Nt]
    ii : int
        Time step index

    """

    if is_sliding_band:  # No rotation without sliding band.
        # Rotor rotation using sliding band
        if is_internal_rotor:
            femm.mi_modifyboundprop("bc_ag2", 10, 180 * angle_rotor[ii] / pi)
        else:
            femm.mi_modifyboundprop("bc_ag2", 11, 180 * angle_rotor[ii] / pi)

    # Update currents
    for label in circuits:
        if "Circs" in label and Is is not None and not np_all(Is == 0):  # Stator
            set_FEMM_circuit_prop(
                femm,
                circuits,
                label,
                Is[:, ii],
            )
        if "Circr" in label and Ir is not None and not np_all(Ir == 0):  # Rotor
            set_FEMM_circuit_prop(
                femm,
                circuits,
                label,
                Ir[:, ii],
            )
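
One detail worth flagging in the rotation above: the 180 / pi factor converts the rotor angle from radians to the degrees that FEMM's sliding-band boundary property expects, i.e.

    angle_deg = 180 * angle_rotor[ii] / pi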
Code example #3
File: comp_force.py | Project: squall0629/pyleecan
# Presumed imports (pyleecan/SciDataTool context; aliases inferred from usage):
from numpy import pi, all as np_all
from SciDataTool import DataTime, VectorField

def comp_force(self, output):
    """Compute the air-gap surface force based on Maxwell Tensor (MT).

    Parameters
    ----------
    self : ForceMT
        A ForceMT object

    output : Output
        an Output object (to update)
    """

    mu_0 = 4 * pi * 1e-7

    # Load magnetic flux
    Brphiz = output.mag.B.get_rphiz_along("time", "angle")
    Br = Brphiz["radial"]
    Bt = Brphiz["tangential"]
    Bz = Brphiz["axial"]

    # Compute AGSF with MT formula
    Prad = (Br * Br - Bt * Bt - Bz * Bz) / (2 * mu_0)
    Ptan = Br * Bt / mu_0
    Pz = Br * Bz / mu_0

    # Store the results
    components = {}
    if not np_all((Prad == 0)):
        Prad_data = DataTime(
            name="Airgap radial surface force",
            unit="N/m2",
            symbol="P_r",
            axes=list(output.mag.B.components.values())[0].axes,
            values=Prad,
        )
        components["radial"] = Prad_data
    if not np_all((Ptan == 0)):
        Ptan_data = DataTime(
            name="Airgap tangential surface force",
            unit="N/m2",
            symbol="P_t",
            axes=list(output.mag.B.components.values())[0].axes,
            values=Ptan,
        )
        components["tangential"] = Ptan_data
    if not np_all((Pz == 0)):
        Pz_data = DataTime(
            name="Airgap axial surface force",
            unit="N/m2",
            symbol="P_z",
            axes=list(output.mag.B.components.values())[0].axes,
            values=Pz,
        )
        components["axial"] = Pz_data
    output.force.P = VectorField(name="Magnetic airgap surface force",
                                 symbol="P",
                                 components=components)
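
Restating the MT formulas implemented above, with \mu_0 the vacuum permeability:

    P_r = (B_r^2 - B_t^2 - B_z^2) / (2 \mu_0),    P_t = B_r B_t / \mu_0,    P_z = B_r B_z / \mu_0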
Code example #4
File: dae_writer.py | Project: efim/arboris-python
# Presumed context (arboris-python): np_all is numpy.all; collada and warnings are imported at module level.
def _set_up_axis(self, up):
    """Add an up_axis element."""
    if np_all(up == [1., 0., 0.]):
        up_elem = collada.asset.UP_AXIS.X_UP
    elif np_all(up == [0., 1., 0.]):
        up_elem = collada.asset.UP_AXIS.Y_UP
    elif np_all(up == [0., 0., 1.]):
        up_elem = collada.asset.UP_AXIS.Z_UP
    else:
        up_elem = collada.asset.UP_AXIS.Y_UP
        warnings.warn('the up vector (' + str(up) +
                      ') is not compatible with collada; using the default.')
    self.dae.assetInfo.upaxis = up_elem
Code example #5
File: dae_writer.py | Project: salini/arboris-python
# Presumed context (arboris-python): np_all is numpy.all; collada and warnings are imported at module level.
def _set_up_axis(self, up):
    """Add an up_axis element."""
    if np_all(up == [1.0, 0.0, 0.0]):
        up_elem = collada.asset.UP_AXIS.X_UP
    elif np_all(up == [0.0, 1.0, 0.0]):
        up_elem = collada.asset.UP_AXIS.Y_UP
    elif np_all(up == [0.0, 0.0, 1.0]):
        up_elem = collada.asset.UP_AXIS.Z_UP
    else:
        up_elem = collada.asset.UP_AXIS.Y_UP
        warnings.warn("the up vector (" + str(up) + ") is not compatible with collada; using the default.")
    self.dae.assetInfo.upaxis = up_elem
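
The np_all idiom both versions rely on, in isolation: comparing an array with a list yields an elementwise boolean array, and numpy.all reduces it to a single truth value. A quick sketch:

import numpy as np

up = np.array([0.0, 1.0, 0.0])
print(np.all(up == [1.0, 0.0, 0.0]))  # False: not every element matches
print(np.all(up == [0.0, 1.0, 0.0]))  # True: every element matches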
Code example #6
# Presumed imports (PyTorch context; aliases inferred from usage):
from typing import Union
from numpy import ndarray, array, full, float32, all as np_all
from torch import from_numpy, FloatTensor

def tensor_distribution(dist: Union[int, ndarray],
                        exponent: float) -> FloatTensor:
    if isinstance(dist, int):
        n = dist
        weights = from_numpy(full(n, 1.0 / n, dtype=float32))
        if exponent != 1.0:
            print(
                "Warning: negative sampling exponent has no effect when a distribution is not "
                "explicitly given; the uniform distribution will be used."
            )
    elif isinstance(dist, ndarray):
        dist = array(
            dist, dtype=float32
        )  # in case dist is a np.matrix, which complicates squeezing
        if dist.ndim != 1 and dist.shape.count(1) < dist.ndim - 1:
            raise ValueError(
                "if an array, `input/output_dist` must be squeezeable to a 1-dimensional "
                "array.")
        assert np_all(
            dist >= 0
        ), "`input/output_dist`, if given as an array, must have nonnegative entries."
        dist **= exponent
        dist /= dist.sum()
        weights = from_numpy(dist)
    else:
        raise TypeError(
            "`input/output_dist` must be an int representing the number of classes or a "
            "numpy array of nonnegative numbers that is squeezeable to a 1-dimensional array."
        )
    return weights
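
The dist **= exponent, dist /= dist.sum() step above is the usual frequency-smoothing trick from negative sampling: an exponent below 1 shifts probability mass toward rare classes. A small numeric illustration (the counts are made up):

import numpy as np

counts = np.array([100.0, 10.0, 1.0], dtype=np.float32)
smoothed = counts ** 0.75
smoothed /= smoothed.sum()
print(counts / counts.sum())  # ~ [0.901 0.090 0.009]
print(smoothed)               # ~ [0.827 0.147 0.026]: rare classes gain relative mass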
Code example #7
# Presumed imports: np_and = numpy.logical_and, np_any = numpy.any,
# np_all = numpy.all, diff = numpy.diff; ODEIndex, SplitterStatus,
# DEFAULT_SPLITTER_CUTOFF and log come from the host package.
from numpy import diff, logical_and as np_and, any as np_any, all as np_all

def v_θ_deriv_splitter(soln, cutoff=DEFAULT_SPLITTER_CUTOFF, **kwargs):
    """
    Use derivative of v_θ to determine type of solution
    """
    # pylint: disable=unused-argument
    v_θ = soln.solution[:, ODEIndex.v_θ]
    if soln.internal_data is not None:
        problems = soln.internal_data.problems
        if any("negative velocity" in pl for pl in problems.values()):
            return SplitterStatus.SIGN_FLIP
        deriv_v_θ = soln.internal_data.derivs[ODEIndex.v_θ]
        if any(diff(deriv_v_θ, 2) > 0):
            return SplitterStatus.DIVERGE
        if any(diff(deriv_v_θ) < 0):
            return SplitterStatus.SIGN_FLIP
    else:
        log.notice("Skipping checking problems due to no internal data")

    v_θ_near_sonic = np_and(v_θ > cutoff, v_θ < 1)
    if not np_any(v_θ_near_sonic):
        return SplitterStatus.SIGN_FLIP

    v_θ_above_sonic = v_θ > 1
    if np_any(v_θ_above_sonic):
        return SplitterStatus.DIVERGE

    d_v_θ = diff(v_θ[v_θ_near_sonic])
    if np_all(d_v_θ > 0):
        return SplitterStatus.DIVERGE
    return SplitterStatus.SIGN_FLIP
Code example #8
File: _node.py | Project: luk-f/pyCFOFiSAX
    # Presumed context (pyCFOFiSAX): np_array = numpy.array, np_all = numpy.all;
    # RootNode is the parent class from the same module.
    def __init__(self, tree, parent, sax, cardinality):
        """
        Initialization function of the TerminalNode class

        :returns: a terminal node
        :rtype: TerminalNode
        """

        RootNode.__init__(self, tree=tree, parent=parent,
                          sax=sax, cardinality=cardinality)

        del self.cardinality_next

        """ Specific part of terminal nodes
        (What? We say terminal nodes?) """
        # Variable for BKPT (non-incremental)
        self.bkpt_min, self.bkpt_max = np_array([]), np_array([])

        self.terminal = True
        if parent is None:
            self.level = 0
        else:
            self.level = parent.level + 1

        self.splitable = True
        if np_all(np_array(self.cardinality) >= self.tree.max_card_alphabet) and self.tree.boolean_card_max:
            self.splitable = False

        """ Important, the list of PAA sequences that the tree contains"""
        self.sequences = []
Code example #9
# Presumed imports (pyleecan/SciDataTool context):
from numpy import all as np_all
from SciDataTool import DataTime, VectorField
from pyleecan.Classes.SolutionVector import SolutionVector

def build_solution_vector(field, axis_list, name="", symbol="", unit="", is_real=True):
    """Build a SolutionVector object

    Parameters
    ----------
    field : ndarray
        a vector field
    axis_list : list
        a list of SciDataTool axes
    name : str
        name of the solution
    symbol : str
        symbol prefix for the components
    unit : str
        unit of the field
    is_real : bool
        True if the field values are real

    Returns
    -------
    solution: SolutionVector
        a SolutionVector object
    """

    components = {}

    x_data = DataTime(
        name=name,
        unit=unit,
        symbol=symbol + "x",
        axes=axis_list,
        values=field[..., 0],
        is_real=is_real,
    )
    components["comp_x"] = x_data

    y_data = DataTime(
        name=name,
        unit=unit,
        symbol=symbol + "y",
        axes=axis_list,
        values=field[..., 1],
        is_real=is_real,
    )
    components["comp_y"] = y_data

    if field.shape[-1] == 3 and not np_all((field[..., 2] == 0)):
        z_data = DataTime(
            name=name,
            unit=unit,
            symbol=symbol + "z",
            axes=axis_list,
            values=field[..., 2],
            is_real=is_real,
        )
        components["comp_z"] = z_data

    vectorfield = VectorField(name=name, symbol=symbol, components=components)

    solution = SolutionVector(field=vectorfield, label=symbol)

    return solution
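
The np_all guard above is what keeps an identically zero axial component out of the VectorField; the test in isolation (a sketch):

import numpy as np

field = np.zeros((4, 8, 3))
field[..., 0] = 1.0                 # only the x component is non-zero
print(np.all(field[..., 2] == 0))   # True: comp_z would be omitted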
Code example #10
File: tools.py | Project: rlcjj/QRToolkits
# Presumed imports: pandas as pd; np_all = numpy.all.
import pandas as pd
from numpy import all as np_all

def is_continue_rpd(dates):
    """
    Check whether the given sequence of dates consists of consecutive report periods

    Parameter
    ---------
    dates: iterable
        elements are datetime (or a subclass) and must be report-period end dates

    Return
    ------
    result: boolean
    """
    dates = pd.Series(dates).sort_values()
    if not np_all(dates.dt.is_quarter_end):
        raise ValueError('Non-report-period date in parameter "dates"!')
    continue_dates = pd.date_range(dates.iloc[0], dates.iloc[-1], freq='Q')
    return len(continue_dates) == len(dates)
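
A quick usage sketch under the presumed imports above (note that recent pandas versions spell the quarter-end frequency 'QE' rather than 'Q'):

dates = pd.to_datetime(['2017-03-31', '2017-06-30', '2017-09-30'])
print(is_continue_rpd(dates))                                         # True: consecutive quarter ends
print(is_continue_rpd(pd.to_datetime(['2017-03-31', '2017-09-30'])))  # False: 2017-06-30 is missing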
Code example #11
File: Feature.py | Project: jdkr/ImageFeature
# Presumed imports (aliases inferred from usage); calcImagePyramid,
# calcHessianPyramid and Featurepoint are helpers from the same module.
from time import time
from numpy import array, where, zeros, rint, intp, indices, average, all as np_all
from scipy.ndimage import generate_binary_structure, label, find_objects

def detectFeaturepoints(
        imageArray,
        resolutionMax,
        scalerangeFactor,
        scalesCount,
        hessianThreshold,
        minFeatureScales):
    ''' Parameter:
        - resolutionMax :: positive Float [MPix]; resolution of the image with the highest resolution in the pyramid
        - scalerangeFactor :: positive Float; factor between the smallest and largest scale (zoom level)
        - scalesCount :: positive Int; number of scales
        - minFeatureScales :: positive Int; minimum number of scales in which a feature must occur
        - hessianThreshold :: positive Float; threshold for the detection of critical points '''
    start=time()
    (imagePyramid, scales) = calcImagePyramid(imageArray, resolutionMax, scalerangeFactor, scalesCount)
    hessianPyramid = calcHessianPyramid(imagePyramid, scalesCount)
    extrema={'minima':[], 'maxima':[], 'saddle':[]}
    for idxScale in range(scalesCount):
        #sufficient criterion for extrema (Hessian matrix positive- or negative-definite). These equivalences apply here: H positive-definite: Ixx>0 and det(H)>0  |  H negative-definite: Ixx<0 and det(H)>0 (see literature: Lindeberg 2015, "Image Matching Using Generalized Scale-Space Interest Points", p.9)
        # TODO: check if an additional threshold for Ixx,Iyy or Ixy brings an improvement
        Ixx=hessianPyramid[idxScale][0]
        detH=hessianPyramid[idxScale][3]
        indicesMinima=where(np_all([detH > hessianThreshold, Ixx > 0], axis=0))
        indicesMaxima=where(np_all([detH > hessianThreshold, Ixx < 0], axis=0))
        indicesSaddle=where(detH < -hessianThreshold)
        extrema['minima'].append(array([indicesMinima[1],indicesMinima[0]]).T)
        extrema['maxima'].append(array([indicesMaxima[1],indicesMaxima[0]]).T)
        extrema['saddle'].append(array([indicesSaddle[1],indicesSaddle[0]]).T)

    print('Time to find extrema: '+str(time()-start))


    start=time()
    # Averaging of the extrema in scale-space over their spans:
    # TODO: Improve averaging of extrema (subpixel interpolation)
    featurepoints=[]
    for category in extrema:
        responseArray=zeros(shape=(scalesCount,)+imagePyramid[0].shape, dtype=float)
        subpixelArray=zeros(shape=(2,scalesCount)+imagePyramid[0].shape, dtype=float)
        labelStructure=generate_binary_structure(3,3)
        # TODO: Does this labelStructure also identify diagonally adjacent pixels as connected?
        for idxScale in range(scalesCount):
            #pixelcoordinates of the extrema:
            extremaPixel=\
                rint(array([extrema[category][idxScale][:,0],
                            extrema[category][idxScale][:,1]]).T).astype(intp)
            #Transform all pixel coordinates into the coordinate system of the first scale level and round them to the nearest integer to get the index in scale space:
            extremaPixelScaleSpace = \
                array([extrema[category][idxScale][:,0] * scales[0][0] / scales[idxScale][0],
                       extrema[category][idxScale][:,1] * scales[0][1] / scales[idxScale][1],
                       [idxScale] * len(extrema[category][idxScale])]).T #Order: x,y,scale
            extremaPixelScaleSpaceRound=rint(extremaPixelScaleSpace).astype(intp)

            #The feature-response (in this case detH) is set in the responseArray at the rounded indexpositions of the extrema:
            responseArray[
                idxScale,
                extremaPixelScaleSpaceRound[:,1],
                extremaPixelScaleSpaceRound[:,0]] = \
                    hessianPyramid[idxScale][3,extremaPixel[:,1],extremaPixel[:,0]]
            #The unrounded subpixel-coordinates in scaleSpace are stored in the subpixelArray at the rounded indexpositions of the extrema:
            subpixelArray[
                0, #x-direction
                idxScale,
                extremaPixelScaleSpaceRound[:,1],
                extremaPixelScaleSpaceRound[:,0]] = \
                    extremaPixelScaleSpace[:,0]
            subpixelArray[
                1, #y-direction
                idxScale,
                extremaPixelScaleSpaceRound[:,1],
                extremaPixelScaleSpaceRound[:,0]] = \
                    extremaPixelScaleSpace[:,1]

        #Connected environments in scaleSpace belong to the same feature. These environments are averaged to compute the feature positions with subpixel accuracy:
        scaleSpaceExtremaLabeled, featureCount=label(responseArray, labelStructure)
        featureSlices=find_objects(scaleSpaceExtremaLabeled)
        for i in range(featureCount):
            # For each feature, its span is selected from the response array:
            responseArrayFeature=responseArray[featureSlices[i]]
            # Pixel coordinates of the feature environment:
            indicesScale, indices_y, indices_x =indices(responseArrayFeature.shape)
            scale0=featureSlices[i][0].start
            y0    =featureSlices[i][1].start
            x0    =featureSlices[i][2].start
            indicesScale+=scale0
            indices_y+=y0
            indices_x+=x0
            # To average the feature coordinates, do not take the rounded pixels, but the unrounded coordinates from the subpixelArray on the slice of the feature:
            # TODO: Maybe also save scale in subpixelArray instead of using indicesScale?
            coordsX=subpixelArray[0][featureSlices[i]].ravel()
            coordsY=subpixelArray[1][featureSlices[i]].ravel()
            featureNeighborhoodCoordinatesSubpixel=\
                array([indicesScale.ravel(),coordsY.ravel(),coordsX.ravel()]).T
            # Dismiss features whose featureScaleRange does not span the minimum number of scales. Such features are too 'weak':
            idxScaleMin=featureNeighborhoodCoordinatesSubpixel[0,0]
            idxScaleMax=featureNeighborhoodCoordinatesSubpixel[-1,0]
            featureScaleRange=idxScaleMax-idxScaleMin+1
            if featureScaleRange<minFeatureScales:
                continue
            # The weights of the individual points in the feature environment are determined by the feature response detH.
            neighborhoodWeights=responseArrayFeature.ravel()
            # TODO: Examine whether the absolute value of detH is the better weighting.
            # neighborhoodWeights=np_abs(responseArrayFeature).ravel()

            # The coordinates of the feature are finally averaged over the neighborhood:
            coordinatesFeature=average(
                featureNeighborhoodCoordinatesSubpixel, axis=0, weights=neighborhoodWeights)
            scaleFeature=coordinatesFeature[0]
            # The feature coordinates are specified in coordinates of the full-resolution image:
            featurepoint = Featurepoint(
                x=coordinatesFeature[2]/scales[0][0],
                y=coordinatesFeature[1]/scales[0][1],
                scale=scaleFeature,
                category=category)
            featurepoints.append(featurepoint)
    print('Time for detection of '+str(len(featurepoints))+' featurepoints: '+str(time()-start))
    return featurepoints
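
The sufficiency criterion from the comment above, restated for the symmetric 2x2 Hessian H = [[I_xx, I_xy], [I_xy, I_yy]]:

    H positive-definite  <=>  I_xx > 0 and det(H) > 0   (local minimum)
    H negative-definite  <=>  I_xx < 0 and det(H) > 0   (local maximum)
    det(H) < 0           <=>  saddle point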
Code example #12
 # Presumed context (Calkulate-style titration class): ndarray, size, isscalar,
 # full_like and np_all (= numpy.all) come from numpy; concentrations and
 # dissociation are modules from the host package.
 def __init__(self,
              volAcid,
              emf,
              tempK,
              pSal,
              volSample,
              totalCarbonate=0,
              totalPhosphate=0,
              totalSilicate=0,
              totalAmmonia=0,
              totalH2Sulfide=0,
              WhichKs=10,
              WhoseKSO4=1,
              WhoseKF=1,
              WhoseTB=2,
              warnings=True,
              checkInputs=True,
              buretteCorrection=1):
     # Check inputs are correctly formatted, if not requested otherwise
     if checkInputs:
         assert type(volAcid) is ndarray, '`volAcid` must be an ndarray.'
         assert type(emf) is ndarray, '`emf` must be an ndarray.'
         assert size(volAcid) == size(emf), \
             '`volAcid` and `emf` must be the same size.'
         assert ((type(tempK) is ndarray and size(tempK) == size(volAcid))
             or isscalar(tempK)), \
             ('`tempK` must be either an ndarray the same size as ' +
              '`volAcid` and `emf` or a scalar.')
         assert np_all(tempK >= 0), 'All `tempK` values must be non-negative.'
         assert isscalar(pSal) and pSal >= 0, \
             '`pSal` must be a scalar with a non-negative value.'
         assert isscalar(volSample) and volSample > 0, \
             '`volSample` must be a scalar with a positive value.'
         assert ((type(totalCarbonate) is ndarray and
                 size(totalCarbonate) == size(volAcid)) or
                 isscalar(totalCarbonate)), \
             ('`totalCarbonate` must be either an ndarray the same size ' +
              'as `volAcid` and `emf` or a scalar.')
         assert np_all(totalCarbonate >= 0), \
             'All `totalCarbonate` values must be non-negative.'
         assert isscalar(totalPhosphate) and totalPhosphate >= 0, \
             '`totalPhosphate` must be a scalar with a non-negative value.'
         assert isscalar(totalSilicate) and totalSilicate >= 0, \
             '`totalSilicate` must be a scalar with a non-negative value.'
         assert isscalar(totalAmmonia) and totalAmmonia >= 0, \
             '`totalAmmonia` must be a scalar with a non-negative value.'
         assert isscalar(totalH2Sulfide) and totalH2Sulfide >= 0, \
             '`totalH2Sulfide` must be a scalar with a non-negative value.'
         assert isinstance(WhichKs, int) and 1 <= WhichKs <= 15, \
             '`WhichKs` must be a scalar integer from 1 to 15.'
         assert isinstance(WhoseKSO4, int) and 1 <= WhoseKSO4 <= 2, \
             '`WhoseKSO4` must be a scalar integer from 1 to 2.'
         assert isinstance(WhoseKF, int) and 1 <= WhoseKF <= 2, \
             '`WhoseKF` must be a scalar integer from 1 to 2.'
         assert isinstance(WhoseTB, int) and 1 <= WhoseTB <= 2, \
             '`WhoseTB` must be a scalar integer from 1 to 2.'
         assert isinstance(warnings, bool), \
             '`warnings` must be `True` or `False`.'
     # Format and store inputs
     self.volAcid = volAcid.ravel()
     self.emf = emf.ravel()
     if isscalar(tempK):
         tempK = full_like(volAcid, tempK)
     self.tempK = tempK.ravel()
     self.pSal = pSal
     self.volSample = volSample
     self.concTotals = concentrations.concTotals(
         pSal,
         totalCarbonate=totalCarbonate,
         totalPhosphate=totalPhosphate,
         totalSilicate=totalSilicate,
         totalAmmonia=totalAmmonia,
         totalH2Sulfide=totalH2Sulfide,
         WhichKs=WhichKs,
         WhoseTB=WhoseTB)
     self.eqConstants = dissociation.eqConstants(self.tempK,
                                                 pSal,
                                                 self.concTotals,
                                                 WhichKs=WhichKs,
                                                 WhoseKSO4=WhoseKSO4,
                                                 WhoseKF=WhoseKF)
     self.WhichKs = WhichKs
     self.WhoseKSO4 = WhoseKSO4
     self.WhoseKF = WhoseKF
     self.WhoseTB = WhoseTB
     self.__warnings__ = warnings
     # Do calculations
     self.apply_buretteCorrection(buretteCorrection, warning=False)
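
The scalar-or-array handling of tempK above, in isolation: a scalar temperature is broadcast with full_like to one value per titration point (a sketch):

import numpy as np

volAcid = np.linspace(0.0, 4.0, 5)
tempK = 298.15
if np.isscalar(tempK):
    tempK = np.full_like(volAcid, tempK)   # one value per titration point
print(tempK)  # [298.15 298.15 298.15 298.15 298.15]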
Code example #13
# Presumed context (pyleecan MagFEMM method): pi, cos, sin, zeros, roll and
# np_all (= numpy.all) come from numpy; splitext/isfile from os.path,
# remove/rename from os; _FEMMHandler, update_FEMM_simulation,
# comp_FEMM_torque and comp_FEMM_Phi_wind are pyleecan helpers.
def solve_FEMM(
    self,
    femm,
    output,
    out_dict,
    FEMM_dict,
    sym,
    Nt,
    angle,
    Is,
    Ir,
    angle_rotor,
    is_close_femm,
    filename=None,
    start_t=0,
    end_t=None,
    Nmess=4,
):
    """
    Solve FEMM model to calculate airgap flux density, torque instantaneous/average/ripple values,
    flux induced in stator windings and flux density, field and permeability maps

    Parameters
    ----------
    self: MagFEMM
        A MagFEMM object
    femm: _FEMMHandler
        Object to handle FEMM
    output: Output
        An Output object
    out_dict: dict
        Dict containing the following quantities to update for each time step:
            Br : ndarray
                Airgap radial flux density (Nt,Na) [T]
            Bt : ndarray
                Airgap tangential flux density (Nt,Na) [T]
            Tem : ndarray
                Electromagnetic torque over time (Nt,) [Nm]
            Phi_wind : list of ndarray # TODO should it rather be a dict with lam label?
                List of winding flux with respect to Machine.get_lamlist (qs,Nt) [Wb]
    FEMM_dict : dict
        Dict containing FEMM model parameters
    sym: int
        Spatial symmetry factor
    Nt: int
        Number of time steps for calculation
    angle: ndarray
        Angle vector for calculation
    Is : ndarray
        Stator current matrix (qs,Nt) [A]
    Ir : ndarray
        Rotor current matrix (qr,Nt) [A]
    angle_rotor: ndarray
        Rotor angular position vector (Nt,)
    is_close_femm: bool
        True to close FEMM handler in the end
    filename: str
        Path to FEMM model to open
    start_t: int
        Index of first time step (0 by default, used for parallelization)
    end_t: int
        Index of last time step (Nt by default, used for parallelization)

    Returns
    -------
    B: ndarray
        3D Magnetic flux density for all time steps and each element (Nt, Nelem, 3) [T]
    H : ndarray
        3D Magnetic field for all time steps and each element (Nt, Nelem, 3) [A/m]
    mu : ndarray
        Magnetic relative permeability for all time steps and each element (Nt, Nelem) []
    mesh: MeshMat
        Object containing magnetic mesh at first time step
    groups: dict
        Dict whose keys are group labels and values are arrays of indices of related elements

    """

    logger = self.get_logger()
    is_sliding_band = self.is_sliding_band

    if filename is not None:
        # Open FEMM instance if filename is not None (parallel case)
        try:
            # Try to open FEMM instance if handler already exists (first parallelized handler)
            femm.openfemm(1)
        except Exception:
            # Create a new FEMM handler in case of parallelization on another FEMM instance
            femm = _FEMMHandler()
            output.mag.internal.handler_list.append(femm)
            # Open a new FEMM instance associated to new handler
            femm.openfemm(1)
        # Open FEMM file
        femm.opendocument(filename)
    else:
        # FEMM instance and file is already open, get filename from output
        filename = self.get_path_save_fem(output)

    if is_sliding_band and self.is_set_previous:
        # Check result .ans file existence and delete it if it exists
        ans_file = (splitext(filename)[0] + ".ans").replace("\\", "/").replace("//", "/")
        if isfile(ans_file):
            logger.debug("Delete existing result .ans file at: " + ans_file)
            remove(ans_file)

    if not is_sliding_band:
        fileinit_fem = self.get_path_save_fem(output)
        fileinit_ans = fileinit_fem[:-4] + ".ans"
        filetemp_fem = fileinit_fem[:-4] + "_temp.fem"
        filetemp_ans = fileinit_fem[:-4] + "_temp.ans"

    # Take last time step at Nt by default
    if end_t is None:
        end_t = Nt

    # Number of angular steps
    Na = angle.size

    # Loading parameters for readability
    machine = output.simu.machine

    if self.Rag_enforced is not None:
        # Take enforced value
        Rag = self.Rag_enforced
    else:
        Rag = machine.comp_Rgap_mec()

    L1 = machine.stator.comp_length()
    L2 = machine.rotor.comp_length()
    save_path = self.get_path_save(output)
    is_internal_rotor = machine.rotor.is_internal
    if "Phi_wind" in out_dict:
        qs = {}
        Npcp = {}
        for key in out_dict["Phi_wind"].keys():
            lam = machine.get_lam_by_label(key)
            qs[key] = out_dict["Phi_wind"][key].shape[
                1]  # Winding phase number
            Npcp[key] = lam.winding.Npcp  # parallel paths

    # Account for initial angular shift of stator and rotor and apply it to the sliding band
    angle_shift = self.angle_rotor_shift - self.angle_stator_shift

    B_elem = None
    H_elem = None
    mu_elem = None
    meshFEMM = None
    groups = None
    A_node = None

    # Check current values
    if np_all(Is == 0):
        Is = None
    if np_all(Ir == 0):
        Ir = None

    k1 = 0
    k2 = 0
    Nloop = end_t - start_t
    # Compute the data for each time step
    for ii in range(start_t, end_t):

        if Nloop > Nmess:
            if k1 >= round(k2 * Nloop / Nmess):
                logger.info("Solving time steps: " +
                            str(int(k2 / Nmess * 100)) + "%")
                k2 += 1

        elif ii == 0:
            logger.info("Computing Airgap Flux in FEMM")
        k1 += 1

        if not is_sliding_band:
            # Reload model for each time step if no sliding band
            if ii > start_t:
                femm.opendocument(fileinit_fem)
            femm.mi_saveas(filetemp_fem)

        # Update rotor position and currents
        update_FEMM_simulation(
            femm=femm,
            FEMM_dict=FEMM_dict,
            is_sliding_band=is_sliding_band,
            is_internal_rotor=is_internal_rotor,
            angle_rotor=angle_rotor + angle_shift,
            Is=Is,
            Ir=Ir,
            ii=ii,
        )

        # Check if there is a previous solution file to speed up non-linear iterations
        if is_sliding_band and self.is_set_previous and ii > start_t:
            if isfile(ans_file):
                # Setup .ans file path in FEMM model
                femm.mi_setprevious(ans_file, 0)
            else:
                logger.warning("Cannot reuse result .ans file: " + ans_file)
        else:
            # Make sure that no file path is filled in FEMM model
            femm.mi_setprevious("", 0)

        # Run the computation
        femm.mi_analyze()

        # Load results
        femm.mi_loadsolution()

        # Get the flux result
        if is_sliding_band:
            for jj in range(Na):
                (
                    out_dict["B_{rad}"][ii, jj],
                    out_dict["B_{circ}"][ii, jj],
                ) = femm.mo_getgapb("bc_ag2", angle[jj] * 180 / pi)
        else:
            for jj in range(Na):
                B = femm.mo_getb(Rag * cos(angle[jj]), Rag * sin(angle[jj]))
                out_dict["B_{rad}"][
                    ii, jj] = B[0] * cos(angle[jj]) + B[1] * sin(angle[jj])
                out_dict["B_{circ}"][
                    ii, jj] = -B[0] * sin(angle[jj]) + B[1] * cos(angle[jj])

        # Compute the torque
        out_dict["Tem"][ii] = comp_FEMM_torque(femm, FEMM_dict, sym=sym)

        if "Phi_wind" in out_dict:
            # Phi_wind computation
            # TODO fix inconsistency for multi lam machines here
            for key in out_dict["Phi_wind"].keys():
                out_dict["Phi_wind"][key][ii, :] = comp_FEMM_Phi_wind(
                    femm,
                    qs[key],
                    Npcp[key],
                    is_stator=machine.get_lam_by_label(key).is_stator,
                    L1=L1,
                    L2=L2,
                    sym=sym,
                )

        # Load mesh data & solution
        if self.is_get_meshsolution:
            # Get mesh data and magnetic quantities from .ans file
            tmpmeshFEMM, tmpB, tmpH, tmpmu, tmpA, tmpgroups = self.get_meshsolution(
                femm,
                save_path,
                j_t0=ii,
                id_worker=start_t,
                is_get_mesh=ii == start_t,
            )

            # Store magnetic flux density, field and relative permeability for the current time step

            # Initialize mesh and magnetic quantities for first time step
            if ii == start_t:
                meshFEMM = [tmpmeshFEMM]
                groups = tmpgroups
                Nelem = meshFEMM[0].cell["triangle"].nb_cell
                Nnode = meshFEMM[0].node.nb_node
                Nt0 = end_t - start_t
                B_elem = zeros([Nt0, Nelem, 3])
                H_elem = zeros([Nt0, Nelem, 3])
                mu_elem = zeros([Nt0, Nelem])
                A_node = zeros([Nt0, Nnode])

            # Shift time index ii in case start_t is not 0 (parallelization)
            ii0 = ii - start_t

            B_elem[ii0, :, 0:2] = tmpB
            H_elem[ii0, :, 0:2] = tmpH
            mu_elem[ii0, :] = tmpmu
            A_node[ii0, :] = tmpA

        if not is_sliding_band:
            femm.mi_close()
            femm.mo_close()

    if Nloop > Nmess and Nt > 1 and k2 <= Nmess:
        logger.info("Solving time step: 100%")

    # Shift to take into account stator position
    if self.angle_stator_shift != 0:
        roll_id = int(self.angle_stator_shift * Na / (2 * pi))
        out_dict["B_{rad}"] = roll(out_dict["B_{rad}"], roll_id, axis=1)
        out_dict["B_{circ}"] = roll(out_dict["B_{circ}"], roll_id, axis=1)

    if not is_sliding_band:
        # Remove initial .fem
        if isfile(fileinit_fem):
            remove(fileinit_fem)
        # Remove initial .ans
        if isfile(fileinit_ans):
            remove(fileinit_ans)
        # Rename .fem and .ans files to initial names
        rename(filetemp_fem, fileinit_fem)
        rename(filetemp_ans, fileinit_ans)

    # Close FEMM handler
    if is_close_femm:
        femm.closefemm()
        output.mag.internal.handler_list.remove(femm)

    out_dict["Rag"] = Rag

    return B_elem, H_elem, mu_elem, A_node, meshFEMM, groups
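
The Cartesian-to-polar projection used in the non-sliding-band branch above, restated: for flux density (B_x, B_y) sampled at angular position \theta on the airgap radius,

    B_rad  =  B_x cos(\theta) + B_y sin(\theta)
    B_circ = -B_x sin(\theta) + B_y cos(\theta)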
Code example #14
File: location.py | Project: kkvishal/wlan-pos
# Presumed context (wlan-pos; aliases inferred from usage): array, argsort,
# searchsorted, average, reciprocal, np_abs, np_any, np_all, np_sort and np_sum
# come from numpy; np_char_array = numpy.char.array; deepcopy from copy;
# CLUSTERKEYSIZE, KNN, KWIN, dist_km and wpplog come from the host package.
def fixPosWLAN(len_wlan=None, wlan=None, wppdb=None, verb=False):
    """
    Returns the online fixed user location in lat/lon format.

    Parameters
    ----------
    len_wlan: int, mandatory
        Number of online visible WLAN APs.
    wlan: np.array, string list, mandatory
        Array of MAC/RSS for online visible APs.
        e.g. [['00:15:70:9E:91:60' '00:15:70:9E:91:61' '00:15:70:9E:91:62' '00:15:70:9E:6C:6C']
              ['-55' '-56' '-57' '-68']].
    wppdb: database handle, mandatory
        WPP database interface used to look up the best clusters.
    verb: verbose mode option, default: False
        More debugging info if enabled (True).

    Returns
    -------
    posfix: np.array, float
        Final fixed location (lat, lon).
        e.g. [ 39.922942  116.472673 ]
    """
    interpart_offline = False; interpart_online = False

    # db query result: [ maxNI, keys:[ [keyaps:[], keycfps:(())], ... ] ].
    # maxNI=0 if no cluster found.
    maxNI,keys = wppdb.getBestClusters(macs=wlan[0])
    #maxNI,keys = [2, [
    #    [['00:21:91:1D:C0:D4', '00:19:E0:E1:76:A4', '00:25:86:4D:B4:C4'], 
    #        [[5634, 5634, 39.898019, 116.367113, '-83|-85|-89']] ],
    #    [['00:21:91:1D:C0:D4', '00:25:86:4D:B4:C4'],
    #        [[6161, 6161, 39.898307, 116.367233, '-90|-90']] ] ]]
    if maxNI == 0: # no intersection found
        wpplog.error('NO cluster found! Fingerprinting TERMINATED!')
        return []
    elif maxNI < CLUSTERKEYSIZE:
        # size of intersection set < offline key AP set size:4, 
        # offline keymacs/keyrsss (not online maxmacs/maxrsss) need to be cut down.
        interpart_offline = True
        if maxNI < len_wlan: #TODO: TBE.
            # size of intersection set < online AP set size(len_wlan) < CLUSTERKEYSIZE,
            # not only keymacs/keyrsss, but also maxmacs/maxrsss need to be cut down.
            interpart_online = True
        if verb: wpplog.debug('Partly[%d] matched cluster(s) found:' % maxNI)
    else: 
        if verb: wpplog.debug('Full matched cluster(s) found:')
    if verb: wpplog.debug('keys:\n%s' % keys)

    # Evaluation|sort of similarity between online FP & radio map FP.
    # fps_cand: [ min_spid1:[cid,spid,lat,lon,rsss], min_spid2, ... ]
    # keys: ID and key APs of matched cluster(s) with max intersect APs.
    all_pos_lenrss = []
    fps_cand = []; sums_cand = []
    for keyaps,keycfps in keys:
        if verb: wpplog.debug('keyaps:\n%s\nkeycfps:\n%s' % (keyaps, keycfps))
        # Fast fix when the ONLY 1 selected cid has ONLY 1 fp in 'cfps'.
        if len(keys)==1 and len(keycfps)==1:
            fps_cand = [ list(keycfps[0]) ]
            break
        pos_lenrss = (array(keycfps)[:,1:3].astype(float)).tolist()
        keyrsss = np_char_array(keycfps)[:,4].split('|') #4: column order in cfps.tbl
        keyrsss = array([ [float(rss) for rss in spid] for spid in keyrsss ])
        for idx,pos in enumerate(pos_lenrss):
            pos_lenrss[idx].append(len(keyrsss[idx]))
        all_pos_lenrss.extend(pos_lenrss)
        # Rearrange key MACs/RSSs in 'keyrsss' according to intersection set 'keyaps'.
        if interpart_offline:
            if interpart_online:
                wl = deepcopy(wlan) # mmacs->wl[0]; mrsss->wl[1]
                idxs_inters = [ idx for idx,mac in enumerate(wlan[0]) if mac in keyaps ]
                wl = wl[:,idxs_inters]
            else: wl = wlan
        else: wl = wlan
        idxs_taken = [ keyaps.index(x) for x in wl[0] ]
        keyrsss = keyrsss.take(idxs_taken, axis=1)
        mrsss = wl[1].astype(int)
        # Euclidean dist solving and sorting.
        sum_rss = np_sum( (mrsss-keyrsss)**2, axis=1 )
        fps_cand.extend( keycfps )
        sums_cand.extend( sum_rss )
        if verb: wpplog.debug('sum_rss:\n%s' % sum_rss)

    # Location estimation.
    if len(fps_cand) > 1:
        # KNN
        # lst_set_sums_cand: list format for set of sums_cand.
        # bound_dist: distance boundary for K-min distances.
        lst_set_sums_cand =  array(list(set(sums_cand)))
        idx_bound_dist = argsort(lst_set_sums_cand)[:KNN][-1]
        bound_dist = lst_set_sums_cand[idx_bound_dist]
        idx_sums_sort = argsort(sums_cand)

        sums_cand = array(sums_cand)
        fps_cand = array(fps_cand)

        sums_cand_sort = sums_cand[idx_sums_sort]
        idx_bound_fp = searchsorted(sums_cand_sort, bound_dist, 'right')
        idx_sums_sort_bound = idx_sums_sort[:idx_bound_fp]
        #idxs_kmin = argsort(min_sums)[:KNN]
        sorted_sums = sums_cand[idx_sums_sort_bound]
        sorted_fps = fps_cand[idx_sums_sort_bound]
        if verb: wpplog.debug('k-dists:\n%s\nk-locations:\n%s' % (sorted_sums, sorted_fps))
        # DKNN
        if sorted_sums[0]: 
            boundry = sorted_sums[0]*KWIN
        else: 
            if sorted_sums[1]:
                boundry = KWIN
                # What the hell are the following two lines doing here!
                #idx_zero_bound = searchsorted(sorted_sums, 0, side='right')
                #sorted_sums[:idx_zero_bound] = boundry / (idx_zero_bound + .5)
            else: boundry = 0
        idx_dkmin = searchsorted(sorted_sums, boundry, side='right')
        dknn_sums = sorted_sums[:idx_dkmin].tolist()
        dknn_fps = sorted_fps[:idx_dkmin]
        if verb: wpplog.debug('dk-dists: \n%s\ndk-locations: \n%s' % (dknn_sums, dknn_fps))
        # Weighted_AVG_DKNN.
        num_dknn_fps = len(dknn_fps)
        if  num_dknn_fps > 1:
            coors = dknn_fps[:,1:3].astype(float)
            num_keyaps = array([ rsss.count('|')+1 for rsss in dknn_fps[:,-2] ])
            # ww: weights of dknn weights.
            ww = np_abs(num_keyaps - len_wlan).tolist()
            #wpplog.debug(ww)
            if not np_all(ww):
                if np_any(ww):
                    ww_sort = np_sort(ww)
                    #wpplog.debug('ww_sort: %s' % ww_sort)
                    idx_dknn_sums_sort = searchsorted(ww_sort, 0, 'right')
                    #wpplog.debug('idx_dknn_sums_sort: %s' % idx_dknn_sums_sort)
                    ww_2ndbig = ww_sort[idx_dknn_sums_sort] 
                    w_zero = ww_2ndbig / (len(ww)*ww_2ndbig)
                else: w_zero = 1
                #for idx,sum in enumerate(ww):
                #    if not sum: ww[idx] = w_zero
                ww = [ w if w else w_zero for w in ww ]
            ws = array(ww) + dknn_sums
            weights = reciprocal(ws)
            if verb: wpplog.debug('coors:%s, weights:%s' % (coors, weights))
            posfix = average(coors, axis=0, weights=weights)
        else: posfix = array(dknn_fps[0][1:3]).astype(float)
        # ErrRange Estimation (more than 1 relevant clusters).
        idxs_clusters = idx_sums_sort_bound[:idx_dkmin]
        if len(idxs_clusters) == 1: 
            if maxNI == 1: poserr = 200
            else: poserr = 100
        else: 
            if verb: wpplog.debug('idxs_clusters: %s\nall_pos_lenrss: %s' % (idxs_clusters, all_pos_lenrss))
            #allposs_dknn = vstack(array(all_pos_lenrss, object)[idxs_clusters])
            allposs_dknn = array(all_pos_lenrss, object)[idxs_clusters]
            if verb: wpplog.debug('allposs_dknn: %s' % allposs_dknn)
            poserr = max( average([ dist_km(posfix[1], posfix[0], p[1], p[0])*1000 
                for p in allposs_dknn ]), 100 )
    else: 
        fps_cand = fps_cand[0][:-2]
        if verb: wpplog.debug('location:\n%s' % fps_cand)
        posfix = array(fps_cand[1:3]).astype(float)
        # ErrRange Estimation (only 1 relevant clusters).
        N_fp = len(keycfps)
        if N_fp == 1: 
            if maxNI == 1: poserr = 200
            else: poserr = 150
        else:
            if verb: 
                wpplog.debug('all_pos_lenrss: %s' % all_pos_lenrss)
                wpplog.debug('posfix: %s' % posfix)
            poserr = max( np_sum([ dist_km(posfix[1], posfix[0], p[1], p[0])*1000 
                for p in all_pos_lenrss ]) / (N_fp-1), 100 )
    ret = posfix.tolist()
    ret.append(poserr)

    return ret
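
The weight fix-up above leans on NumPy's truthiness reduction: applied to plain numbers, np_all checks that all entries are non-zero and np_any that at least one is. In isolation:

import numpy as np

ww = [0, 2, 3]
print(np.all(ww))  # False: a zero weight is present (reciprocal would blow up on it)
print(np.any(ww))  # True: not all weights are zero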
Code example #15
# Presumed imports (pyleecan/SciDataTool context; aliases inferred from usage):
from numpy import pi, all as np_all
from SciDataTool import DataTime, VectorField

def comp_force(self, output, axes_dict):
    """Compute the air-gap surface force based on Maxwell Tensor (MT).

    Parameters
    ----------
    self : ForceMT
        A ForceMT object
    output : Output
        an Output object (to update)
    axes_dict: {Data}
        Dict of axes used for force calculation
    """

    # Get time and angular axes
    Angle = axes_dict["Angle"]
    Time = axes_dict["Time"]

    # Import angular vector from Angle Data object
    _, is_antiper_a = Angle.get_periodicity()
    angle = Angle.get_values(
        is_oneperiod=self.is_periodicity_a,
        is_antiperiod=is_antiper_a and self.is_periodicity_a,
    )

    # Import time vector from Time Data object
    _, is_antiper_t = Time.get_periodicity()
    time = Time.get_values(
        is_oneperiod=self.is_periodicity_t,
        is_antiperiod=is_antiper_t and self.is_periodicity_t,
    )

    # Load magnetic flux
    Brphiz = output.mag.B.get_rphiz_along(
        "time=axis_data",
        "angle=axis_data",
        axis_data={
            "time": time,
            "angle": angle
        },
    )
    Br = Brphiz["radial"]
    Bt = Brphiz["tangential"]
    Bz = Brphiz["axial"]

    # Magnetic permeability of vacuum
    mu_0 = 4 * pi * 1e-7

    # Compute AGSF with MT formula
    Prad = (Br * Br - Bt * Bt - Bz * Bz) / (2 * mu_0)
    Ptan = Br * Bt / mu_0
    Pz = Br * Bz / mu_0

    # Store Maxwell Stress tensor P in VectorField
    # Build axes list
    axes_list = list()
    for axe in output.mag.B.get_axes():
        if axe.name == Angle.name:
            axes_list.append(Angle)
        elif axe.name == Time.name:
            axes_list.append(Time)
        else:
            axes_list.append(axe)

    # Build components list
    components = {}
    if not np_all((Prad == 0)):
        Prad_data = DataTime(
            name="Airgap radial surface force",
            unit="N/m2",
            symbol="P_r",
            axes=axes_list,
            values=Prad,
        )
        components["radial"] = Prad_data
    if not np_all((Ptan == 0)):
        Ptan_data = DataTime(
            name="Airgap tangential surface force",
            unit="N/m2",
            symbol="P_t",
            axes=axes_list,
            values=Ptan,
        )
        components["tangential"] = Ptan_data
    if not np_all((Pz == 0)):
        Pz_data = DataTime(
            name="Airgap axial surface force",
            unit="N/m2",
            symbol="P_z",
            axes=axes_list,
            values=Pz,
        )
        components["axial"] = Pz_data

    # Store components in VectorField
    output.force.P = VectorField(name="Magnetic airgap surface force",
                                 symbol="P",
                                 components=components)
Code example #16
# Presumed imports (pyleecan context; aliases inferred from usage):
from math import gcd
from numpy import squeeze, where, mod, abs as np_abs, all as np_all
from numpy.fft import fft

def comp_periodicity(self, wind_mat=None):
    """Computes the winding matrix (anti-)periodicity

    Parameters
    ----------
    self : Winding
        A Winding object
    wind_mat : ndarray
        Winding connection matrix

    Returns
    -------
    per_a: int
        Number of spatial periods of the winding
    is_aper_a: bool
        True if the winding is anti-periodic over space

    """

    if wind_mat is None:
        wind_mat = self.get_connection_mat()

    assert wind_mat.ndim == 4, "dim 4 expected for wind_mat"

    Zs = wind_mat.shape[2]  # Number of Slot
    qs = wind_mat.shape[3]  # Number of phase

    # Summing on all the layers (Nlay_r and Nlay_theta)
    wind_mat2 = squeeze(wind_mat)

    if wind_mat2.ndim == 4:  # rad and tan > 2
        Nlay = wind_mat2.shape[0] * wind_mat2.shape[1]
        wind_mat2 = wind_mat2.reshape((Nlay, Zs, qs))
    elif wind_mat2.ndim == 2:
        Nlay = 1
        wind_mat2 = wind_mat2[None, :, :]
    else:
        Nlay = wind_mat2.shape[0]

    Nperw = Zs  # Number of electrical period of the winding
    is_aper = True  # True if winding pattern is anti-periodic

    # Looking for the periodicity of each phase
    for q in range(qs):
        # Looking for the periodicity of each layer
        for l in range(Nlay):
            # FFT of connectivity array for the given layer and phase
            wind_mat_ql_fft = fft(wind_mat2[l, :, q])
            # Find indices of nonzero amplitudes
            I0 = where(np_abs(wind_mat_ql_fft) > 1e-3)[0]
            if len(I0) == 0:  # This phase is not present in this layer
                pass  # No impact on symmetry
            else:
                # Periodicity is given by the lowest non-zero order
                Nperw_ql = I0[0] if I0[0] != 0 else I0[1]
                Nperw = gcd(Nperw, Nperw_ql)
                if I0[0] == 0:
                    # Anti-periodicity is necessarily false if there is a constant component
                    is_aper = False
                else:
                    # Anti-periodicity holds if all non-zero components are odd multiples of the periodicity
                    is_aper = is_aper and np_all(mod(I0 / Nperw_ql, 2) == 1)

            if Nperw == 1 and not is_aper:
                # No need to further continue if there is no anti-periodicity
                break

    # Multiply periodicity number by two in case of anti-periodicity
    Nperw = Nperw * 2 if is_aper else Nperw

    return int(Nperw), bool(is_aper)
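
A small self-contained check of the FFT criterion above, with a made-up eight-slot, single-phase pattern that flips sign every two slots:

import numpy as np

pattern = np.array([1, 0, -1, 0, 1, 0, -1, 0])      # anti-periodic over 2 slots
I0 = np.where(np.abs(np.fft.fft(pattern)) > 1e-3)[0]
print(I0)                                           # [2 6]: lowest non-zero order is 2
print(np.all(np.mod(I0 / I0[0], 2) == 1))           # True: anti-periodicity holds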
Code example #17
# Presumed imports (Sverchok context; aliases inferred from usage):
from numpy import (array, newaxis, arccos, sin, concatenate, invert,
                   sum as np_sum, all as np_all, any as np_any)
from numpy.linalg import norm as np_norm

def compute_intersect_edges_sphere_np(verts_in, edges_in, sphere_loc, radius,
                                      result, gates):
    '''
    Calculate all intersections of a sphere with an edge mesh using NumPy;
    when a segment does not intersect, return the closest point on the line instead.
    Adapted from Marco13's answer at https://math.stackexchange.com/questions/1905533/
    Segments are built from verts_in and edges_in (regular lists);
    sphere_loc and radius are regular lists or tuples;
    result is a list of eight empty lists to append the data to;
    and gates is a boolean list selecting what to return:
        [mask: valid intersection,
        inter_a: the intersection nearer to the end point of the segment,
        inter_b: the intersection nearer to the start point of the segment,
        inter_a_in_segment: if intersection A is on the segment,
        inter_b_in_segment: if intersection B is on the segment,
        first_inter_in_segment: the first valid value among Int. A, Int. B and the closest point,
        inter_with_segment: True if there is any intersection on the segment,
        all_inter: a flat list of all the intersections,
        out_numpy: return NumPy arrays instead of regular lists]
    '''

    np_verts = array(verts_in)
    if not edges_in:
        edges_in = [[0, -1]]
    np_edges = array(edges_in)
    np_centers = array(sphere_loc)
    np_rad = array(radius)

    segment_orig = np_verts[np_edges[:, 0]]
    segment = np_verts[np_edges[:, 1]] - segment_orig
    segment_mag = np_norm(segment, axis=1)
    segment_dir = segment / segment_mag[:, newaxis]

    join_vect = np_centers[:, newaxis] - segment_orig
    join_vect_proy = np_sum(join_vect * segment_dir, axis=2)

    closest_point = segment_orig + join_vect_proy[:, :, newaxis] * segment_dir
    dif_v = closest_point - np_centers[:, newaxis, :]
    dist = np_norm(dif_v, axis=2)

    mask = dist > np_rad[:, newaxis]
    ang = arccos(dist / np_rad[:, newaxis])
    offset = np_rad[:, newaxis] * sin(ang)

    inter_a, inter_b = [], []
    inter_a_in_segment, inter_b_in_segment = [], []
    first_inter_in_segment, inter_with_segment = [], []
    all_inter = []
    any_inter = any(gates[5:8])

    if gates[1] or any_inter:
        inter_a = closest_point + segment_dir * offset[:, :, newaxis]
        inter_a[mask] = closest_point[mask]
    if gates[2] or any_inter:
        inter_b = closest_point - segment_dir * offset[:, :, newaxis]
        inter_b[mask] = closest_point[mask]

    if gates[3] or any_inter:
        inter_a_in_segment = np_all(
            [
                join_vect_proy + offset >= 0,
                join_vect_proy + offset <= segment_mag
            ],
            axis=0,
        )
    if gates[4] or any_inter:
        inter_b_in_segment = np_all(
            [
                join_vect_proy - offset >= 0,
                join_vect_proy - offset <= segment_mag
            ],
            axis=0,
        )

    if gates[5]:
        first_inter_in_segment = closest_point
        first_inter_in_segment[inter_b_in_segment] = inter_b[
            inter_b_in_segment]
        first_inter_in_segment[inter_a_in_segment] = inter_a[
            inter_a_in_segment]

    if gates[6]:
        inter_with_segment = np_any([inter_a_in_segment, inter_b_in_segment],
                                    axis=0)

    if gates[7]:
        all_inter = concatenate(
            (inter_a[inter_a_in_segment, :], inter_b[inter_b_in_segment, :]),
            axis=0)[newaxis, :, :]

    local_result = [
        invert(mask),
        inter_a,
        inter_b,
        inter_a_in_segment,
        inter_b_in_segment,
        first_inter_in_segment,
        inter_with_segment,
        all_inter,
    ]
    for i, res in enumerate(result):
        if gates[i]:
            if not gates[8]:

                for subres in local_result[i].tolist():
                    res.append(subres)

            else:
                res.append(local_result[i])
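
The offset computed above is the half-chord of the sphere/line intersection: with d the center-to-line distance and r the sphere radius,

    offset = r sin(arccos(d / r)) = sqrt(r^2 - d^2)

which is only real for d <= r; that is exactly the case the mask (dist > radius) singles out, falling back to the closest point instead.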
Code example #18
# Presumed context (QRToolkits database usage script): db, query,
# DataClassification, DataValueCategory and DataFormatCategory come from the
# host package; np_all = numpy.all, np_isclose = numpy.isclose;
# start_time and end_time are defined earlier.
num_data = query('CLOSE', (start_time, end_time))
char_data = query('ZX_IND', (start_time, end_time))
unstruct_data = list(range(1000))

db.insert(num_data, 'num_test',
          (DataClassification.STRUCTURED, DataValueCategory.NUMERIC,
           DataFormatCategory.PANEL), 'float64')
db.insert(char_data, 'char_test',
          (DataClassification.STRUCTURED, DataValueCategory.CHAR,
           DataFormatCategory.PANEL))
db.insert(unstruct_data, 'unstruct_data.test',
          (DataClassification.UNSTRUCTURED, ))

query_start = '2017-05-01'
query_end = '2017-12-06'

queried_num_data = db.query(
    'num_test', (DataClassification.STRUCTURED, DataValueCategory.NUMERIC,
                 DataFormatCategory.PANEL), query_start, query_end)
old_num_data = query('CLOSE', (query_start, query_end))
print(np_all(np_isclose(queried_num_data.fillna(0), old_num_data.fillna(0))))

queried_char_data = db.query(
    'char_test', (DataClassification.STRUCTURED, DataValueCategory.CHAR,
                  DataFormatCategory.PANEL), query_start, query_end)
old_char_data = query('ZX_IND', (query_start, query_end))
print(np_all(queried_char_data == old_char_data))

print(db.query('unstruct_data.test', (DataClassification.UNSTRUCTURED, )))
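
The fillna(0) + np_isclose comparison above mirrors this plain-NumPy pattern for float data containing NaNs (a sketch):

import numpy as np

a = np.array([1.0, np.nan])
b = np.array([1.0 + 1e-12, np.nan])
# zero out NaNs first (as fillna(0) does), then compare within tolerance
print(np.all(np.isclose(np.nan_to_num(a), np.nan_to_num(b))))  # True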