Example #1
class Child(HasTraits):
    __prefix__ = "child_"
    first_name = PrototypedFrom("mother", "favorite_*")
    last_name = PrototypedFrom("father", "family_name")
    allowance = PrototypedFrom("father", "*")
    father = Instance(Parent)
    mother = Instance(Parent)
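Examples #1 and #2 show only the Child side. A minimal sketch of a matching Parent (an assumption, not part of the original listing) makes the prototyped names concrete: 'favorite_*' maps first_name to mother.favorite_first_name, and '*' together with __prefix__ = 'child_' maps allowance to father.child_allowance.

from traits.api import Float, HasTraits, Str

class Parent(HasTraits):
    first_name = Str
    family_name = Str
    favorite_first_name = Str      # read through Child.first_name ('favorite_*')
    child_allowance = Float(1.00)  # read through Child.allowance ('*' plus __prefix__)

# dad = Parent(family_name='Smith', favorite_first_name='John', child_allowance=5.0)
# child = Child(father=dad, mother=dad)
# child.last_name   -> 'Smith'
# child.allowance   -> 5.0, until it is assigned locally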
Example #2
class Child(HasTraits):
    __prefix__ = 'child_'
    first_name = PrototypedFrom('mother', 'favorite_*')
    last_name = PrototypedFrom('father', 'family_name')
    allowance = PrototypedFrom('father', '*')
    father = Instance(Parent)
    mother = Instance(Parent)
Example #3
class SgnTest(HasTraits):
    
    figure = Instance(Figure, ())
    exp_list = Instance(HasTraits)
    trace_list = PrototypedFrom( 'exp_list' )
    
    view = View(
        Item('figure', editor=MPLFigureEditor(), show_label=False))
    
    def _get_trace_list(self):
        return [ pkl_trace.TraceDBFile(trace_path) for trace_path in self.trace_list ]
        
    def update(self):
        self.figure.clf()
        axes = self.figure.add_subplot(111)
        trace_list = self._get_trace_list()
#        
#        key_pair_list = [ 
#            ('salad_risk.tst', 'argmin_risk.tst' ), 
#            ('salad_risk.greedy-05.tst', 'argmin_risk.tst' ),
#            ]
        
        key_pair_list = [ 
#             ('salad_risk.greedy-05.tst', 'salad_risk.tst' ), 
#             ('salad_risk.greedy-10.tst', 'salad_risk.tst' ),
            ('salad_risk.tst', 'argmin_risk.tst' ), 
#             ('salad_risk.greedy-10.tst', 'argmin_risk.tst' ),
            ]
        
        plot_sign_test(trace_list,key_pair_list, axes=axes)
        self.figure.canvas.draw()
Example #4
class TraceSelector(HasTraits):
    
    exp_list = Instance(HasTraits)
    trace_list = PrototypedFrom( 'exp_list' )
    trace_path = Enum(values='trace_list')

    def update(self):
        self.selected_trace()

#    @on_trait_change('trace_list')
#    def trace_list_changed(self):
##        print 'setting trace path to ', self.trace_list[0]
#        self.trace_path = self.trace_list[0]
#        
        
    @on_trait_change('trace_path')
    def selected_trace(self):
        if getattr(self, 'last_trace',None ) != self.trace_path:
            self.last_trace = self.trace_path
            trace = pkl_trace.TraceDBFile(self.trace_path)
            self.new_trace(trace)

    view = View(
        Item('trace_path', style='simple', width=-20),
        resizable=True)
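Example #4 pairs the prototyped trace_list with Enum(values='trace_list'), so the legal values of trace_path always track that list. A minimal, self-contained sketch of the pattern (hypothetical names, not from the original project):

from traits.api import Enum, HasTraits, List, Str

class Picker(HasTraits):
    options = List(Str, ['a.trace', 'b.trace'])
    choice = Enum(values='options')   # allowed values follow the 'options' trait

picker = Picker()
picker.choice = 'b.trace'    # fine
# picker.choice = 'other'    # would raise a TraitError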
Example #5
class Child(Person):

    parent = Instance(Person, args=())
    last_name = PrototypedFrom('parent', 'last_name')

    def _last_name_changed(self, old, new):
        print "child's last name changed:", new
Example #6
class Child(HasTraits):

    mother = Instance(Parent)
    father = Instance(Parent)

    first_name = Str()
    last_name = PrototypedFrom('father')
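A prototyped trait reads through to its prototype only until it is assigned directly: assignment makes a local copy, whereas DelegatesTo would write the change back. A small self-contained sketch of that difference, assuming a minimal Parent:

from traits.api import HasTraits, Instance, PrototypedFrom, Str

class Parent(HasTraits):
    last_name = Str('Smith')

class Child(HasTraits):
    father = Instance(Parent, ())
    last_name = PrototypedFrom('father')

child = Child()
print(child.last_name)          # 'Smith', read from the prototype
child.last_name = 'Jones'       # local copy; the prototype link is broken
print(child.father.last_name)   # still 'Smith' (DelegatesTo would have written through)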
Example #7
class Child ( HasTraits ):

    father = Instance( Parent )
    first_name = Str
    last_name  = PrototypedFrom( 'father' )

    def _last_name_changed(self, new):
        print "Child's last name changed to %s." % new
Example #8
class Reservoir(HasTraits):
    name = Str
    max_storage = Float(1e6, desc='Maximal storage [hm3]')
    max_release = Float(10, desc='Maximal release [m3/s]')
    head = Float(10, desc='Hydraulic head [m]')
    efficiency = Range(0, 1.)

    turbine = Instance(Turbine)
    installed_capacity = PrototypedFrom('turbine', 'power')
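Example #8 leaves Turbine undefined. In the SciPy-lectures-style setup it is roughly the class below, so installed_capacity simply mirrors turbine.power (a sketch; the real Turbine may carry more traits):

from traits.api import Float, HasTraits, Str

class Turbine(HasTraits):
    turbine_type = Str
    power = Float(1.0, desc='Maximal power delivered by the turbine [MW]')

# turbine = Turbine(turbine_type='Pelton', power=1.2)
# reservoir = Reservoir(name='Project A', turbine=turbine)
# reservoir.installed_capacity   # 1.2, prototyped from turbine.power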
Example #9
class Reservoir(HasTraits):

    name = Str
    #    max_storage = Float(100, desc='description for trait float variable')
    max_storage = Float(1e6, desc='Maximal storage [hm3]')
    max_release = Float(10, desc='Maximal release [m3/s]')
    head = Float(10, desc='Hydraulic head [m]')
    efficiency = Range(0, 1.)
    irrigated_areas = List(IrrigationArea)

    turbine = Instance(Turbine)
    installed_capacity = PrototypedFrom('turbine', 'power')

    total_crop_surface = Property(depends_on='irrigated_areas.surface')

    def _get_total_crop_surface(self):
        return sum([iarea.surface for iarea in self.irrigated_areas])

    def energy_production(self, release):
        ''' Returns the energy production [Wh] for the given release [m3/s]
        '''
        power = 1000 * 9.81 * self.head * release * self.efficiency
        return power * 3600

    traits_view = View(Item('name'),
                       Item('max_storage'),
                       Item('max_release'),
                       Item('head'),
                       Item('efficiency'),
                       Item('irrigated_areas'),
                       Item('total_crop_surface'),
                       resizable=True)

    traits_view2 = View(
        'name',
        'max_storage',
        'max_release',
        'head',
        'efficiency',
        title='Reservoir',
        resizable=True,
    )

    def _name_default(self):
        """ Complex initialisation of the reservoir name. """
        return 'Undefined'

    def _max_storage_default(self):
        """ Complex initialisation of the reservoir name. """
        return 123456
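A short usage sketch of the energy computation above (no GUI is opened; the numbers just work the formula through):

reservoir = Reservoir(head=10, efficiency=0.8)
# power  = 1000 * 9.81 * head * release * efficiency
#        = 1000 * 9.81 * 10 * 10 * 0.8 = 784800
print(reservoir.energy_production(release=10))   # 784800 * 3600 = 2825280000.0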
Example #10
class ListItemProxy(HasTraits):

    #: The list proxy:
    list = Property()

    #: The item proxies index into the original list:
    index = Int()

    #: Delegate all other traits to the original object:
    _ = PrototypedFrom("_zzz_object")

    #: Define all of the private internal use values (the funny names are an
    #: attempt to avoid name collisions with delegated trait names):
    _zzz_inited = Any()
    _zzz_object = Any()
    _zzz_name = Any()

    def __init__(self, object, name, index, trait, value):
        super().__init__()

        self._zzz_inited = False
        self._zzz_object = object
        self._zzz_name = name
        self.index = index

        if trait is not None:
            self.add_trait("value", trait)
            self.value = value

        self._zzz_inited = self.index < len(self.list)

    def _get_list(self):
        return getattr(self._zzz_object, self._zzz_name)

    def _value_changed(self, old_value, new_value):
        if self._zzz_inited:
            self.list[self.index] = new_value
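The underscore trait `_ = PrototypedFrom("_zzz_object")` is the Traits catch-all wildcard: any attribute not explicitly declared on the proxy is prototyped from the wrapped object. A rough, self-contained sketch of that idea (hypothetical classes, not the TraitsUI original):

from traits.api import Any, HasTraits, Int, PrototypedFrom, Str

class Wrapped(HasTraits):
    color = Str('red')
    size = Int(3)

class Proxy(HasTraits):
    _zzz_object = Any()
    _ = PrototypedFrom('_zzz_object')   # undeclared names fall through to _zzz_object

proxy = Proxy(_zzz_object=Wrapped())
print(proxy.color, proxy.size)   # red 3, fetched from the wrapped object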
Example #11
class Viz3D(HasStrictTraits):
    '''Base class for visualization objects.
    Each state or operator object, such as a crease pattern
    or a constraint, can provide tailored visualizations
    transferring its information into view objects shared
    within a particular forming task or a whole forming process.
    '''

    label = Str('default')
    '''Label of the visualization object.
    '''

    anim_t_start = PrototypedFrom('vis3d')
    anim_t_end = PrototypedFrom('vis3d')

    order = Int(1, auto_set=False, enter_set=True)
    '''Deprecated -- is only here to have a control parameter
    that avoids text visualization at the beginning of the time line
    because then mlab fails. 
    '''

    vis3d = WeakRef(Visual3D)
    '''Link to the visual object to transform into the 
    forming_task_view3d.
    '''

    vis3d_changed = Event
    '''Event registering changes in the source object.
    '''

    ftv = WeakRef
    '''Folding task view3d object. 
    '''

    pipes = Dict()

    def register(self, ftv):
        '''Construct the visualization within the forming task view3d object.
        '''
        ftv.viz3d_dict[self.label] = self
        return

    def plot(self):
        '''Plot the object within ftv
        '''
        return

    hidden = Bool(False)

    def _show(self):
        if self.hidden:
            self.show()
            self.hidden = False

    def _hide(self):
        if not self.hidden:
            self.hide()
            self.hidden = True

    def hide(self):
        for pipe in list(self.pipes.values()):
            pipe.visible = False

    def show(self):
        for pipe in list(self.pipes.values()):
            pipe.visible = True

    def update_t(self, anim_t=0.0, vot=0.0):
        '''Update with regard to the global time line.
        '''
        if anim_t >= self.anim_t_start and anim_t <= self.anim_t_end \
                or self.anim_t_end < 0.0:
            self._show()
            self.update(vot)
        else:
            self._hide()

    def update(self, vot=0.0):
        '''Update the visualization within the view3d object.
        '''
        return

    min_max = Property
    '''Bounding box limits set to none by default. 
    '''

    def _get_min_max(self):
        return None, None

    viz3d_view = tui.Group(tui.Item('hode', resizable=True))
Example #12
class Transition(HasTraits):
    # When based on Pandas, do we even need this class as anything but a viewer
    # class? I don't think so.
    # ...but it's a pretty good viewer class, that is something too.
    """ A Transition-class to keep track of lines/components for a given
    transition defined by its species and wavelength, with possible common
    aliases such as H-Alpha.
    """
    # We need some information about the spectrum we're working with:
    spectrum = Instance(Spectrum2D)
    wavelength = PrototypedFrom('spectrum', prefix='Center')
    model = PrototypedFrom('spectrum')
    line_spectra = DelegatesTo('spectrum')
    Center = DelegatesTo('spectrum')
    wavl = DelegatesTo('spectrum')
    CompMarkers = Dict()
    from_existing = Bool(False)
    fit_now = Bool(False)
    transit_list = DelegatesTo('spectrum')
    select_existing = Str
    Builddatabutton = Button  # For debugging only
    z = DelegatesTo('spectrum')  # Or what? How to handle this?
    exes = []
    wyes = []
    cens = []
    wids = []
    amps = []

    def _lookup_nearby_lines(self):
        ''' Still need to figure out how best to do this.
        '''
        try:
            from astroquery.atomic import AtomicLineList, Transition
        except ImportError:
            raise ImportError("Could not find a working `astroquery` install.")
        labwl = self.wavelength / (1. + self.z)
        query_range = [(labwl - 5.) * u.AA, (labwl + 5.) * u.AA]
        try:
            Q = AtomicLineList.query_object(query_range,
                                            'AIR',
                                            transitions=Transition.nebular,
                                            nmax=4)
        except Exception:
            raise LookupError("Could not perform line lookup or found nothing")
        Q = pd.DataFrame(Q.as_array())
        Q.SPECTRUM += \
            '_' + Q['LAMBDA AIR ANG'].map(np.around).astype(int).astype(str)
        Q['Lambda_0'] = air_to_vacuum(Q['LAMBDA AIR ANG'])
        Q = Q[['SPECTRUM', 'Lambda_0']]
        return

    def _build_trans_list(self):
        """
        When given the Center wavelength value, find the nearby transitions
        in the system transitions list and let user choose which one to add to
        the user's transition list.
        """
        lines_srs = load_lines_series()
        #lookup_success = False
        #try:
        #    Loclines = self._lookup_nearby_lines()
        #    lines_srs.merge(Loclines, left_index=True, right_on='SPECTRUM')
        lines_selection = pd.DataFrame(lines_srs, columns=['Lambda_0'])
        lines_selection['Lambda_obs'] = \
            lines_selection['Lambda_0'] * (1. + self.z)
        lines = lines_selection[sp.absolute(lines_selection['Lambda_obs'] -
                                            self.wavelength) <= 35.]
        # Turn this into a list of strings that Enum understands
        choices = [
            '%s  (%.2f)' % (i, lines.loc[i]['Lambda_obs']) for i in lines.index
        ]
        if len(choices) > 0:
            self.add_trait('choices', Enum(choices))
            return True
        else:
            choices.append('')
            self.add_trait('choices', Enum(choices))
            print('No lines listed in this neighbourhood.')
            return False

    def _z_changed(self):
        # TODO: Implement!
        pass

    def show_trans_model(self):
        self._build_plot_data()
        # This to be a conv function rather than a method? I think possibly so.
        # Main functionality of this can just be a View() in Show2DSpec. But
        # should it?

    def __init__(self, spectrum=None, z=0.):
        if spectrum is not None:
            super(Transition, self).__init__(spectrum=spectrum)
        self.Succes = self._build_trans_list()

    # =========================================================================
    #     Define the plot that is the main part of the components editor. This
    #     requires an ArrayPlotData object to be defined, which again requires
    #     a bunch of data arrays to be defined and imported from the parent
    #     objects, so this may take a little while to implement.
    #     FIXME: Possibly remove, as most is now in the 2d viewer class.

    container = GridContainer(padding=30,
                              bgcolor="sys_window",
                              spacing=(15, 15),
                              shape=(3, 1))

    transitiondata = ArrayPlotData(wl=wavl, )

    # =========================================================================
    #     This class has two different views called at two different points in
    #     the data reduction - one before and one after fitting.

    view = View(Item('choices'),
                Item('wavelength'),
                Item('line_spectra'),
                buttons=ModalButtons)

    Choose = View(Group(
        HGroup(
            Item('choices', label='Select line'),
            Item('from_existing'),
            Item('select_existing',
                 enabled_when='from_existing',
                 editor=EnumEditor(name='transit_list')),
            Item('fit_now', enabled_when='from_existing'),
        ),
        Item('z', label='Redshift'),
        show_border=True,
    ),
                  buttons=LiveButtons,
                  kind='livemodal',
                  title='Pychelle - add new transition')
Example #13
class InvoluteFace(Face):
    name = "Cylindical Involute Face"
    length = PrototypedFrom("owner")
    width = PrototypedFrom("owner")
    tube_radius = PrototypedFrom("owner")
    begin_angle = PrototypedFrom("owner")
    end_angle = PrototypedFrom("owner")
    resolution = PrototypedFrom("owner")
    max_length = PrototypedFrom("owner")

    intel_guess = numpy.array([0])

    #length = 100.
    #width = 30.
    #tube_radius = 6.
    #begin_angle = 0.
    #end_angle = 500.
    #resolution = 30
    #max_length = 100.

    def compute_length(self, start, end):
        #takes [n,3] vectors and returns [1,n] array of distances
        a = start - end
        d = a**2
        e = sum(d.T)
        distance = numpy.sqrt(e)

        mask = distance < self.tolerance
        distance[mask] = numpy.Infinity

        return distance

    #def eval_children(self, rays, points, mask=slice(None,None,None)):
    #   return None

    def interpolate_z(self, first, threeD, twoD):
        # takes a 3d origin and a 3d point on the line, and interpolates the
        # third dimension of another point on that line, for which x and y are
        # given and z is 0

        # len2d1/len2d2 = LenInZ1/LenInZ2

        len2d1 = self.compute_length(first[:, :2], threeD[:, :2])
        len2d2 = self.compute_length(first[:, :2], twoD[:, :2])

        k = len2d2 / len2d1

        #Zf = Z1 - k*ChangeinZ
        z = first[:, 2] - (first[:, 2] - threeD[:, 2]) * k

        return z

    def involine(self, theta, args):
        """function for fsolve to solve for theta given an intersecting line"""
        X = theta
        m, b, E, R = args

        sin = numpy.sin
        cos = numpy.cos

        z = numpy.array((E - R * X + m * R) * sin(X) +
                        (m * E - m * R * X - R) * cos(X) - b)
        return z

    def pts2theta(self, theta, xyz):
        """function for fsolve to find theta given an intersection point"""
        W = self.width
        R = self.tube_radius

        x = xyz[0][0]
        y = xyz[0][1]

        r1_x, r1_y = -R * numpy.sin(theta), -R * numpy.cos(theta)
        r2_mag = numpy.abs(W - R * theta)
        r2_x, r2_y = -r2_mag * numpy.cos(theta), r2_mag * numpy.sin(theta)

        x0 = x - (r1_x + r2_x)
        y0 = y - (r1_y + r2_y)

        #distance = numpy.array([x0**2 + y0**2])
        return numpy.array([x0[0], y0[0]])
        #return numpy.array([distance])

    def compute_normal(self, points):
        """
        evaluate normalised surface Normal vector
        """
        n = points.shape[0]  #returns how many points there are
        W = self.width
        R = self.tube_radius
        t = self.transform

        #The intelligent guess relies on the thetas used to find these points
        #in the intersect method.
        angles = self.intel_guess

        #this is a terrible guess:
        #guess = (self.begin_angle+self.end_angle)/2
        #guess = numpy.radians(guess)
        inv_t = t.linear_inverse
        t_points = transformPoints(inv_t, points)

        r2_x = numpy.zeros(n)
        r2_y = r2_x.copy()

        for j, point in enumerate(t_points):
            result = fsolve(self.pts2theta, angles[j], [point], full_output=1)
            theta = result[0]
            #print "theta:\n",theta
            #print result[2]  #error if this != 1
            r2_mag = numpy.abs(W - R * theta)
            r2_x[j], r2_y[j] = -r2_mag * numpy.cos(theta), r2_mag * numpy.sin(
                theta)

        n_x = -r2_x
        n_y = -r2_y
        n_z = numpy.zeros(n_x.shape)
        t_normal = numpy.column_stack((n_x, n_y, n_z))

        #print t_normal
        #        coefs = self.owner.vtk_quadric.coefficients
        #        ax = 2*coefs[0]/coefs[8]
        #        ay = 2*coefs[1]/coefs[8]
        #        t_normal = numpy.column_stack((ax*t_points[:,0],
        #                                       ay*t_points[:,1],
        #                                       -numpy.ones(n)))
        return transformNormals(t, t_normal)

    def intersect(self, P1, P2, max_length):
        """
        
        @param P1: a (n,3) array of points, start of each ray
        @param P2: a (n,3) array of points, ends of the rays
        """
        R = self.tube_radius
        W = self.width
        intel_guess = self.intel_guess
        begin_angle = numpy.radians(self.begin_angle)
        end_angle = numpy.radians(self.end_angle)
        n = P1.shape[0]  #returns how many points there are

        #turn array of points into y = mx + q
        m = (P1[:, 1] - P2[:, 1]) / (P1[:, 0] - P2[:, 0])  # m = y1-y2 / x1-x2
        q = P1[:, 1] - m * P1[:, 0]  #q = y - mx

        #define involute (function of theta and starting parameters only)
        #r1_x, r1_y = -R*numpy.sin(theta),-R*numpy.cos(theta)
        #r2_mag = numpy.abs(W - R * theta)
        #r2_x, r2_y = -r2_mag*numpy.cos(theta), r2_mag*numpy.sin(theta)

        #Where do these lines intersect with the involute? solve for theta when
        #line = involute

        #there are only two solutions per 2Pi, so guessing every pi will give
        #every possible result.
        # Fixed: fsolve wants a better guess than that, so I used higher resolution
        guesses = numpy.arange(begin_angle, end_angle, numpy.pi / 8)
        guesses = numpy.append(guesses, end_angle)

        #give output array the right shape
        intersect = numpy.ones([guesses.shape[0], n, 1]) * numpy.Inf

        #calculate all the thetas at which lines intersect involute
        for j, guess in enumerate(guesses):
            for i, z in enumerate(m):
                result = fsolve(self.involine, [guess], [m[i], q[i], W, R],
                                full_output=1)
                #print "guess, m, q, W, R", guess,m[i],q[i],W,R
                #print "??:",result[0]
                bounds = numpy.logical_and(result[0] >= begin_angle,
                                           result[0] <= end_angle)
                #make sure fsolve found an acceptable answer
                if numpy.logical_and(result[2] == 1, bounds):
                    intersect[j, i] = result[0]
                else:
                    intersect[j, i] = numpy.Inf

        #intersect now contains all the possible intersections by their theta value
        #To calculate distances and directions of travel, we need x,y and z.
        #actually, leave z out of it for now.

        intersection_points = numpy.zeros([guesses.shape[0], n, 2])
        #print "points shape:",intersection_points.shape
        intel_guess = intersect.copy()
        #print "guess shape:",intel_guess.shape

        for j, theta in enumerate(intersect):
            #print "theta for ",j
            #print theta
            r1_x, r1_y = -R * numpy.sin(theta), -R * numpy.cos(theta)
            r2_mag = numpy.abs(W - R * theta)
            r2_x, r2_y = -r2_mag * numpy.cos(theta), r2_mag * numpy.sin(theta)
            x = r1_x + r2_x
            y = r1_y + r2_y
            z = 0
            for i, z in enumerate(intersection_points[j]):
                intersection_points[j, i] = [x[i][0], y[i][0]]
        #print "intersect:"
        #print intersection_points
        # Now, are the intersections along the direction of travel?

        s = P2[:, :2] - P1[:, :2]  #the 2d summed vector: v1 + vs = v2
        len_s = self.compute_length(P1[:, :2], P2[:, :2])  #and 2d length
        dead_ray = len_s == 0  #rays of length = 0 have nonsense normals
        s_n = s.copy()  #initialize the array
        for i, z in enumerate(s):  #normalize the vectors
            if dead_ray[i]:
                s_n[i] = numpy.zeros(s_n.shape[1])
            else:
                a = s[i, :] / len_s[i]
                s_n[i] = a

        s1 = numpy.ones(intersection_points.shape) * numpy.Inf
        len_s1 = numpy.ones([guesses.shape[0], n]) * numpy.Inf
        s1_n = s1.copy()

        for j, inter in enumerate(intersection_points):
            s1[j] = inter - P1[:, :2]
            len_s1[j] = self.compute_length(P1[:, :2], inter)
            dead_ray = len_s1[j] == 0
            for i, z in enumerate(s1[j]):
                if dead_ray[i]:
                    s1_n[j, i] = numpy.zeros(s1_n.shape[1])
                else:
                    a = s1[j, i, :] / len_s1[j, i]
                    s1_n[j, i] = a
        #print "s1_n:"
        #print s1_n
        #now use the normals to filter out intersections that are in the wrong direction

        backwards = numpy.zeros(s1_n.shape, bool)

        # since both are vectors of length one in same dir or 180 deg apart,
        # addition should have len 2 or 0.

        for j, v in enumerate(s1_n):
            for i, z in enumerate(s1_n[j]):
                temp = (s_n[i] + s1_n[j, i])**2
                backwards[j, i] = sum(temp.T) < 1

        intersection_points[backwards] = numpy.inf
        guess_mask = backwards[:, :, :1]
        intel_guess[guess_mask] = numpy.Inf

        #now the z values can easily be interpolated:
        #change in z is proportional to total offset
        third_d = numpy.zeros([guesses.shape[0], n, 1])
        intersection_points = numpy.append(intersection_points,
                                           third_d,
                                           axis=2)

        for j, point in enumerate(intersection_points):
            z1 = self.interpolate_z(P1, P2, point)
            z1[z1 - z1 != 0] = numpy.inf  # if z1 is a number, this will be false
            point[:, 2] = z1

        #Some of these supposed intersections don't actually hit the shape
        #within the given bounds.
        #x and y bounds were taken care of by limits on theta earlier.

        Z_bounds = numpy.array([0, self.length])

        zmin, zmax = min(Z_bounds), max(Z_bounds)

        for i, points in enumerate(intersection_points):
            bounds_mask1 = numpy.zeros(points[:, 0].shape, dtype=bool)

            bounds_mask1 = numpy.logical_or(bounds_mask1, points[:, 2] < zmin)
            bounds_mask1 = numpy.logical_or(bounds_mask1, points[:, 2] > zmax)
            bounds_mask1 = numpy.array([bounds_mask1] * 3).T
            points[bounds_mask1] = numpy.inf
            guess_mask = bounds_mask1[:, :1]
            intel_guess[i][guess_mask] = numpy.inf
        # next, use the distance from start to intersection to select the first
        # intersections if there are multiple

        actual = intersection_points[0] * numpy.inf
        best_guess = intel_guess[0].copy()
        for j, points in enumerate(intersection_points):
            select = self.compute_length(P1, actual) < self.compute_length(
                P1, points)
            for i, n in enumerate(actual):
                if select[i]:
                    pass
                else:
                    actual[i] = points[i, :]
                    best_guess[i] = intel_guess[j, i]

        best_guess = best_guess[numpy.where(best_guess != numpy.inf)]
        self.intel_guess = best_guess
        #finally, be sure the ray length to intersection is longer than the tolerance

        #tol_mask = self.compute_length(P1, actual) < self.tolerance

        #tol_mask = numpy.array([tol_mask]*3).T
        #actual[tol_mask] = numpy.inf

        dtype = ([('length', 'f8'), ('face', 'O'), ('point', 'f8', 3)])
        result = numpy.empty(P1.shape[0], dtype=dtype)
        result['length'] = self.compute_length(P1, actual)
        result['face'] = self
        result['point'] = actual
        return result
Example #14
class CylinderFace(Face):
    name = "Cylinder Face"
    length = PrototypedFrom("owner")
    radius = PrototypedFrom("owner")
    """angles must be between 0 and 360!"""
    begin_angle = PrototypedFrom("owner")
    end_angle = PrototypedFrom("owner")

    def compute_normal(self, points):
        """
        evaluate surface normal vector
        """
        n = points.shape[0]  #returns how many points there are
        t = self.transform
        inv_t = t.linear_inverse
        t_points = transformPoints(inv_t, points)

        #it's just a circle, so the vector pointing to the surface from the origin
        # already is the surface normal vector! y component = 0

        t_points[:, 1] = numpy.zeros(t_points[:, 1].shape)

        L = compute_length(numpy.zeros(t_points.shape), t_points)

        n_x = t_points[:, 0]
        n_y = t_points[:, 1]  # all = 0
        n_z = t_points[:, 2]

        t_normal = numpy.column_stack((n_x, n_y, n_z))

        return transformNormals(t, t_normal)

    def intersect(self, P1, P2, max_length):
        """ takes lines from P1 to P2 and returns first intersection in local 
        coordinates.  z axis is normal to surface at x=0 which is also angle =0.
        However, y and z are switched for easy reuse of code.
        
        if this code ever needs optimization, one good step is to parameterize
        the circle first, solve for the intersection and its theta directly,
        and then use that theta for the boundary conditions
        """

        n = P1.shape[0]
        R = self.radius

        #This was originally written with the Z axis as the long axis of the trough,
        #but in order for the direction parameter to be useful and point from
        #vertex to focus, the y axis must be the long axis.  So, y and z are
        #switched here for all the following calculations and then switched back
        #right before the function returns its points

        P1[:, 1:] = numpy.fliplr(P1[:, 1:]).copy()
        P2[:, 1:] = numpy.fliplr(P2[:, 1:]).copy()

        #turn array of points into y = mx + q
        m = (P1[:, 1] - P2[:, 1]) / (P1[:, 0] - P2[:, 0])  # m = y1-y2 / x1-x2
        q = P1[:, 1] - m * P1[:, 0]  #q = y - mx

        #solve system of equations: y = mx+b and y^2 + x^2 = R^2

        a = (m**2 + 1)
        b = 2 * m * q
        c = q**2 - R**2 * numpy.ones(q.shape)

        d = b**2 - 4 * a * c

        miss_mask = d < 0

        E = numpy.sqrt(d)
        roots = numpy.array([(-b + E) / (2 * a), (-b - E) / (2 * a)])
        for root in roots:
            root[miss_mask] = numpy.inf

        root1, root2 = roots

        #print "roots: ", roots

        #put these roots into a list of intersection points using y = mx + q
        #I make these 3d with z=0, Which i'll fix later
        inter1 = numpy.array([root1, m * root1 + q, numpy.zeros(n)]).T
        inter2 = numpy.array([root2, m * root2 + q, numpy.zeros(n)]).T

        #Where the slope was infinite these values are wrong:

        pos_result = numpy.array(
            [P1[:, 0],
             numpy.sqrt(R**2 - P1[:, 0]**2),
             numpy.zeros(n)]).T
        neg_result = numpy.array(
            [P1[:, 0], -numpy.sqrt(R**2 - P1[:, 0]**2),
             numpy.zeros(n)]).T
        perp_fix = numpy.array([numpy.abs(m) > 1000000.**2] * 3).T

        #print "?: ",m, "\n",perp_fix

        inter1 = numpy.where(perp_fix, pos_result, inter1)
        inter2 = numpy.where(perp_fix, neg_result, inter2)

        #print "1: ",inter1,"\n",inter2

        #Where the ray was parallel to the long axis, the above fix gives the wrong result

        parallel_result = numpy.array(numpy.ones([n, 3]) * numpy.inf).T
        parallel_cond = numpy.logical_and([P1[:, 0] == P2[:, 0]],
                                          [P1[:, 1] == P2[:, 1]]).T
        parallel_fix = numpy.zeros((n, 3), dtype=bool)
        for i, z in enumerate(parallel_cond):
            parallel_fix[i] = z

        inter1[parallel_fix] = numpy.inf
        inter2[parallel_fix] = numpy.inf

        #and where there is a total miss, we want an inf, not a NaN
        miss_result = numpy.array(numpy.ones([n, 3]) * numpy.inf).T
        miss_fix = d < 0

        inter1[miss_fix] = numpy.inf
        inter2[miss_fix] = numpy.inf

        # Now, are the intersections along the direction of travel?

        s = P2[:, :2] - P1[:, :2]  #the 2d summed vector: v1 + vs = v2
        len_s = compute_length(P1[:, :2], P2[:, :2])  #and 2d length
        dead_ray = len_s == 0  #rays of length = 0 have nonsense normals
        s_n = s  #initialize the array
        for i, z in enumerate(s):  #normalize the vectors
            if dead_ray[i]:
                s_n[i] = numpy.zeros(s_n.shape[1])
            else:
                a = s[i, :] / len_s[i]
                s_n[i] = a

        s1 = inter1[:, :2] - P1[:, :2]
        len_s1 = compute_length(P1[:, :2], inter1[:, :2])
        dead_ray = len_s1 == 0
        s1_n = s1
        for i, z in enumerate(s1):
            if dead_ray[i]:
                s1_n[i] = numpy.zeros(s1_n.shape[1])
            else:
                a = s1[i, :] / len_s1[i]
                s1_n[i] = a

        s2 = inter2[:, :2] - P1[:, :2]
        len_s2 = compute_length(P1[:, :2], inter2[:, :2])
        dead_ray = len_s2 == 0
        s2_n = s2
        for i, z in enumerate(s1):
            if dead_ray[i]:
                s2_n[i] = numpy.zeros(s2_n.shape[1])
            else:
                a = s2[i, :] / len_s2[i]
                s2_n[i] = a

        #now use the normals to filter out intersections that are in the wrong direction

        backwards1 = numpy.zeros(s_n.shape, bool)
        backwards2 = backwards1.copy()

        #print inter1
        #print inter2

        # since both are vectors of length one in same dir or 180 deg apart,
        # addition should have len 2 or 0.

        for i, z in enumerate(s_n):

            temp = (s_n[i] + s1_n[i])**2
            backwards1[i] = sum(temp.T) < 1

            temp2 = (s_n[i] + s2_n[i])**2
            backwards2[i] = sum(temp2.T) < 1

        inter1[backwards1] = numpy.inf
        inter2[backwards2] = numpy.inf

        #print inter1
        #print inter2

        #now the z values can easily be interpolated:
        #change in z is proportional to total offset

        z1 = interpolate_z(P1, P2, inter1)
        z1[z1 - z1 != 0] = numpy.inf  # if z1 is a number, this will be false
        z2 = interpolate_z(P1, P2, inter2)
        z2[z2 - z2 != 0] = numpy.inf
        inter1[:, 2] = z1
        inter2[:, 2] = z2

        print("2: ", inter1, "\n", inter2)
        #now apply boundaries based on begin and end angle.
        # for any given x or y value, there are two possible angles.
        # but only one will be responsible for both x and y.
        # so find the possible angles between 0 and 2pi and compare
        r = 1 / R
        pi = numpy.pi

        y1_sin = numpy.arcsin(r * inter1[:, 1])
        y1_thetas = numpy.column_stack(
            (y1_sin, pi - y1_sin, (2 * pi) + y1_sin))

        x1_cos = numpy.arccos(r * inter1[:, 0])
        x1_thetas = numpy.column_stack((x1_cos, 2 * pi - x1_cos))

        print("x und y: ", inter1[:, 1], r * inter1[:, 0])
        inter1_theta = numpy.ones(n) * numpy.inf
        for i, set in enumerate(y1_thetas):
            for phi in set:
                if any(abs(phi - x1_thetas[i]) < .01):
                    inter1_theta[i] = phi

        y2_sin = numpy.arcsin(r * inter2[:, 1])
        y2_thetas = numpy.column_stack(
            (y2_sin, pi - y2_sin, (2 * pi) + y2_sin))

        x2_cos = numpy.arccos(r * inter2[:, 0])
        x2_thetas = numpy.column_stack((x2_cos, 2 * pi - x2_cos))

        inter2_theta = numpy.ones(n) * numpy.inf
        for i, set in enumerate(y2_thetas):
            for phi in set:
                if any(abs(phi - x2_thetas[i]) < .01):
                    inter2_theta[i] = phi

        # and use that angle to check for boundaries
        #print "thetas: ",inter1_theta,inter2_theta
        #print "sections: ", inter2_theta
        small_angle = numpy.radians(self.begin_angle)
        big_angle = numpy.radians(self.end_angle)

        length = self.length

        boundry_mask1 = inter1_theta < small_angle
        boundry_mask1 = numpy.logical_or(boundry_mask1,
                                         inter1_theta > big_angle)
        boundry_mask1 = numpy.logical_or(boundry_mask1, inter1[:, 2] > length)
        boundry_mask1 = numpy.logical_or(boundry_mask1, inter1[:, 2] < 0)

        boundry_mask1 = numpy.array([boundry_mask1] * 3).T
        inter1[boundry_mask1] = numpy.inf

        boundry_mask2 = inter2_theta < small_angle
        boundry_mask2 = numpy.logical_or(boundry_mask2,
                                         inter2_theta > big_angle)
        boundry_mask2 = numpy.logical_or(boundry_mask2, inter2[:, 2] > length)
        boundry_mask2 = numpy.logical_or(boundry_mask2, inter2[:, 2] < 0)

        boundry_mask2 = numpy.array([boundry_mask2] * 3).T
        inter2[boundry_mask2] = numpy.inf

        print("3: ", inter1, "\n", inter2)
        #print "boundy masks: \n", boundry_mask2, "\n", inter2
        # next, use the distance from start to intersection to select the first
        # intersections if there are multiple

        select = compute_length(P1, inter2) < compute_length(P1, inter1)

        #shortest = numpy.where(select, root1, root2)
        #mmm, numpy.where didn't like selecting vectors for some reason
        # So, I'll do it long hand
        select = compute_length(P1, inter2) < compute_length(P1, inter1)
        actual = inter1.copy()
        for i, n in enumerate(inter1):
            if select[i]:
                actual[i] = inter2[i, :]
            else:
                actual[i] = inter1[i, :]

        #finally, be sure the ray length to intersection is longer than the tolerance

        #tol_mask = self.compute_length(P1, actual) < self.tolerance

        #tol_mask = numpy.array([tol_mask]*3).T
        #actual[tol_mask] = numpy.inf

        dtype = ([('length', 'f8'), ('face', 'O'), ('point', 'f8', 3)])
        result = numpy.empty(P1.shape[0], dtype=dtype)
        result['length'] = compute_length(P1, actual)

        #flip y and z back to the standard order
        actual[:, 1:] = numpy.fliplr(actual[:, 1:]).copy()
        result['face'] = self
        result['point'] = actual
        return result
Example #15
class PhysioData(HasTraits):
    """
    Contains the parameters needed to run a MEAP session
    """

    available_widgets = Instance(list)
    def _available_widgets_default(self):
        available_panels = ["Annotation"]
        if "dzdt" in self.contents and "z0" in self.contents:
            available_panels.append("ICG B Point")
        if "doppler" in self.contents:
            available_panels.append("Doppler")
        if self.dzdt_warping_functions.size > 0:
            available_panels.append("Registration")
        
        return available_panels


    contents = Property(Set)
    def _get_contents(self):
        """
        Assuming this object is already initialized, this trait
        checks which data are available: a signal type is included
        if its raw timeseries is present.
        """
        contents = set()
        for signal in ENSEMBLE_SIGNALS | set(('respiration',)):
            attr = signal+"_data"
            if not hasattr(self, attr): continue
            if getattr(self,attr).size > 0:
                contents.update((signal,))

        # Check for respiration-corrected versions of z0 and dzdt
        for signal in ["resp_corrected_z0", "resp_corrected_dzdt"]:
            if not hasattr(self, signal): continue
            if getattr(self,signal).size > 0:
                contents.update((signal,))

        return contents

    calculable_indexes = Property(Set)
    @cached_property
    def _get_calculable_indexes(self):
        """
        Determines, based on content, which indexes are possible
        to calculate.
        """
        # Signals
        has_ecg = "ecg" in self.contents
        has_z0 = "z0" in self.contents
        has_dzdt = "dzdt" in self.contents
        has_resp = "respiration" in self.contents
        has_systolic = "systolic" in self.contents
        has_diastolic = "diastolic" in self.contents
        has_bp = "bp" in self.contents
        has_resp_corrected_z0 = self.resp_corrected_z0.size > 0
        has_l = self.subject_l > 1


        # Indexes
        has_hr = False
        has_lvet = False
        has_sv = False
        has_map = False
        has_co = False

        ix = set()
        if has_ecg:
            has_hr = True
            ix.update(("hr","hrv"))
        if has_ecg and has_dzdt:
            has_lvet = True
            ix.update(("pep", "lvet", "eef"))
        if has_lvet and has_l and has_z0:
            has_sv = True
            ix.update(("sv",))
            if has_resp_corrected_z0:
                ix.update(("resp_corrected_sv",))
        if has_bp or (has_systolic and has_diastolic):
            has_map = True
            ix.update(("map",))
        if has_hr and has_sv:
            has_co = True
            ix.update(("co",))
            if has_resp_corrected_z0:
                ix.update(("resp_corrected_co",))
        if has_co and has_map:
            ix.update(("tpr",))
            if has_resp_corrected_z0:
                ix.update(("resp_corrected_tpr",))
        if has_resp:
            ix.update(("nbreaths"))
        return ix

    meap_version = CStr(__version__)
    original_file = File
    file_location = File

    # -- Censored Epochs --
    censored_intervals = Array
    censoring_sources = List

    @cached_property
    def _get_censored_regions(self):
        censor_regions = []
        for signal in self.contents:
            censor_regions += getattr(self, signal+"_ts").censored_regions
        return censor_regions

    # MEA Weighting function
    mea_window_type = PrototypedFrom("config")
    mea_n_neighbors = PrototypedFrom("config")
    mea_window_secs = PrototypedFrom("config")
    mea_exp_power = PrototypedFrom("config")
    mea_func_name = PrototypedFrom("config")
    mea_weight_direction = PrototypedFrom("config")
    use_trimmed_co = PrototypedFrom("config")
    mea_smooth_hr = PrototypedFrom("config")
    mea_weights = Array

    use_secondary_heartbeat = PrototypedFrom("config")
    secondary_heartbeat = PrototypedFrom("config")
    secondary_heartbeat_pre_msec = PrototypedFrom("config")
    secondary_heartbeat_abs = PrototypedFrom("config")
    secondary_heartbeat_window = PrototypedFrom("config")
    secondary_heartbeat_window_len = PrototypedFrom("config")
    secondary_heartbeat_n_likelihood_bins = PrototypedFrom("config")

    use_ECG2 = PrototypedFrom("config")
    ecg2_weight = PrototypedFrom("config")
    qrs_signal_source = PrototypedFrom("config")

    # Bpoint classifier options
    bpoint_classifier_pre_point_msec = PrototypedFrom("config")
    bpoint_classifier_post_point_msec = PrototypedFrom("config")
    bpoint_classifier_sample_every_n_msec = PrototypedFrom("config")
    bpoint_classifier_false_distance_min = PrototypedFrom("config")
    bpoint_classifier_use_bpoint_prior = PrototypedFrom("config")
    bpoint_classifier_include_derivative = PrototypedFrom("config")
    # Contains errors in msec from bpoint cross validation
    bpoint_classifier_cv_error = Array

    # Points on doppler signal
    dx_point_type = PrototypedFrom("config")
    dx_point_window_len = PrototypedFrom("config")
    db_point_type = PrototypedFrom("config")
    db_point_window_len = PrototypedFrom("config")

    # Impedance Data
    z0_winsor_min = CFloat(0.005)
    z0_winsor_max = CFloat(0.005)
    z0_winsorize = CBool(False)
    z0_included = CBool(False)
    z0_decimated = CBool(False)
    z0_channel_name = CStr("")
    z0_sampling_rate = CFloat(1000)
    z0_sampling_rate_unit = CStr("Hz")
    z0_unit = CStr("Ohms")
    z0_start_time = CFloat(0.)
    z0_data = Array
    mea_z0_matrix = Array
    z0_matrix = Property(Array,depends_on="peak_indices")
    def _get_z0_matrix(self):
        if self.peak_indices.size == 0: return np.array([])
        return peak_stack(self.peak_indices,self.z0_data,
                          pre_msec=self.dzdt_pre_peak,post_msec=self.dzdt_post_peak,
                          sampling_rate=self.z0_sampling_rate)

    mea_resp_corrected_z0_matrix = Array
    resp_corrected_z0_matrix = Property(Array,depends_on="peak_indices")
    def _get_resp_corrected_z0_matrix(self):
        if self.peak_indices.size == 0 or self.resp_corrected_z0.size == 0:
            return np.array([])
        return peak_stack(self.peak_indices,self.resp_corrected_z0,
                          pre_msec=self.dzdt_pre_peak,post_msec=self.dzdt_post_peak,
                          sampling_rate=self.z0_sampling_rate)

    dzdt_winsor_min = CFloat(0.005)
    dzdt_winsor_max = CFloat(0.005)
    dzdt_winsorize = CBool(False)
    dzdt_included = CBool(False)
    dzdt_decimated = CBool(False)
    dzdt_channel_name = CStr("")
    dzdt_sampling_rate = CFloat(1000)
    dzdt_sampling_rate_unit = CStr("Hz")
    dzdt_unit = CStr("Ohms/Sec")
    dzdt_start_time = CFloat(0.)
    dzdt_data = Array
    dzdt_matrix = Property(Array,depends_on="peak_indices")
    mea_dzdt_matrix = Array
    @cached_property
    def _get_dzdt_matrix(self):
        logger.info("constructing dZ/dt matrix")
        if self.peak_indices.size == 0: return np.array([])
        return peak_stack(self.peak_indices,self.dzdt_data,
                          pre_msec=self.dzdt_pre_peak,post_msec=self.dzdt_post_peak,
                          sampling_rate=self.dzdt_sampling_rate)

    # Doppler radar
    doppler_winsor_min = CFloat(0.005)
    doppler_winsor_max = CFloat(0.005)
    doppler_winsorize = CBool(False)
    doppler_included = CBool(False)
    doppler_decimated = CBool(False)
    doppler_channel_name = CStr("")
    doppler_sampling_rate = CFloat(1000)
    doppler_sampling_rate_unit = CStr("Hz")
    doppler_unit = CStr("Ohms/Sec")
    doppler_start_time = CFloat(0.)
    doppler_data = Array
    doppler_matrix = Property(Array,depends_on="peak_indices")
    mea_doppler_matrix = Array
    @cached_property
    def _get_doppler_matrix(self):
        if self.peak_indices.size == 0: return np.array([])
        return peak_stack(self.peak_indices,self.doppler_data,
                          pre_msec=self.doppler_pre_peak,post_msec=self.doppler_post_peak,
                          sampling_rate=self.doppler_sampling_rate)

    # Respiration
    resp_corrected_dzdt_matrix = Property(Array,depends_on="peak_indices")
    mea_resp_corrected_dzdt_matrix = Array
    @cached_property
    def _get_resp_corrected_dzdt_matrix(self):
        if self.peak_indices.size == 0 or self.resp_corrected_dzdt.size == 0:
            return np.array([])
        return peak_stack(self.peak_indices,self.resp_corrected_dzdt,
                          pre_msec=self.dzdt_pre_peak,post_msec=self.dzdt_post_peak,
                          sampling_rate=self.dzdt_sampling_rate)


    # ECG
    ecg_included = CBool(False)
    ecg_winsor_min = CFloat(0.005)
    ecg_winsor_max = CFloat(0.005)
    ecg_winsorize = CBool(False)
    ecg_decimated = CBool(False)
    ecg_channel_name = CStr("")
    ecg_sampling_rate = CFloat(1000)
    ecg_sampling_rate_unit = CStr("Hz")
    ecg_unit = CStr("V")
    ecg_start_time = CFloat(0.)
    ecg_data = Array
    ecg_matrix = Property(Array,depends_on="peak_indices")
    mea_ecg_matrix = Array
    @cached_property
    def _get_ecg_matrix(self):
        if self.peak_indices.size == 0: return np.array([])
        return peak_stack(self.peak_indices,self.ecg_data,
                          pre_msec=self.ecg_pre_peak,post_msec=self.ecg_post_peak,
                          sampling_rate=self.ecg_sampling_rate)

    # ECG Secondary (eg from EEG)
    ecg2_included = CBool(False)
    ecg2_winsor_min = CFloat(0.005)
    ecg2_winsor_max = CFloat(0.005)
    ecg2_winsorize = CBool(False)
    ecg2_decimated = CBool(False)
    ecg2_channel_name = CStr("")
    ecg2_sampling_rate = CFloat(1000)
    ecg2_sampling_rate_unit = CStr("Hz")
    ecg2_unit = CStr("V")
    ecg2_start_time = CFloat(0.)
    ecg2_data = Array
    ecg2_matrix = Property(Array,depends_on="peak_indices")
    mea_ecg2_matrix = Array
    @cached_property
    def _get_ecg2_matrix(self):
        if self.peak_indices.size == 0: return np.array([])
        return peak_stack(self.peak_indices,self.ecg2_data,
                          pre_msec=self.ecg_pre_peak,post_msec=self.ecg_post_peak,
                          sampling_rate=self.ecg_sampling_rate)

    # Blood pressure might come from a CNAP
    using_continuous_bp = CBool(False)
    bp_included = CBool(False)
    bp_winsor_min = CFloat(0.005)
    bp_winsor_max = CFloat(0.005)
    bp_winsorize = CBool(False)
    bp_decimated = CBool(False)
    bp_channel_name = CStr("")
    bp_sampling_rate = CFloat(1000)
    bp_sampling_rate_unit = CStr("Hz")
    bp_unit = CStr("mmHg")
    bp_start_time = CFloat(0.)
    bp_data = Array
    bp_matrix = Property(Array,depends_on="peak_indices")
    mea_bp_matrix = Array
    @cached_property
    def _get_bp_matrix(self):
        return peak_stack(self.peak_indices,self.bp_data,
                          pre_msec=self.bp_pre_peak,post_msec=self.bp_post_peak,
                          sampling_rate=self.bp_sampling_rate)

    # Or two separate channels
    systolic_included = CBool(False)
    systolic_winsor_min = CFloat(0.005)
    systolic_winsor_max = CFloat(0.005)
    systolic_winsorize = CBool(False)
    systolic_decimated = CBool(False)
    systolic_channel_name = CStr("")
    systolic_sampling_rate = CFloat(1000)
    systolic_sampling_rate_unit = CStr("Hz")
    systolic_unit = CStr("mmHg")
    systolic_start_time = CFloat(0.)
    systolic_data = Array
    systolic_matrix = Property(Array,
                               depends_on="peak_indices,bp_pre_peak,bp_post_peak")
    mea_systolic_matrix = Array
    @cached_property
    def _get_systolic_matrix(self):
        if self.peak_indices.size == 0 or not ("systolic" in self.contents):
            return np.array([])
        return peak_stack(self.peak_indices,self.systolic_data,
                          pre_msec=self.bp_pre_peak,post_msec=self.bp_post_peak,
                          sampling_rate=self.bp_sampling_rate)

    diastolic_included = CBool(False)
    diastolic_winsor_min = CFloat(0.005)
    diastolic_winsor_max = CFloat(0.005)
    diastolic_winsorize = CBool(False)
    diastolic_decimated = CBool(False)
    diastolic_channel_name = CStr("")
    diastolic_sampling_rate = CFloat(1000)
    diastolic_sampling_rate_unit = CStr("Hz")
    diastolic_unit = CStr("Ohms")
    diastolic_start_time = CFloat(0.)
    diastolic_data = Array
    diastolic_matrix = Property(Array,
                                depends_on="peak_indices,bp_pre_peak,bp_post_peak")
    mea_diastolic_matrix = Array
    @cached_property
    def _get_diastolic_matrix(self):
        if self.peak_indices.size == 0 or not ("diastolic" in self.contents):
            return np.array([])
        return peak_stack(self.peak_indices,self.diastolic_data,
                          pre_msec=self.bp_pre_peak,post_msec=self.bp_post_peak,
                          sampling_rate=self.bp_sampling_rate)

    respiration_included = CBool(False)
    respiration_winsor_min = CFloat(0.005)
    respiration_winsor_max = CFloat(0.005)
    respiration_winsorize = CBool(False)
    respiration_decimated = CBool(False)
    respiration_channel_name = CStr("")
    respiration_sampling_rate = CFloat(1000)
    respiration_sampling_rate_unit = CStr("Hz")
    respiration_unit = CStr("Ohms")
    respiration_start_time = CFloat(0.)
    respiration_data = Array
    respiration_cycle = Array
    respiration_amount = Array
    resp_corrected_z0 = Array
    resp_corrected_dzdt = Array
    processed_respiration_data = Array
    processed_respiration_time = Array

    # -- Event marking signals (experiment and mri-related)
    mri_trigger_times = Array
    mri_trigger_included = CBool(False)
    mri_trigger_decimated = CBool(False)
    mri_trigger_channel_name = CStr("")
    mri_trigger_sampling_rate = CFloat(1000)
    mri_trigger_sampling_rate_unit = CStr("Hz")
    mri_trigger_unit = CStr("V")
    mri_trigger_start_time = CFloat(0.)
    event_names = List
    event_sampling_rate = CFloat(1000)
    event_included = CBool(True)
    event_decimated = CBool(False)
    event_start_time = CFloat(0.)
    event_sampling_rate_unit = "Hz"
    event_unit = CStr("Hz")

    # -- results of peak detection
    peak_times = Array
    peak_indices = CArray(dtype=int)
    # Non-markable heartbeats
    dne_peak_times = Array
    dne_peak_indices = CArray(dtype=int)
    # Any custom labels for heartbeats go here
    hand_labeled = Instance(np.ndarray) # An array of beat indices, each corresponding
    def _hand_labeled_default(self):
        return np.zeros_like(self.peak_indices)
    # Is the beat usable for analysis?
    usable = Instance(np.ndarray)
    def _usable_default(self):
        return np.ones(len(self.peak_indices),dtype=int)

    p_indices = Instance(np.ndarray)
    def _p_indices_default(self):
        return np.zeros_like(self.peak_indices)
    q_indices = Instance(np.ndarray)
    def _q_indices_default(self):
        return np.zeros_like(self.peak_indices)
    r_indices = Instance(np.ndarray)
    def _r_indices_default(self):
        return np.zeros_like(self.peak_indices)
    s_indices = Instance(np.ndarray)
    def _s_indices_default(self):
        return np.zeros_like(self.peak_indices)
    t_indices = Instance(np.ndarray)
    def _t_indices_default(self):
        return np.zeros_like(self.peak_indices)
    b_indices = Instance(np.ndarray)
    def _b_indices_default(self):
        return np.zeros_like(self.peak_indices)
    c_indices = Instance(np.ndarray)
    def _c_indices_default(self):
        return np.zeros_like(self.peak_indices)
    x_indices = Instance(np.ndarray)
    def _x_indices_default(self):
        return np.zeros_like(self.peak_indices)
    o_indices = Instance(np.ndarray)
    def _o_indices_default(self):
        return np.zeros_like(self.peak_indices)
    systole_indices = Instance(np.ndarray)
    def _systole_indices_default(self):
        return np.zeros_like(self.peak_indices)
    diastole_indices = Instance(np.ndarray)
    def _diastole_indices_default(self):
        return np.zeros_like(self.peak_indices)

    # Indices for doppler
    db_indices = Instance(np.ndarray)
    def _db_indices_default(self):
        return np.zeros_like(self.peak_indices)
    dx_indices = Instance(np.ndarray)
    def _dx_indices_default(self):
        return np.zeros_like(self.peak_indices)

    # Holds B points in the Karcher modes
    karcher_b_indices = Instance(np.ndarray)
    def _karcher_b_indices_default(self):
        return np.zeros(self.n_modes)
    
    # --- Subject information
    subject_age = CFloat(0.)
    subject_gender = Enum("M","F")
    subject_weight = CFloat(0.,label="Weight (lbs)")
    subject_height_ft = Int(0,label="Height (ft)",
                            desc="Subject's height in feet")
    subject_height_in = Int(0,label = "Height (in)",
                            desc="Subject's height in inches")
    subject_electrode_distance_front = CFloat(0.,
                                              label="Impedance electrode distance (front)")
    subject_electrode_distance_back = CFloat(0.,
                                             label="Impedance electrode distance (back)")
    subject_electrode_distance_right = CFloat(0.,
                                              label="Impedance electrode distance (right)")
    subject_electrode_distance_left = CFloat(0.,
                                             label="Impedance electrode distance (left)")
    subject_resp_max = CFloat(0.,label="Respiration circumference max (cm)")
    subject_resp_min = CFloat(0.,label="Respiration circumference min (cm)")
    subject_in_mri = CBool(False,label="Subject was in MRI scanner")
    subject_control_base_impedance = CFloat(0.,label="Control Impedance",
                                            desc="If in MRI, store the z0 value from outside the MRI")

    subject_l = Property(CFloat,depends_on=
                         "subject_electrode_distance_front," + \
                         "subject_electrode_distance_back," + \
                         "subject_electrode_distance_right," + \
                         "subject_electrode_distance_left," + \
                         "subject_height_ft"
                         )
    @cached_property
    def _get_subject_l(self):
        """
        Uses information from the subject measurements to define the
        l variable for calculating stroke volume.

        if left and right electrode distances are provided, use the average
        if front and back electrode distances are provided, use the average
        if subject height in feet and inches is provided, use the estimate of
             l = 0.17 * height
        Otherwise return the first measurement found in front,back,left,right
        If nothing is found, returns 1

        """
        front = self.subject_electrode_distance_front
        back = self.subject_electrode_distance_back
        left = self.subject_electrode_distance_left
        right = self.subject_electrode_distance_right
        if left > 0 and right > 0:
            return (left + right) / 2.
        if front > 0 and back > 0:
            return (front + back) / 2.
        if self.subject_height_ft > 0:
            return (12*self.subject_height_ft + \
                    self.subject_height_in) * 2.54 * 0.17
        for measure in (front, back, left, right):
            if measure > 0.: return measure
        return 1

    # --- From the global configuration
    config = Instance(MEAPConfig)
    apply_ecg_smoothing = PrototypedFrom("config")
    ecg_smoothing_window_len = PrototypedFrom("config")
    apply_imp_smoothing = PrototypedFrom("config")
    imp_smoothing_window_len = PrototypedFrom("config")
    apply_bp_smoothing = PrototypedFrom("config")
    bp_smoothing_window_len = PrototypedFrom("config")
    regress_out_resp = PrototypedFrom("config")

    # parameters for processing the raw data before PT detecting
    subject_in_mri = PrototypedFrom("config")
    peak_detection_algorithm  = PrototypedFrom("config")
    # PanTomkins parameters
    qrs_source_signal = Enum("ecg", "ecg2")
    bandpass_min = PrototypedFrom("config")
    bandpass_max = PrototypedFrom("config")
    smoothing_window_len = PrototypedFrom("config")
    smoothing_window = PrototypedFrom("config")
    pt_adjust = PrototypedFrom("config")
    peak_threshold = PrototypedFrom("config")
    apply_filter = PrototypedFrom("config")
    apply_diff_sq = PrototypedFrom("config")
    apply_smooth_ma = PrototypedFrom("config")
    peak_window = PrototypedFrom("config")

    # Parameters for waveform extraction
    ecg_pre_peak = PrototypedFrom("config")
    ecg_post_peak = PrototypedFrom("config")
    dzdt_pre_peak = PrototypedFrom("config")
    dzdt_post_peak = PrototypedFrom("config")
    bp_pre_peak = PrototypedFrom("config")
    bp_post_peak = PrototypedFrom("config")
    systolic_pre_peak = PrototypedFrom("config")
    systolic_post_peak = PrototypedFrom("config")
    diastolic_pre_peak = PrototypedFrom("config")
    diastolic_post_peak = PrototypedFrom("config")
    doppler_pre_peak = PrototypedFrom("config")
    doppler_post_peak = PrototypedFrom("config")
    stroke_volume_equation = PrototypedFrom("config")

    # parameters for respiration analysis
    process_respiration = PrototypedFrom("config")
    resp_polort = PrototypedFrom("config")
    resp_high_freq_cutoff = PrototypedFrom("config")
    resp_inhale_begin_times = Array
    resp_exhale_begin_times = Array

    # Time points of the global ensemble average
    ens_avg_ecg_signal = Array
    ens_avg_dzdt_signal = Array
    ens_avg_bp_signal = Array
    ens_avg_systolic_signal = Array
    ens_avg_diastolic_signal = Array
    ens_avg_doppler_signal = Array
    ens_avg_p_time = CFloat
    ens_avg_q_time = CFloat
    ens_avg_r_time = CFloat
    ens_avg_s_time = CFloat
    ens_avg_t_time = CFloat
    ens_avg_b_time = CFloat
    ens_avg_db_time = CFloat
    ens_avg_dx_time = CFloat
    ens_avg_c_time = CFloat
    ens_avg_x_time = CFloat
    ens_avg_y_time = CFloat
    ens_avg_o_time = CFloat
    ens_avg_systole_time = CFloat
    ens_avg_diastole_time = CFloat
    using_hand_marked_point_priors = CBool(False)

    censored_secs_before = Array
    # MEA Physio timeseries
    lvet = Array
    co = Array
    resp_corrected_co = Array
    pep = Array
    sv = Array
    resp_corrected_sv = Array
    map = Array
    systolic = Array
    diastolic = Array
    hr = Array
    mea_hr = Array
    tpr = Array
    resp_corrected_tpr = Array

    def _config_default(self):
        return MEAPConfig()


    # SRVF-warping parameters
    srvf_lambda = PrototypedFrom("config")
    srvf_max_karcher_iterations = PrototypedFrom("config")
    srvf_update_min = PrototypedFrom("config")
    srvf_karcher_mean_subset_size = PrototypedFrom("config")
    srvf_multi_mode_variance_cutoff = PrototypedFrom("config")
    srvf_use_moving_ensembled = PrototypedFrom("config")
    dzdt_num_inputs_to_group_warping = PrototypedFrom("config")
    srvf_t_min = PrototypedFrom("config")
    srvf_t_max = PrototypedFrom("config")
    bspline_before_warping = PrototypedFrom("config")
    dzdt_srvf_karcher_mean = Array
    dzdt_karcher_mean = Array
    dzdt_karcher_mean_time = Array
    dzdt_warping_functions = Array
    dzdt_functions_to_warp = Array
    
    # Holds data related to initial karcher mean
    dzdt_karcher_mean_inputs = Array
    dzdt_karcher_mean_over_iterations = Array
    srvf_iteration_distances = Array
    srvf_iteration_energy = Array
    
    # Data related to the multiple modes
    n_modes = PrototypedFrom("config")
    max_kmeans_iterations = PrototypedFrom("config")
    mode_dzdt_karcher_means = Array
    mode_cluster_assignment = Array 
    mode_dzdt_srvf_karcher_means = Array

    # Storing and accessing the bpoint classifier
    bpoint_classifier_file = File

    def save(self,outfile):
        # Populate matfile-friendly data structures for censoring regions
        tmp = tempfile.NamedTemporaryFile()
        save_attrs = []
        for k in self.editable_traits():
            if k.endswith("ts"):
                continue
            if k == "available_widgets":
                continue
            if k == "bpoint_classifier":
                continue
            if k == "bpoint_classifier_file":
                continue
            if k in ("censored_regions","event_names"):
                continue
            v = getattr(self,k)
            if type(v) == np.ndarray:
                if v.size == 0: continue
            if type(v) is set: continue
            save_attrs.append(k)
        savedict = dict([(k,getattr(self,k)) \
                         for k in save_attrs if not (getattr(self,k) is None)])
        savedict["censoring_sources"] = np.array(self.censoring_sources)
        for evt in self.event_names:
            savedict[evt] = getattr(self,evt)
        savedict["event_names"] = np.array(self.event_names)
        for k, v in savedict.items():
            try:
                savemat(tmp, {k: v}, long_field_names=True)
            except Exception as e:
                logger.warn("unable to save %s because of %s", k, e)
        tmp.close()
        try:
            savemat(outfile, savedict, long_field_names=True)
        except Exception as e:
            messagebox("Failed to save %s:\n\n%s" % (outfile, e))
Example #16
0
class TroughFace(Face):
    name = "Parabolic Trough Face"
    EFL = PrototypedFrom("owner")
    #EFL = 5.
    EFL_centre = PrototypedFrom("owner")
    length = PrototypedFrom("owner")
    #length = 100
    X_bounds = PrototypedFrom("owner")

    #width = 20

    #takes two (n,3) arrays of points and returns an (n,) array of distances
    def compute_length(self, start, end):
        a = start - end
        d = a**2
        e = sum(d.T)
        distance = numpy.sqrt(e)

        mask = distance < self.tolerance
        distance[mask] = numpy.inf

        return distance
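
    # Worked example (hypothetical inputs, assuming the usual small positive
    # self.tolerance from the Face base class): for start = [[0,0,0],[1,1,0]]
    # and end = [[3,4,0],[1,1,0]], this returns [5.0, inf] -- the second ray has
    # zero length, falls below tolerance and is mapped to inf.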

    #def eval_children(self, rays, points, mask=slice(None,None,None)):
    #   return None

    def interpolate_z(self, first, threeD, twoD):
        # takes a 3-D origin, a 3-D point on the line, and interpolates the third
        # dimension of another point on that line, for which x and y are given
        # and z is 0

        # len2d1/len2d2 = LenInZ1/LenInZ2

        len2d1 = self.compute_length(first[:, :2], threeD[:, :2])
        len2d2 = self.compute_length(first[:, :2], twoD[:, :2])

        k = len2d2 / len2d1

        #Zf = Z1 - k*ChangeinZ
        z = first[:, 2] - (first[:, 2] - threeD[:, 2]) * k

        return z
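
    # Worked example (hypothetical points): with first = (0,0,0), threeD = (1,1,2)
    # and twoD = (2,2,0), len2d1 = sqrt(2), len2d2 = 2*sqrt(2), k = 2 and the
    # interpolated z is 0 - (0 - 2)*2 = 4, i.e. (2,2,4) lies on the same line.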

    def compute_normal(self, points):
        """
        evaluate surface normal vector
        """
        n = points.shape[0]  #returns how many points there are
        t = self.transform
        inv_t = t.linear_inverse
        t_points = transformPoints(inv_t, points)
        h = 1 / (4 * self.EFL)

        #y and z are flipped so that y axis is the long axis
        t_points[:, 1:] = numpy.fliplr(t_points[:, 1:]).copy()

        # y = hx^2 - efl
        # dy/dx = 2hx

        surface_slope = 2 * h * t_points[:, 0]

        # perpendicular to line of slope m has slope -1/m

        perp_slope = -1 / surface_slope

        #then find a line of length 1 with this slope:
        #start with (1,m) and normalize

        r_sq = 1 + perp_slope**2
        L = numpy.sqrt(r_sq)

        #to keep normals pointing inward, points with pos x will have neg x normals
        x_sign = -1 * t_points[:, 0] / numpy.abs(t_points[:, 0])

        n_x = x_sign / L
        n_y = numpy.abs(perp_slope) / L
        n_z = numpy.zeros(n_x.shape)  #surface doesn't point into Z at all

        #fix where shape was flat and normal has slope = inf
        oops = surface_slope == 0
        n_x[oops] = 0.
        n_y[oops] = 1.

        #notice, y and z are flipped back.
        t_normal = numpy.column_stack((n_x, n_z, n_y))

        #print t_normal
        #        coefs = self.owner.vtk_quadric.coefficients
        #        ax = 2*coefs[0]/coefs[8]
        #        ay = 2*coefs[1]/coefs[8]
        #        t_normal = numpy.column_stack((ax*t_points[:,0],
        #                                       ay*t_points[:,1],
        #                                       -numpy.ones(n)))
        return transformNormals(t, t_normal)
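
    # Worked example (hypothetical values): with EFL = 5 (so h = 0.05), a point
    # at x = 10 sits where the surface slope is 2*h*x = 1, so perp_slope = -1,
    # L = sqrt(2) and the in-plane normal is (-0.707, 0.707), pointing back
    # toward the axis as intended.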

    def intersect(self, P1, P2, max_length):
        """
        
        @param p1: a (n,3) array of points, start of each ray
        @param p2: a (n,3) array of point, ends of the rays
        """
        n = P1.shape[0]  #returns how many points there are
        efl = self.EFL  #scalar
        h = 1 / (4 * efl)
        cen_cor = efl * self.EFL_centre

        #This was originally written with the Z axis as the long axis of the trough,
        #but in order for the direction parameter to be useful and point from
        #vertex to focus, the y axis must be the long axis. So, y and z are
        #switched here for all the following calculations and then switched back
        #right before the function returns its points.

        P1[:, 1:] = numpy.fliplr(P1[:, 1:]).copy()
        P2[:, 1:] = numpy.fliplr(P2[:, 1:]).copy()

        #turn array of points into y = mx + q
        m = (P1[:, 1] - P2[:, 1]) / (P1[:, 0] - P2[:, 0])  # m = (y1 - y2) / (x1 - x2)
        q = P1[:, 1] - m * P1[:, 0]  # q = y - m*x

        #solve the intersection of y = m*x + q and y = h*x^2 - cen_cor
        # 0 = h*x^2 - m*x - (q + cen_cor)
        # h = 1/(4*EFL), cen_cor = EFL * EFL_centre

        a = h
        b = -m
        c = -q - cen_cor

        d = b**2 - 4 * a * c

        e = numpy.sqrt(d)
        roots = [(-b + e) / (2 * a), (-b - e) / (2 * a)]
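
        # Worked numeric check (hypothetical ray): with EFL = 5 (h = 0.05),
        # cen_cor = 0 and the horizontal ray y = 5 (m = 0, q = 5), the
        # discriminant is d = 4*0.05*5 = 1, giving roots x = +10 and x = -10,
        # which indeed satisfy y = 0.05*x**2.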

        root1, root2 = roots

        #put these roots into a list of intersection points using y = mx + q
        #I make these 3d with z=0, Which i'll fix later
        inter1 = numpy.array([root1, h * (root1**2) - cen_cor,
                              numpy.zeros(n)]).T
        inter2 = numpy.array([root2, h * (root2**2) - cen_cor,
                              numpy.zeros(n)]).T

        #Where the slope was infinite these values are wrong:

        perp_result = numpy.array(
            [P1[:, 0], h * P1[:, 0]**2 - cen_cor,
             numpy.zeros(n)]).T
        perp_fix = numpy.array([P1[:, 0] == P2[:, 0]] * 3).T

        inter1 = numpy.where(perp_fix, perp_result, inter1)
        inter2[perp_fix] = numpy.inf

        #Where the ray is parallel to the long axis, the fix above gives the wrong
        #result, so those rays are marked as misses instead

        parallel_result = numpy.array(numpy.ones([n, 3]) * numpy.inf).T
        parallel_cond = numpy.logical_and([P1[:, 0] == P2[:, 0]],
                                          [P1[:, 1] == P2[:, 1]]).T
        parallel_fix = numpy.zeros((n, 3), dtype=bool)
        for i, z in enumerate(parallel_cond):
            parallel_fix[i] = z

        inter1[parallel_fix] = numpy.inf
        inter2[parallel_fix] = numpy.inf

        #and where there is a total miss, we want an inf, not a NaN
        miss_result = numpy.array(numpy.ones([n, 3]) * numpy.inf).T
        miss_fix = d < 0

        inter1[miss_fix] = numpy.inf
        inter2[miss_fix] = numpy.inf

        # Now, are the intersections along the direction of travel?

        s = P2[:, :2] - P1[:, :2]  #the 2d summed vector: v1 + vs = v2
        len_s = self.compute_length(P1[:, :2], P2[:, :2])  #and 2d length
        dead_ray = len_s == 0  #rays of length = 0 have nonsense normals
        s_n = s  #initialize the array
        for i, z in enumerate(s):  #normalize the vectors
            if dead_ray[i]:
                s_n[i] = numpy.zeros(s_n.shape[1])
            else:
                a = s[i, :] / len_s[i]
                s_n[i] = a

        s1 = inter1[:, :2] - P1[:, :2]
        len_s1 = self.compute_length(P1[:, :2], inter1[:, :2])
        dead_ray = len_s1 == 0
        s1_n = s1
        for i, z in enumerate(s1):
            if dead_ray[i]:
                s1_n[i] = numpy.zeros(s1_n.shape[1])
            else:
                a = s1[i, :] / len_s1[i]
                s1_n[i] = a

        s2 = inter2[:, :2] - P1[:, :2]
        len_s2 = self.compute_length(P1[:, :2], inter2[:, :2])
        dead_ray = len_s2 == 0
        s2_n = s2
        for i, z in enumerate(s2):
            if dead_ray[i]:
                s2_n[i] = numpy.zeros(s2_n.shape[1])
            else:
                a = s2[i, :] / len_s2[i]
                s2_n[i] = a

        #now use the normals to filter out intersections that are in the wrong direction

        backwards1 = numpy.zeros(s_n.shape, bool)
        backwards2 = backwards1.copy()

        #print inter1
        #print inter2

        # since both are vectors of length one in same dir or 180 deg apart,
        # addition should have len 2 or 0.

        for i, z in enumerate(s_n):

            temp = (s_n[i] + s1_n[i])**2
            backwards1[i] = sum(temp.T) < 1

            temp2 = (s_n[i] + s2_n[i])**2
            backwards2[i] = sum(temp2.T) < 1

        inter1[backwards1] = numpy.inf
        inter2[backwards2] = numpy.inf
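
        # e.g. if s_n[i] = [1, 0] and s1_n[i] = [-1, 0], the candidate lies behind
        # the ray: the summed vector is [0, 0], its squared length 0 < 1, and the
        # intersection is discarded as backwards.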

        #print inter1
        #print inter2

        #now the z values can easily be interpolated:
        #the change in z is proportional to the total offset

        z1 = self.interpolate_z(P1, P2, inter1)
        z1[z1 - z1 != 0] = numpy.inf  #if z1 is finite this is False; NaN/inf entries map to inf
        z2 = self.interpolate_z(P1, P2, inter2)
        z2[z2 - z2 != 0] = numpy.inf
        inter1[:, 2] = z1
        inter2[:, 2] = z2

        #Some of these supposed intersections don't actually hit the shape
        #within the given bounds

        X_bounds = self.X_bounds
        # Y bounds are determined by the X bounds via y = h*x^2 - cen_cor
        half_len = self.length / 2.
        Z_bounds = numpy.array([-half_len, half_len])

        xmin, xmax = min(X_bounds), max(X_bounds)
        zmin, zmax = min(Z_bounds), max(Z_bounds)

        bounds_mask1 = numpy.zeros(inter1[:, 0].shape, dtype=bool)
        bounds_mask2 = numpy.zeros(inter2[:, 0].shape, dtype=bool)

        bounds_mask1 = inter1[:, 0] < xmin
        bounds_mask1 = numpy.logical_or(bounds_mask1, inter1[:, 0] > xmax)
        bounds_mask1 = numpy.logical_or(bounds_mask1, inter1[:, 2] < zmin)
        bounds_mask1 = numpy.logical_or(bounds_mask1, inter1[:, 2] > zmax)
        bounds_mask1 = numpy.array([bounds_mask1] * 3).T
        inter1[bounds_mask1] = numpy.inf

        bounds_mask2 = inter2[:, 0] < xmin
        bounds_mask2 = numpy.logical_or(bounds_mask2, inter2[:, 0] > xmax)
        bounds_mask2 = numpy.logical_or(bounds_mask2, inter2[:, 2] < zmin)
        bounds_mask2 = numpy.logical_or(bounds_mask2, inter2[:, 2] > zmax)

        bounds_mask2 = numpy.array([bounds_mask2] * 3).T
        inter2[bounds_mask2] = numpy.inf

        # next, use the distance from start to intersection to select the first
        # intersection if there are multiple

        #shortest = numpy.where(select, root1, root2)
        #mmm, numpy.where didn't like selecting vectors for some reason
        # So, I'll do it long hand
        select = self.compute_length(P1, inter2) < self.compute_length(
            P1, inter1)
        actual = inter1.copy()
        for i, n in enumerate(inter1):
            if select[i]:
                actual[i] = inter2[i, :]
            else:
                actual[i] = inter1[i, :]

        #finally, be sure the ray length to intersection is longer than the tolerance

        #tol_mask = self.compute_length(P1, actual) < self.tolerance

        #tol_mask = numpy.array([tol_mask]*3).T
        #actual[tol_mask] = numpy.inf

        dtype = ([('length', 'f8'), ('face', 'O'), ('point', 'f8', 3)])
        result = numpy.empty(P1.shape[0], dtype=dtype)
        result['length'] = self.compute_length(P1, actual)

        #flip y and z back to the standard order
        actual[:, 1:] = numpy.fliplr(actual[:, 1:]).copy()
        result['face'] = self
        result['point'] = actual
        return result
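
# Stand-alone sketch (not part of the TroughFace class) of the 2-D line/parabola
# intersection solved in intersect() above, with cen_cor taken as 0 for
# simplicity. The sample rays and EFL value are hypothetical, and vertical rays
# (equal x endpoints) are not special-cased here the way intersect() does.
import numpy

def line_parabola_roots(p1, p2, efl):
    """Return the x coordinates where rays p1->p2 cross y = x**2 / (4*efl)."""
    p1 = numpy.asarray(p1, dtype=float)
    p2 = numpy.asarray(p2, dtype=float)
    h = 1.0 / (4.0 * efl)
    m = (p1[:, 1] - p2[:, 1]) / (p1[:, 0] - p2[:, 0])  # slope of y = m*x + q
    q = p1[:, 1] - m * p1[:, 0]
    d = m ** 2 + 4.0 * h * q                 # discriminant of h*x**2 - m*x - q
    e = numpy.sqrt(numpy.where(d < 0, numpy.nan, d))   # NaN marks a clean miss
    return (m + e) / (2.0 * h), (m - e) / (2.0 * h)

p1 = numpy.array([[-20.0, 5.0], [0.0, 10.0]])
p2 = numpy.array([[20.0, 5.0], [5.0, 0.0]])
print(line_parabola_roots(p1, p2, efl=5.0))  # first ray crosses at x = +/-10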