Example #1
File: camera.py  Project: nuds7/Moteur
	def update(self, target, angle):
		#self.target = target
		
		self.newPositionX 		= utils.weighted_average(self.newPositionX,target[0],self.rate[0])
		self.newPositionY 		= utils.weighted_average(self.newPositionY,target[1],self.rate[1])
		self.newWeightedScale 	= utils.weighted_average(self.newWeightedScale,self.scale,self.scaleRate)
		
		self.new_vel_zoom 	= utils.weighted_average(self.new_vel_zoom,self.vel_zoom,20)
		
		glViewport(0, 0, self.screen_size[0], self.screen_size[1])

		glMatrixMode(GL_PROJECTION)
		glLoadIdentity()

		# fov (120), aspect, near, far clipping planes
		gluPerspective(self.newWeightedScale, self.aspect, 10.0, -10.)
		#self.newAngle = ((self.newAngle*(30-1))+angle) / 30

		#glRotatef(self.newAngle,0.0,0.0,1.0)
		
		# position of the camera
		gluLookAt(self.newPositionX-self.new_vel_zoom, self.newPositionY+(self.new_vel_zoom*.5), +370,
				  self.newPositionX, self.newPositionY, 0,
				  sin(0),1,0.0)

		#glRotatef(0, 0., 1., 0.)

		#glScalef(2.0, 2.0, 2.0)
		#glTranslatef(self.newPositionX*-1, self.newPositionY*-1, 100)

		glMatrixMode(GL_MODELVIEW)
		glLoadIdentity()
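
This camera example smooths the position, scale, and zoom toward their targets with a utils.weighted_average(current, target, rate) helper that is not shown on this page. Judging from the commented-out line self.newAngle = ((self.newAngle*(30-1))+angle) / 30 above, it is presumably a simple running average toward the target; the following is a minimal sketch under that assumption, not the project's actual utils module:

def weighted_average(current, target, rate):
    # Assumed helper: move current one rate-th of the way toward target.
    # rate=1 jumps straight to the target; larger rates give smoother,
    # slower convergence (the snippet above uses rates such as 20).
    return ((current * (rate - 1)) + target) / rate
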
Example #2
File: camera.py  Project: Elizabwth/pyman
	def update(self, pos, target, angle, fov):
		
		glViewport(0, 0, self.screen_size[0], self.screen_size[1])

		glMatrixMode(GL_PROJECTION)
		glLoadIdentity()

		self.pos = (utils.weighted_average(self.pos[0],pos[0],self.pos_rate[0]),
					utils.weighted_average(self.pos[1],pos[1],self.pos_rate[1]),
					utils.weighted_average(self.pos[2],pos[2],self.pos_rate[2]))
		self.target = (utils.weighted_average(self.target[0],target[0],self.target_rate[0]),
					   utils.weighted_average(self.target[1],target[1],self.target_rate[1]),
					   utils.weighted_average(self.target[2],target[2],self.target_rate[2]))
		self.angle = utils.weighted_average(self.angle,angle,self.angle_rate)
		self.fov = utils.weighted_average(self.fov,fov,self.fov_rate)

		# fov (120), aspect, near, far clipping planes
		gluPerspective(self.fov, self.aspect, 10.0, -10.)

		glRotatef(self.angle,0.0,0.0,1.0)
		
		# position of the camera, target, up axis
		gluLookAt(self.pos[0], self.pos[1], self.pos[2]+self.zoom,
				  self.target[0], self.target[1], self.target[2],
				  0,1,0)

		glMatrixMode(GL_MODELVIEW)
		glLoadIdentity()
Example #3
    def update(self, pos, target, angle, fov):

        glViewport(0, 0, self.screen_size[0], self.screen_size[1])

        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()

        self.pos = (utils.weighted_average(self.pos[0], pos[0],
                                           self.pos_rate[0]),
                    utils.weighted_average(self.pos[1], pos[1],
                                           self.pos_rate[1]),
                    utils.weighted_average(self.pos[2], pos[2],
                                           self.pos_rate[2]))
        self.target = (utils.weighted_average(self.target[0], target[0],
                                              self.target_rate[0]),
                       utils.weighted_average(self.target[1], target[1],
                                              self.target_rate[1]),
                       utils.weighted_average(self.target[2], target[2],
                                              self.target_rate[2]))
        self.angle = utils.weighted_average(self.angle, angle, self.angle_rate)
        self.fov = utils.weighted_average(self.fov, fov, self.fov_rate)

        # fov (120), aspect, near, far clipping planes
        gluPerspective(self.fov, self.aspect, 10.0, -10.)

        glRotatef(self.angle, 0.0, 0.0, 1.0)

        # position of the camera, target, up axis
        gluLookAt(self.pos[0], self.pos[1], self.pos[2] + self.zoom,
                  self.target[0], self.target[1], self.target[2], 0, 1, 0)

        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
def sum_spectra_weighted_ave(obj, **kwargs):
    """
    This function takes a set of data and sums the individual bins by weighted
    average. That information is then assembled back into a single spectrum.
    The individual spectra should already have been rebinned.
    
    @param obj: Object containing data spectra
    @type obj: C{SOM.SOM} or C{SOM.SO}

    @param kwargs: A list of keyword arguments that the function accepts:
    
    @return: The summed spectra (one)
    @rtype: C{SOM.SOM}
    """

    if obj is None:
        return None

    # import the helper functions
    import hlr_utils

    # set up for working through data
    (result, res_descr) = hlr_utils.empty_result(obj)
    o_descr = hlr_utils.get_descr(obj)

    result = hlr_utils.copy_som_attr(result, res_descr, obj, o_descr)

    # Get the number of axis channels
    len_axis = len(obj[0])

    import nessi_list
    import SOM
    import utils

    # Empty SO for final spectrum
    so = SOM.SO()

    len_som = hlr_utils.get_length(obj)

    # Slice data, calculate weighted average and repackage spectra
    for i in xrange(len_axis):

        sliced_data = nessi_list.NessiList()
        sliced_data_err2 = nessi_list.NessiList()

        for j in xrange(len_som):
            obj1 = hlr_utils.get_value(obj, j, o_descr, "all")
            if i == 0 and j == 0:
                map_so = hlr_utils.get_map_so(obj, None, j)
                hlr_utils.result_insert(so, "SO", map_so, None, "all")

            sliced_data.append(obj1.y[i])
            sliced_data_err2.append(obj1.var_y[i])

        len_fit = len(sliced_data)

        value = utils.weighted_average(sliced_data, sliced_data_err2, 0,
                                       len_fit - 1)
        so.y[i] = value[0]
        so.var_y[i] = value[1]

    hlr_utils.result_insert(result, res_descr, so, None, "all")

    return result
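
sum_spectra_weighted_ave passes a slice of y values together with their squared uncertainties (var_y) to utils.weighted_average and stores the returned (value, variance) pair, which points to an inverse-variance weighted mean. Below is a self-contained sketch of that calculation, offered as an assumption about what the utils routine computes rather than the actual NESSI implementation:

def weighted_average(values, errs2, start, end):
    # Inverse-variance weighted mean over the inclusive bin range [start, end].
    # Returns (mean, variance_of_mean). Zero-variance bins are skipped here to
    # avoid division by zero; the real routine may treat them differently.
    weighted_sum = 0.0
    total_weight = 0.0
    for y, var in zip(values[start:end + 1], errs2[start:end + 1]):
        if var == 0.0:
            continue
        weight = 1.0 / var
        weighted_sum += weight * y
        total_weight += weight
    if total_weight == 0.0:
        return (0.0, 0.0)
    return (weighted_sum / total_weight, 1.0 / total_weight)
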
Example #5
File: demo.py  Project: ricardojdb/face-api
                recog_req = requests.post(
                    url=f'http://{host}:7006/predict',
                    data=encode_img(roi_color_wide),
                    timeout=5)
                recog = recog_req.json()
                fr_score, label = recog["dist"], recog["label"]
            except:
                traceback.print_exc(file=sys.stdout)
                fr_score, label = 0, " "

            time_stamp = datetime.now().strftime("%H:%M:%S")

            # Use exponentially weighted average to smooth the
            # changes in sentiment, age and position.
            if label in table_dict:
                age_ewa = utils.weighted_average(
                    age, table_dict[label][2], beta=0.998)

                scores_ewa = utils.weighted_average(
                    scores, table_dict[label][3:11], beta=0.5)

                box_ewa = utils.weighted_average(
                    [xmin, ymin, xmax, ymax],
                    table_dict[label][-2], beta=0.998)

                table_dict[label] = [
                    label, gender, age_ewa, *scores_ewa, box_ewa, time_stamp]
            else:
                table_dict[label] = [
                    label, gender, age, *scores,
                    [xmin, ymin, xmax, ymax], time_stamp]
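
In this snippet utils.weighted_average(new, previous, beta=...) is an exponentially weighted average used to smooth the detected age, sentiment scores, and bounding box between frames. A minimal sketch of such a helper, assuming the usual EWA blend (this is not the face-api project's actual code):

import numpy as np

def weighted_average(new_value, prev_value, beta=0.9):
    # Exponentially weighted average: keep a fraction beta of the previous
    # estimate and blend in (1 - beta) of the new observation. Works
    # elementwise, so score vectors and [xmin, ymin, xmax, ymax] boxes are
    # smoothed component by component.
    new_value = np.asarray(new_value, dtype=float)
    prev_value = np.asarray(prev_value, dtype=float)
    return beta * prev_value + (1.0 - beta) * new_value

A beta close to 1 (such as the 0.998 used above for age and box position) changes slowly, while beta=0.5 lets the sentiment scores track new observations much more quickly.
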
def calculate_ref_background(obj, no_bkg, inst, peak_excl, **kwargs):
    """
    This function takes a set of reflectometer data spectra in TOF, slices the
    data along TOF channels, fits a linear function to the slice to determine
    the background and then reassembles the slice back into TOF spectra.

    @param obj: Object containing data spectra
    @type obj: C{SOM.SOM} or C{SOM.SO}

    @param no_bkg: Flag for actually requesting a background calculation
    @type no_bkg: C{boolean}

    @param inst: String containing the reflectometer short name
    @type inst: C{string} (I{REF_L} or I{REF_M})

    @param peak_excl: The bounding pixel IDs for peak exclusion from fit
    @type peak_excl: C{tuple} containing the minimum and maximum pixel ID
    
    @param kwargs: A list of keyword arguments that the function accepts:

    @keyword aobj: An alternate data object containing the sizing information
                   for the constructed background spectra.
    @type aobj: C{SOM.SOM} or C{SOM.SO}
    

    @return: Background spectra
    @rtype: C{SOM.SOM}
    """
    if obj is None:
        return None

    if no_bkg:
        return None

    # import the helper functions
    import hlr_utils

    # Setup instrument specific stuff
    if inst == "REF_L":
        inst_pix_id = 1
    elif inst == "REF_M":
        inst_pix_id = 0
    else:
        raise RuntimeError("Do not know how to deal with instrument %s" % inst)

    # Check keywords
    try:
        aobj = kwargs["aobj"]
    except KeyError:
        aobj = None

    # set up for working through data
    (result, res_descr) = hlr_utils.empty_result(obj)
    o_descr = hlr_utils.get_descr(obj)

    result = hlr_utils.copy_som_attr(result, res_descr, obj, o_descr)

    # Set spectrum object to obtain background SOs from
    if aobj is None:
        bobj = obj
    else:
        bobj = aobj

    # Get the number of spectra for background calculation
    len_som = len(obj)

    # Get the number of spectra for final background object
    if aobj is None:
        len_bsom = len(obj)
    else:
        len_bsom = len(aobj)
        
    # Get the number of TOF channels
    len_tof = len(obj[0])

    # Create blank SOs for background spectra
    so_list = []

    import nessi_list
    import SOM
    import utils

    # Setup pixel axes
    pix_axis = nessi_list.NessiList()
    if peak_excl is not None:
        pix_axis_no_peak = nessi_list.NessiList()

    # Fill pixel axes and background SOs
    for k in xrange(len_bsom):
        map_so = hlr_utils.get_map_so(bobj, None, k)

        cur_pix_id = map_so.id[1][inst_pix_id]

        pix_axis.append(cur_pix_id)
        if peak_excl is not None:
            if cur_pix_id < peak_excl[0] or cur_pix_id > peak_excl[1]:
                pix_axis_no_peak.append(cur_pix_id)

        so = SOM.SO()
        hlr_utils.result_insert(so, "SO", map_so, None, "all")
        so_list.append(so)

    # Slice data, calculate weighted average and repackage spectra
    for i in xrange(len_tof):

        sliced_data = nessi_list.NessiList()
        sliced_data_err2 = nessi_list.NessiList()

        for j in xrange(len_som):
            obj1 = hlr_utils.get_value(obj, j, o_descr, "all")
            cur_pix_id = obj1.id[1][inst_pix_id]

            if peak_excl is None:
                filter_pixel = False
            else:
                if cur_pix_id < peak_excl[0] or cur_pix_id > peak_excl[1]:
                    filter_pixel = False
                else:
                    filter_pixel = True

            if not filter_pixel:
                if not (utils.compare(obj1.var_y[i], 0.0) == 0 and \
                        utils.compare(obj1.y[i], 0.0) == 0):
                    sliced_data.append(obj1.y[i])
                    sliced_data_err2.append(obj1.var_y[i])

        len_fit = len(sliced_data)

        if not len_fit:
            value = (0.0, 0.0)
        else:
            value = utils.weighted_average(sliced_data, sliced_data_err2,
                                           0, len_fit-1)

        for j in xrange(len_bsom):
            so_list[j].y[i] = value[0]
            so_list[j].var_y[i] = value[1]

    for m in xrange(len_bsom):
        hlr_utils.result_insert(result, res_descr, so_list[m], None, "all")

    return result
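
A hypothetical call to this function, based only on the signature and docstring above (the object name and pixel bounds are illustrative, not instrument defaults):

# data_som is assumed to be an existing SOM.SOM of reflectometer TOF spectra;
# pixels 120-135 around the assumed specular peak are excluded from the fit.
bkg_som = calculate_ref_background(data_som, False, "REF_L", (120, 135))
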
Example #8
def weighted_average(obj, **kwargs):
    """
    This function takes a C{SOM} or C{SO} and calculates the weighted average
    for the primary axis.

    @param obj: Object that will have the weighted average calculated from it
    @type obj: C{SOM.SOM} or C{SOM.SO}

    @param kwargs: A list of keyword arguments that the function accepts:    

    @keyword start: The index of starting bin
    @type start: C{int}

    @keyword end: The index of ending bin
    @type end: C{int}    
    
    
    @return: Object containing the weighted average and the uncertainty
             squared associated with the weighted average
    @rtype: C{tuple} (for a C{SO}) or a C{list} of C{tuple}s (for a C{SOM})


    @raise TypeError: A C{tuple} or another construct (besides a C{SOM} or
                      C{SO}) is passed to the function
    """

    # import the helper functions
    import hlr_utils

    # set up for working through data
    # This time highest object in the hierarchy is NOT what we need
    result = []
    if (hlr_utils.get_length(obj) > 1):
        res_descr = "list"
    else:
        res_descr = "number"

    o_descr = hlr_utils.get_descr(obj)

    try:
        start = int(kwargs["start"])
    except KeyError:
        start = 0

    try:
        end = int(kwargs["end"])
    except KeyError:
        end = hlr_utils.get_length(obj) - 1

    result = hlr_utils.copy_som_attr(result, res_descr, obj, o_descr)

    # iterate through the values
    import utils

    for i in xrange(hlr_utils.get_length(obj)):
        val = hlr_utils.get_value(obj, i, o_descr, "y")
        err2 = hlr_utils.get_err2(obj, i, o_descr, "y")

        value = utils.weighted_average(val, err2, start, end)

        hlr_utils.result_insert(result, res_descr, value, None, "all")

    import copy
    return copy.deepcopy(result)
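
A hypothetical usage of this wrapper, following the docstring above: data_som is assumed to be an existing SOM.SOM holding several spectra, and the start/end keywords pick the inclusive bin range to average.

# Average bins 10 through 20 of every spectrum in the assumed SOM.SOM object;
# for a SOM the result is a list of (average, uncertainty_squared) tuples.
averages = weighted_average(data_som, start=10, end=20)
for avg, avg_err2 in averages:
    print(avg, avg_err2)
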