def test_stair_freeyaw(self):
        """
        """
        fname = '/home/dave/Repositories/public/0_davidovitch/'
        fname += 'freeyaw-ojf-wt-tests/data/calibrated/DataFrame/'
        fname += '0212_run_064_9.0ms_dc1_freeyawplaying_stiffblades'
        fname += '_coning_pwm1000_highrpm.h5'
        res = pd.read_hdf(fname, 'table')
        time = res.time.values
        sps = 1.0 / np.diff(time).mean()

        ff = Filters()

        cutoff_hz = 1.0
        order = 2
        Wn = cutoff_hz*2.0/sps
        B, A = sp.signal.butter(order, Wn, output='ba')
        yawf = sp.signal.filtfilt(B, A, res.yaw_angle.values)

        # YAW
        plt.figure('yaw')
        plt.plot(res.time, res.yaw_angle, 'r-')
        plt.plot(res.time, yawf, 'b-')

        B, A = sp.signal.butter(order, 1.0*2.0/sps, output='ba')
        yawf2 = sp.signal.filtfilt(B, A, res.yaw_angle.values)
        plt.plot(res.time, yawf2, 'k--')

        # RPM
        data = res.rpm.values
        data_f = ff.butter_lowpass(sps, data, order=2, cutoff_hz=1.0)

        plt.figure('rpm')
        plt.plot(res.time, data, 'r-')
        plt.plot(res.time, data_f, 'b-')


#        filtered_x, N, delay = ff.fir(time, res.rpm, cutoff_hz=1.0,
#                                      freq_trans_width=1.0, ripple_db=50.0)
#        plt.plot(res.time, filtered_x, 'k--')

        smooth_window = 2.0
        ws = int(smooth_window*sps)
        data_s = ff.smooth(res.rpm, window_len=ws, window='hanning')
        NN = len(data_s) - len(time)
        data_s = data_s[NN:]
#        time_s = time[NN:]
        plt.plot(time+(smooth_window/2.0), data_s, 'k--')

        # downsample in order not to break the plotting further down
        time_down = np.arange(time[0], time[-1], 0.1)
        data_f_down = sp.interpolate.griddata(time, data_f, time_down)

        plt.plot(time_down, data_f_down, 'm-', alpha=0.7)

#        # and upsampling again
#        data = sp.interpolate.griddata(time_down, data_down, time)

        slope, intercept, r_value, p_value, std_err \
            = sp.stats.linregress(data_f_down, y=time_down)
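
Note: the snippet above uses the standard SciPy zero-phase low-pass recipe.
A minimal, self-contained sketch of that pattern (the sample rate and test
signal here are made up for illustration, not taken from the test data):

import numpy as np
from scipy import signal

sps = 100.0                            # assumed sample rate [Hz]
t = np.arange(0.0, 10.0, 1.0 / sps)
raw = np.sin(2 * np.pi * 0.2 * t) + 0.3 * np.random.randn(t.size)

cutoff_hz, order = 1.0, 2
Wn = cutoff_hz * 2.0 / sps             # cutoff normalized to Nyquist
B, A = signal.butter(order, Wn, output='ba')
smooth = signal.filtfilt(B, A, raw)    # forward-backward: zero phase lag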
Example #2
class WordsToBeKilled(unittest.TestCase):
    def setUp(self):
        self.filters = Filters()

    def tearDown(self):
        self.filters = None
        
    def testKnownWords(self):
        known_words = [u'知识']
        for w in known_words:
            self.assertTrue(self.filters.is_known_word(w))

    def testNonChineseword(self):
        non_chinese = [u'Chinese',
                       u'[',
                       u',',
                       u'   ']
        for w in non_chinese:
            self.assertTrue(self.filters.is_not_chinese_word(w))

    def testAA(self):
        aa_words = [u'天天',
                    u'年年']
        for w in aa_words:
            self.assertTrue(self.filters.is_AA(w))

    def testNumber(self):
        number_words = [u'千万',
                        u'几个亿',
                        u'一九三八',
                        u'十三万二千五百三十五']
        for w in number_words:
            self.assertTrue(self.filters.is_number(w))
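
Note: the tests above pin down the Filters word API without showing it. A
hypothetical minimal stub that would satisfy them (the heuristics and the
numeral table are illustrative assumptions, not the project's implementation):

# -*- coding: utf-8 -*-
import re

class Filters(object):
    """Hypothetical stub of the word-filter API exercised above."""
    KNOWN = {u'知识'}                      # toy known-word list (assumption)
    NUM_CHARS = u'零〇一二三四五六七八九十百千万亿几个'

    def is_known_word(self, w):
        return w in self.KNOWN

    def is_not_chinese_word(self, w):
        # True when the word contains no CJK unified ideograph
        return re.search(u'[\u4e00-\u9fff]', w) is None

    def is_AA(self, w):
        # reduplication such as u'天天' or u'年年'
        return len(w) == 2 and w[0] == w[1]

    def is_number(self, w):
        # every character is a Chinese numeral/counter character
        return len(w) > 0 and all(c in self.NUM_CHARS for c in w)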
Example #3
	def __build_resp_layers(self): 
		target_image = self.__grayscale
		image_size = (target_image.width, target_image.height)
		for s in Surf.SCALES:
			Dxx = cv.CloneImage(target_image)
			Dyy = cv.CloneImage(target_image)
			Dxy = cv.CloneImage(target_image)
			# Calculating convolutions
			cv.Filter2D(target_image, Dxx, 
					    Filters.fast_hessian(s, FastHessianType.DXX))
			cv.Filter2D(target_image, Dyy, 
					    Filters.fast_hessian(s, FastHessianType.DYY))		
			cv.Filter2D(target_image, Dxy, 
					    Filters.fast_hessian(s, FastHessianType.DXY))
			l = s / 3.0
			scale_factor1 = 1
			scale_factor2 = 1
			temp = cv.CloneImage(Dyy)
			cv.ConvertScale(Dyy, temp, scale=(1.0 / scale_factor1))
			#cv.SaveImage('D:\\hess\\dyy'+str(s)+'.jpg', temp)
			#cv.SaveImage('D:\\hess\\dyy'+str(s)+'.jpg', Dyy)
			#cv.SaveImage('D:\\hess\\dxy'+str(s)+'.jpg', Dxy)
			#Calculating hessian
			HessMatrS = cv.CreateImage(image_size, cv.IPL_DEPTH_64F, 1)
			DxxMultDyy = cv.CloneImage(target_image)
			cv.Mul(Dxx, Dyy, DxxMultDyy, 1.0 / scale_factor1**2)
			DxySquared = cv.CloneImage(target_image)
			cv.Mul(Dxy, Dxy, DxySquared, Surf.HESSIAN_RELATIVE_WEIGHT**2 / scale_factor2**2)
			cv.Sub(DxxMultDyy, DxySquared, HessMatrS)
			cv.SaveImage('D:\\hess\\hessian'+str(s)+'.jpg', HessMatrS)
			self.__responses.append(HessMatrS)
			#Calculating laplacian (trace of the hessian matrix)
			traceS = cv.CreateImage(image_size, cv.IPL_DEPTH_64F, 1)
			cv.Add(Dxx, Dyy, traceS)
			self.__traces.append(traceS)
Example #4
def analyze(settings):
    start_time = time.time()

    f = Filters(settings['code'], settings['filters'])
    tree = Tree(settings['path'], f.filters(), settings['antifilters'], settings)
    tree.output()

    time_sec = time.time() - start_time
    print ("\nanalyze time : {0:2.5f}sec".format(time_sec))
Example #5
 def prewittEdgeDetector(self,img,shape=(5,5)):
     filter = Filters()
     op = Operations()
     sx = filter.prewittKernel(shape,axis=0)
     dx = op.doConvolution(sx,img)
     sy = filter.prewittKernel(shape,axis=1)
     dy = op.doConvolution(sy,img)
     delta = np.power(np.add(np.power(dx,2),np.power(dy,2)),0.5)
     return delta
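
Note: the same Prewitt gradient magnitude can be cross-checked with
scipy.ndimage instead of the project's Filters/Operations helpers; a sketch:

import numpy as np
from scipy import ndimage

def prewitt_magnitude(img):
    img = img.astype(float)
    dx = ndimage.prewitt(img, axis=0)   # derivative along axis 0
    dy = ndimage.prewitt(img, axis=1)   # derivative along axis 1
    return np.hypot(dx, dy)             # sqrt(dx**2 + dy**2)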
Example #6
 def __init__(self,testDf,
              mainDf,segmentsStepsDf,
              correlator,
              spread = 15,
              timeStep = 1,
              ):
     # original database
     self.SmoothedDf = pd.io.parsers.read_csv(mainDf,index_col = 'index')
     # source for section imitation
     self.testDf = pd.io.parsers.read_csv(testDf,index_col = 'index')
     self.segmentsStepsDf = pd.io.parsers.read_csv(segmentsStepsDf)
     self.powerCorrelator = correlator
     # output database containing predicted points
     self.predicted_df = None
     # by default the number of unpredicted segments is 0
     self.unpredicted = 0
     # the dictionary of coefficients of correlation
     self.corrCoeffs = {}
     # range of indexes in the test data frame:
     # the span between indexes of the grabbed section.
     # In other words, roughly the user's waiting time in seconds
     self.spread = spread
     # the constant time step between rows in the database
     self.timeStep = timeStep
     # The number of laccids grabbed by the user. For the "byLacCidMod"
     # algorithm it must be more than 2; otherwise it works like the
     # "byLacCid" algorithm.
     # self.numLC = numLC
     self.filters = Filters()
Example #7
File: client.py Project: wezu/a4p
    def __init__(self):
        log.debug('Starting Client')
        #open a window... but first set all the needed props
        wp=self.loadWindoProperites()
        #open the window
        base.openMainWindow(props = wp)
        #base.setBackgroundColor(0.06, 0.1, 0.12, 1)
        base.setBackgroundColor(0.0, 0.0, 0.0, 1)
        base.disableMouse()
        base.enableParticles()

        #needed to determine what window event fired
        self.window_focused=base.win.getProperties().getForeground()
        self.window_x=base.win.getXSize()
        self.window_y=base.win.getYSize()
        self.window_minimized=base.win.getProperties().getMinimized()

        #filter manager, post process
        self.filters=Filters()

        #audio sound effects (sfx) + music
        self.audio=Audio()
        self.audio.setMusic('background')
        self.audio.playMusic()

        #light manager
        self.lights=LightManager()

        #setup the user interface (gui+key/mouse bind)
        self.ui=UserInterface()

        #skybox
        self.sun_and_sky=Skybox(self.lights)

        #player (character) droid
        self.droid=PCDroid(self.ui)

        #some vars used later
        self.map_name=None
        self.loading_status=set()
        self.level_root=render.attachNewNode('level_root')
        self.level_root.hide()
        self.is_in_game=False

        #events
        base.win.setCloseRequestEvent('exit-event')
        self.accept('exit-event',self.onClientExit)
        self.accept( 'window-event', self.onWindowEvent)
        self.accept( 'window-reset', self.onWindowReset)
        self.accept( 'client-mouselock', self.setMouseLock)
        self.accept( 'load-level', self.onLevelLoad)
        self.accept( 'loading-done', self.onLoadingDone)
        self.accept( 'reload-shaders', self.onShaderReload)
        self.accept( 'client-set-team', self.onTeamCahnge)
        self.accept( 'client-quit', self.onQuit)
        # Task
        taskMgr.add(self.update, 'client_update')

        log.debug('Client started')
Example #8
    def test_linregress(self):
        """
        """
        fname = '/home/dave/Repositories/public/0_davidovitch/'
        fname += 'freeyaw-ojf-wt-tests/data/calibrated/DataFrame/'
        fname += '0212_run_064_9.0ms_dc1_freeyawplaying_stiffblades'
        fname += '_coning_pwm1000_highrpm.h5'
        res = pd.read_hdf(fname, 'table')
        time = res.time.values
        sps = 1.0 / np.diff(time).mean()

        freq_down = 0.1
        window = 4.0

        ff = Filters()

        data = res.rpm.values
        data_f = ff.butter_lowpass(sps, data, order=2, cutoff_hz=1.0)
        time_down = np.arange(time[0], time[-1], freq_down)
        data_f_down = sp.interpolate.griddata(time, data_f, time_down)
        regress = ff.linregress(time_down, data_f_down, int(window/freq_down))
        diff = np.diff(data_f_down) / freq_down

        plt.figure('rpm')
        plt.plot(time, data, 'r-')
        plt.plot(time_down, data_f_down, 'k--')
        plt.twinx()
        plt.plot(time_down[:-int(window/freq_down)], np.abs(regress[:,0]), 'b--')
        plt.plot(time_down[:-1], np.abs(diff), 'g--')
        plt.ylim([0, 5])
        plt.grid()

        data = res.yaw_angle.values
        data_f = ff.butter_lowpass(sps, data, order=2, cutoff_hz=1.0)
        data_f_down = sp.interpolate.griddata(time, data_f, time_down)
        regress = ff.linregress(time_down, data_f_down, int(window/freq_down))
        diff = np.diff(data_f_down) / freq_down

        plt.figure('yaw')
        plt.plot(time, data, 'r-')
        plt.plot(time_down, data_f_down, 'k--')
        plt.twinx()
        plt.plot(time_down[:-int(window/freq_down)], np.abs(regress[:,0]), 'b--')
        plt.plot(time_down[:-1], np.abs(diff), 'g--')
        plt.ylim([0, 5])
        plt.grid()
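
Note: ff.linregress is used above as if it returned one (slope, intercept)
row per sliding window of int(window/freq_down) samples, with regress[:,0]
being the local slope. A sketch under that assumed semantics (not the
helper's actual source):

import numpy as np

def rolling_linregress(time, data, ws):
    """Least-squares line fit over every sliding window of ws samples."""
    out = np.empty((len(data) - ws, 2))
    for i in range(len(data) - ws):
        # np.polyfit with deg=1 returns (slope, intercept)
        out[i] = np.polyfit(time[i:i + ws], data[i:i + ws], 1)
    return out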
Example #9
  def __init__(self):
    """
    Configuration
    """

    # Camera settings
    self.FRAME_WIDTH = 341
    self.FRAME_HEIGHT = 256
    self.flip_camera = True # Mirror image
    self.camera = cv2.VideoCapture(1)

    # ...you can also use a test video for input
    #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov"
    #self.camera = cv2.VideoCapture(video)
    #self.skip_input(400) # Skip to an interesting part of the video

    if not self.camera.isOpened():
        print "couldn't load webcam"
        return
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH)
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT)

    self.filters_dir = "filters/" # Filter settings in trackbar
    self.filters_file = "filters_default"

    # Load filter settings
    current_config = self.filters_dir + self.filters_file
    self.filters = Filters(current_config)

    # No actions will be triggered in test mode
    # (can be used to adjust settings at runtime)
    self.test_mode = False

    # Create a hand detector
    # In fact, this is a wrapper for many detectors
    # to increase detection confidence
    self.detector = Detector(self.filters.config)

    # Knowledge base for all detectors
    self.kb = KB()
    # Create gesture recognizer.
    # A gesture consists of a motion and a hand state.
    self.gesture = Gesture()

    # The action module executes keyboard and mouse commands
    self.action = Action()

    # Show output of detectors
    self.output = Output()

    self.run()
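
Note: run() is not shown here; a capture loop of the kind it presumably
drives could look like the hypothetical sketch below (the real pipeline
hands each frame to Detector, Gesture and Action instead of just showing it):

import cv2

cap = cv2.VideoCapture(1)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.flip(frame, 1)             # mirror, as flip_camera suggests
    cv2.imshow('preview', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
        break
cap.release()
cv2.destroyAllWindows()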
Example #10
class WordExtractor(object):

    def __init__(self, output_file, get_word_freq = None):
        self.get_word_freq = get_word_freq
        self.new_words = wordb.open(output_file)
        self.filters = Filters()
        self.n_killed = 0
        self.n_added = 0

    def __call__(self, words):
        self.process_words(words, threshold=2560000)
        
    def process_files(self, files):
        """process file in batch
        """
        for fn in files:
            with codecs.open(fn, 'r', 'utf-8') as f:
                self.process_file(f)

    def process_file(self, input_file):
        """process segmented file
        """
        words = set()
        for line in input_file:
            words.update(line.split(u'/'))
        self.process_words(words)

    def process_words(self, words, threshold=30000):
        for word in words:
            if self.filters.keep(word) and \
               word not in self.new_words:
                if self.get_word_freq:
                    freq = self.get_word_freq(word)
                    if freq > threshold:
                        logging.info("%s\tadded into db" % word)
                        self.new_words[word] = freq
                else:
                    logging.info("%s\tadded into db" % word)
                    self.new_words[word] = 1
                self.n_added += 1
            else:
                self.n_killed +=1
Example #11
 def prepareToCorrelation(self,df,by):
     """
     Prepare a dataframe for the correlation algorithm.
     :param df: input df {pd.DataFrame}
     :param by: group-by column
     :return: df                     : input dataframe (analyzed part dropped when not ready)
              originFrame            : frames to move
              analyzedFrameUpdated   : stable main frame containing the most reliable coordinates
              fewDataFrame           : frame with too little data
              readyToCorr            : whether the data frame is ready to be pushed to the correlation algorithm
     """
     _df = df.copy()
     fewDataFrame = pd.DataFrame()
     if by == 'User':
         self.startPoint,noises = Filters.noisyUser(_df,by,col = 'rawPower')
         #df = self.filters.rollingMean(df,'Power',by = by,noises = noises)
     if by == 'race_id':
         self.startPoint = _df['race_id'].iloc[0]
     analyzedFrame,originFrame = self.splitFrameByMinLen(_df,by)
     analyzedFrameUpdated,readyToCorr = self.checkBoundaries(originFrame,analyzedFrame,by)
     if (not readyToCorr):
         _df.drop(analyzedFrame.index,inplace=True)
         fewDataFrame = analyzedFrame
     return _df,originFrame,analyzedFrameUpdated,fewDataFrame,readyToCorr
Example #12
    def sort(self, elem_links, url):
        fex = Faup()
        f = Filters()
        f.load()
        self.r.switchDB(1)
        extend = True
        domainfilter = True
        schemefilter = True
        try:
            for link in elem_links:
                new_url = link
                self.r.switchDB(2)
                if not self.r.get(new_url) and new_url:
                    self.r.switchDB(1)
                    if not self.r.get(new_url):
                        fex.decode(new_url)
                        domain = fex.get_host()
                        if f.isfilteredscheme(fex.get_scheme()):
                            self.r.switchDB(2)
                            self.r.put(new_url, new_url)
                            schemefilter = False
                        if f.isfiltereddomains(domain):
                            self.r.switchDB(2)
                            self.r.put(new_url, new_url)
                            domainfilter = False
                        if f.isfilteredextention(fex.get_resource_path()):
                            extend = False
                            self.r.switchDB(2)
                            self.r.put(new_url, new_url)

                        if extend and domainfilter and schemefilter:
                            self.r.switchDB(1)
                            self.r.rpush('crawl', new_url)
                            self.queue.append(new_url)
        except TypeError as e:
            print "TypeError"
Example #13
 def __init__(self, output_file, get_word_freq = None):
     self.get_word_freq = get_word_freq
     self.new_words = wordb.open(output_file)
     self.filters = Filters()
     self.n_killed = 0
     self.n_added = 0
Example #14
def addReviewFilters(db, creator, user, review, reviewer_directory_ids, reviewer_file_ids, watcher_directory_ids, watcher_file_ids):
    cursor = db.cursor()

    cursor.execute("INSERT INTO reviewassignmentstransactions (review, assigner) VALUES (%s, %s) RETURNING id", (review.id, creator.id))
    transaction_id = cursor.fetchone()[0]

    def add(filter_type, directory_ids, file_ids):
        for directory_id, file_id in izip(directory_ids, file_ids):
            cursor.execute("""SELECT id, type
                                FROM reviewfilters
                               WHERE review=%s
                                 AND uid=%s
                                 AND directory=%s
                                 AND file=%s""",
                           (review.id, user.id, directory_id, file_id))

            row = cursor.fetchone()

            if row:
                old_filter_id, old_filter_type = row

                if old_filter_type == filter_type:
                    continue
                else:
                    cursor.execute("""DELETE FROM reviewfilters
                                            WHERE id=%s""",
                                   (old_filter_id,))
                    cursor.execute("""INSERT INTO reviewfilterchanges (transaction, uid, directory, file, type, created)
                                           VALUES (%s, %s, %s, %s, %s, false)""",
                                   (transaction_id, user.id, directory_id, file_id, old_filter_type))

            cursor.execute("""INSERT INTO reviewfilters (review, uid, directory, file, type, creator)
                                   VALUES (%s, %s, %s, %s, %s, %s)""",
                           (review.id, user.id, directory_id, file_id, filter_type, creator.id))
            cursor.execute("""INSERT INTO reviewfilterchanges (transaction, uid, directory, file, type, created)
                                   VALUES (%s, %s, %s, %s, %s, true)""",
                           (transaction_id, user.id, directory_id, file_id, filter_type))

    add("reviewer", reviewer_directory_ids, repeat(0))
    add("reviewer", repeat(0), reviewer_file_ids)
    add("watcher", watcher_directory_ids, repeat(0))
    add("watcher", repeat(0), watcher_file_ids)

    filters = Filters()
    filters.load(db, review=review, user=user)

    if user not in review.reviewers and user not in review.watchers and user not in review.owners:
        cursor.execute("""INSERT INTO reviewusers (review, uid, type)
                          VALUES (%s, %s, 'manual')""",
                       (review.id, user.id,))

    delete_files = set()
    insert_files = set()

    if watcher_directory_ids or watcher_file_ids:
        # Unassign changes currently assigned to the affected user.
        cursor.execute("""SELECT reviewfiles.id, reviewfiles.file
                            FROM reviewfiles
                            JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
                           WHERE reviewfiles.review=%s
                             AND reviewuserfiles.uid=%s""",
                       (review.id, user.id))

        for review_file_id, file_id in cursor:
            if not filters.isReviewer(db, user.id, file_id):
                delete_files.add(review_file_id)

    if reviewer_directory_ids or reviewer_file_ids:
        # Assign changes currently not assigned to the affected user.
        cursor.execute("""SELECT reviewfiles.id, reviewfiles.file
                            FROM reviewfiles
                            JOIN changesets ON (changesets.id=reviewfiles.changeset)
                            JOIN commits ON (commits.id=changesets.child)
                            JOIN gitusers ON (gitusers.id=commits.author_gituser)
                 LEFT OUTER JOIN usergitemails USING (email)
                 LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id AND reviewuserfiles.uid=%s)
                           WHERE reviewfiles.review=%s
                             AND (usergitemails.uid IS NULL OR usergitemails.uid!=%s)
                             AND reviewuserfiles.uid IS NULL""",
                       (user.id, review.id, user.id))

        for review_file_id, file_id in cursor:
            if filters.isReviewer(db, user.id, file_id):
                insert_files.add(review_file_id)

    if delete_files:
        cursor.executemany("DELETE FROM reviewuserfiles WHERE file=%s AND uid=%s",
                           izip(delete_files, repeat(user.id)))
        cursor.executemany("INSERT INTO reviewassignmentchanges (transaction, file, uid, assigned) VALUES (%s, %s, %s, false)",
                           izip(repeat(transaction_id), delete_files, repeat(user.id)))

    if insert_files:
        cursor.executemany("INSERT INTO reviewuserfiles (file, uid) VALUES (%s, %s)",
                           izip(insert_files, repeat(user.id)))
        cursor.executemany("INSERT INTO reviewassignmentchanges (transaction, file, uid, assigned) VALUES (%s, %s, %s, true)",
                           izip(repeat(transaction_id), insert_files, repeat(user.id)))

    return generateMailsForAssignmentsTransaction(db, transaction_id)
Example #15
def getReviewersAndWatchers(db, repository, commits=None, changesets=None, reviewfilters=None, applyfilters=True, applyparentfilters=False, parentfiltersonly=False):
    """getReviewersAndWatchers(db, commits=None, changesets=None) -> tuple

Returns a tuple containing two dictionaries, each mapping file IDs to
dictionaries mapping user IDs to sets of changeset IDs.  The first dictionary
defines the reviewers of each file, the second dictionary defines the watchers of
each file.  For any changes in a file for which no reviewer is identified, None
is used as a key in the dictionary instead of a real user ID."""

    if changesets is None:
        changesets = []
        changeset_utils.createChangesets(db, repository, commits)
        for commit in commits:
            changesets.extend(changeset_utils.createChangeset(db, None, repository, commit, do_highlight=False))

    cursor = db.cursor()

    filters = Filters()

    if applyfilters:
        if parentfiltersonly:
            filters.load(db, repository=repository.parent, recursive=True)
        else:
            filters.load(db, repository=repository, recursive=applyparentfilters)

    if reviewfilters:
        filters.addFilters(db, reviewfilters, sort=True)

    reviewers = {}
    watchers = {}

    for changeset in changesets:
        author_user_id = changeset.child.author.getUserId(db) if changeset.child else None

        cursor.execute("SELECT DISTINCT file FROM fileversions WHERE changeset=%s", (changeset.id,))

        for (file_id,) in cursor:
            reviewers_found = False

            for user_id, (filter_type, delegate) in filters.listUsers(db, file_id).items():
                try: assert isinstance(user_id, int)
                except: raise Exception, repr(filters.listUsers(db, file_id))

                if filter_type == 'reviewer':
                    if author_user_id != user_id:
                        reviewer_user_ids = [user_id]
                    elif delegate:
                        reviewer_user_ids = []
                        for delegate_user_name in delegate.split(","):
                            delegate_user = dbutils.User.fromName(db, delegate_user_name)
                            if delegate_user: reviewer_user_ids.append(delegate_user.id)
                            else: raise Exception, repr((user_id, delegate_user_name, file_id))
                    else:
                        reviewer_user_ids = []

                    for reviewer_user_id in reviewer_user_ids:
                        reviewers.setdefault(file_id, {}).setdefault(reviewer_user_id, set()).add(changeset.id)
                        reviewers_found = True
                else:
                    watchers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset.id)

            if not reviewers_found:
                reviewers.setdefault(file_id, {}).setdefault(None, set()).add(changeset.id)

    return reviewers, watchers
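
Note: the nesting described in the docstring is easy to misread; an
illustrative value of the returned pair (made-up IDs):

# file 42: user 7 reviews changesets 1001 and 1002, changeset 1003 has no
# reviewer yet; user 9 watches changeset 1001 of the same file
reviewers = {42: {7: set([1001, 1002]), None: set([1003])}}
watchers = {42: {9: set([1001])}}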
Example #16
 def marrHildrethDetector(self,img,slope_threshold,shape=(5,5),sigma=5):
     filter = Filters()
     op = Operations()
     log = filter.laplaceOfGaussianKernel(shape,sigma)
     detect = op.doConvolution(log,img)
     return detect
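
Note: Marr-Hildreth is a Laplacian-of-Gaussian followed by zero-crossing
detection, while the method above returns the LoG response only (its
slope_threshold argument is unused). A compact sketch of the full detector
with scipy.ndimage, as an illustration rather than the project's code:

import numpy as np
from scipy import ndimage

def marr_hildreth(img, sigma=5):
    log = ndimage.gaussian_laplace(img.astype(float), sigma)
    signs = np.sign(log)
    # edge candidates wherever the LoG response changes sign
    zc_rows = (np.diff(signs, axis=0) != 0)[:, :-1]
    zc_cols = (np.diff(signs, axis=1) != 0)[:-1, :]
    return zc_rows | zc_cols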
Example #17
    def setup_filter(self, time, data, **kwargs):
        """
        Load the calibration runs and convert voltage signal to yaw angles

        Parameters
        ----------

        time : ndarray(k)

        data : ndarray(k)

        Returns
        -------

        time_stair : ndarray(n)
            Average time stamp over the stair step

        data_stair : ndarray(n)
            Average value of the selected stair step


        """
        # time and data should both be 1D and have the same shape!
        assert time.shape == data.shape

        runid = kwargs.get('runid', self.runid)

        # smoothing method: splines or moving average
        smoothen = kwargs.get('smoothen', 'splines')
        # window length of the moving average in seconds
        smooth_window = kwargs.get('smooth_window', 2)

        # specify the window of the staircase
        #start, end = 30100, -30001
        start = kwargs.get('start', 0)
        end = kwargs.get('end', len(time))
        dt = kwargs.get('dt', 1)
        cutoff_hz = kwargs.get('cutoff_hz', None)
        self.points_per_stair = kwargs.get('points_per_stair', 20)
        # minimum required value of dt (or dt2) before a new stair is detected
        self.stair_step_tresh = kwargs.get('stair_step_tresh', 1)

#        plot_data = kwargs.get('plot_data', False)
#        respath = kwargs.get('respath', None)
#        run = kwargs.get('run', None)

        # sample rate of the signal
        sample_rate = calc_sample_rate(time)

        # prepare the data
        time = time[start:end]
        # the actual raw signal
        data = data[start:end]

        # -------------------------------------------------
        # Progress plotting
        # ----------------------------------------------
        if self.plt_progress:
            plt.figure()
            Pxx, freqs = plt.psd(data, Fs=sample_rate, label='data')
            plt.show()

            plt.figure()
            plt.plot(time, data, label='raw data')

        # -------------------------------------------------
        # setup plot
        # -------------------------------------------------
#        labels = np.ndarray(3, dtype='<U100')
#        labels[0] = label
#        labels[1] = 'yawchan derivative'
#        labels[2] = 'psd'

        # escape underscores for latex printing
        grandtitle = self.figfile.replace('_', '\_')
        plot = plotting.A4Tuned(scale=1.5)
        plot.setup(self.figpath+self.figfile+'_filter', nr_plots=3,
                   grandtitle=grandtitle, wsleft_cm=1.5, wsright_cm=1.8,
                   hspace_cm=1.2, size_x_perfig=10, size_y_perfig=5,
                   wsbottom_cm=1.0, wstop_cm=1.5)

        # -------------------------------------------------
        # plotting original and smoothed signal
        # -------------------------------------------------
        ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 1)
        ax1.plot(time, data, 'b', label='raw data', alpha=0.6)
        data_raw = data.copy()

        # -------------------------------------------------
        # signal frequency filtering, if applicable
        # -------------------------------------------------
        # filter the local derivatives if applicable
        if cutoff_hz:
            filt = Filters()
            data_filt, N, delay = filt.fir(time, data, ripple_db=20,
                            freq_trans_width=0.5, cutoff_hz=cutoff_hz,
                            figpath=self.figpath,
                            figfile=self.figfile + 'filter_design',
                            sample_rate=sample_rate, plot=False,)

            if self.plt_progress:
                # add the results of the filtering technique
                plt.plot(time[N-1:], data_filt[N-1:], 'r', label='freq filt')

            data = data_filt
            time = time[N-1:]#-delay

        else:
            N = 1

        # -------------------------------------------------------
        # smoothen the signal with some splines or moving average
        # -------------------------------------------------------
        # NOTE: the smoothing will make the transitions also smoother. This
        # is not good. The edges of the stair need to be steep!
        # for the binary data this is actually a good thing, since the dt's
        # are almost always the same between time steps. We would otherwise
        # need a dt based on several time steps
        if smoothen == 'splines':
            print 'start applying spline ...',
            uni_spline = UnivariateSpline(time, data)
            data = uni_spline(time)
            print 'done!'
            NN = 0 # no time shift due to filtering?
            if self.plt_progress:
                plt.plot(time, data, label='spline data')

        elif smoothen == 'moving':
            print 'start calculating moving average ...',
            filt = Filters()
            # convert the smoothing window from seconds to number of samples
            ws = int(smooth_window*sample_rate)
            data = filt.smooth(data, window_len=ws, window='hanning')
            NN = len(data) - len(time)
            data = data[NN:]
            print 'done!'

            if self.plt_progress:
                plt.plot(time, data, label='moving average')

        else:
            raise ValueError, 'smoothen method should be moving or splines'

        # -------------------------------------------------
        # additional smoothening: downsampling
        # -------------------------------------------------
        # and up again in order not to break the plotting further down
        time_down = np.arange(time[0], time[-1], 0.1)
        data_down = sp.interpolate.griddata(time, data, time_down)
        # and upsampling again
        data = sp.interpolate.griddata(time_down, data_down, time)

        # -------------------------------------------------
        # plotting original and smoothed signal
        # -------------------------------------------------
        ax1.plot(time, data, 'r', label='data smooth')
        ax1.grid(True)
        leg1 = ax1.legend(loc='best')
        leg1.get_frame().set_alpha(0.5)
        ax1.set_title('smoothing method: ' + smoothen)

        # -------------------------------------------------
        # local derivatives of the signal and filtering
        # -------------------------------------------------
        data_dt = np.ndarray(data.shape)
        data_dt[1:] = data[1:] - data[0:-1]
        data_dt[0] = np.nan
        data_dt = np.abs(data_dt)

        # frequency filter was applied here originally
        data_filt_dt = data_dt

        # if no threshold is given, just take 20% of the max value
        dt_max = np.nanmax(np.abs(data_filt_dt))*0.2
        dt_treshold = kwargs.get('dt_treshold', dt_max)

        # -------------------------------------------------
        # filter dt or dt2 above a certain threshold?
        # -----------------------------------------------
        # only keep values which are steady, meaning dt signal is low!

        if dt == 2:
            tmp = np.ndarray(data_filt_dt.shape)
            tmp[1:] = data_filt_dt[1:] - data_filt_dt[0:-1]
            tmp[0] = np.nan
            data_filt_dt = tmp
        # based upon the filtering, only select data points for which the
        # filtered derivative is below a certain threshold
        staircase_i = np.abs(data_filt_dt) >= dt_treshold
        # reduce to 1D
        staircase_arg=np.argwhere(np.abs(data_filt_dt)<=dt_treshold).flatten()

        # -------------------------------------------------
        # replace values where dt is too high with NaN
        # ------------------------------------------------

        # ---------------------------------
        # METHOD version 1 (active), slower because of the staircase_arg
        # computation above
        data_masked = data.copy()
        data_masked[staircase_i] = np.nan

        data_masked_dt = data_filt_dt.copy()
        data_masked_dt[staircase_i] = np.nan

        data_trim = data[staircase_arg]
        time_trim = time[staircase_arg]

        print 'max in data_masked_dt:', np.nanmax(data_masked_dt)
        # ---------------------------------
        # METHOD version 2, faster if staircase_arg is not required!
        ## make a copy of the original signal and fill in Nans on the selected
        ## values
        #data_masked = data.copy()
        #data_masked[staircase_i] = np.nan
        #
        #data_masked_dt = data_filt_dt.copy()
        #data_masked_dt[staircase_i] = np.nan
        #
        ## remove all the nan values
        #data_trim = data_masked[np.isnan(data_masked).__invert__()]
        #time_trim = time[np.isnan(data_masked).__invert__()]
        #
        #dt_noise_treshold = np.nanmax(data_masked_dt)
        #print 'max in data_masked_dt', dt_noise_treshold
        # ---------------------------------

#        # figure out which dt's are above the treshold
#        data_trim2 = data_trim.copy()
#        data_trim2.sort()
#        data_trim2.
#        # where the dt of the masked format is above the noise treshold,
#        # we have a stair
#        data_trim_dt = np.abs(data_trim[1:] - data_trim[:-1])
#        argstairs = data_trim_dt.__gt__(dt_noise_treshold)
#        data_trim2 = data_trim_dt.copy()
#        data_trim_dt.sort()
#        data_trim_dt.__gt__(dt_noise_treshold)

        # -------------------------------------------------
        # intermediate checking of the signal
        # -------------------------------------------------
        if self.plt_progress:
            # add the results of the filtering technique
            plt.plot(time[N-1:], data_masked[N-1:], 'rs', label='data red')
            plt.legend(loc='best')
            plt.grid(True)
            plt.twinx()
#            plt.plot(time, data_filt_dt, label='data_filt_dt')
            plt.plot(time, data_masked_dt, 'm', label='data\_masked\_dt',
                     alpha=0.4)
            plt.legend(loc='best')
            plt.show()
            print 'saving plt_progress:',
            print self.figpath+'filter_design_progress.png'
            plt.savefig(self.figpath+'filter_design_progress.png')

        # -------------------------------------------------
        # check if we have had sane filtering
        # -------------------------------------------------

        print 'data      :', data.shape
        print 'data_trim :', data_trim.shape
        print 'trim ratio:', len(data)/len(data_trim)

        # there should be at least one True value
        assert staircase_i.any()
        # they can't all be True: that would mean the filtering is too heavy
        if len(data_trim) < len(data)*0.01:
            msg = 'dt_treshold is too low, not enough data left'
            raise ValueError, msg
        # if no data is filtered at all, filtering is too conservative
        elif len(data_trim) > len(data)*0.95:
            msg = 'dt_treshold is too high, too much data left'
            raise ValueError, msg
        # if the data array is too big, abort on memory concerns
        if len(data_trim) > 200000:
            msg = 'too much data points for stair case analysis (cfr memory)'
            raise ValueError, msg

        # -------------------------------------------------
        # read the average value over each stair (time and data)
        # ------------------------------------------------
        #try:
            ##np.save('time_trim', time_trim)
            ##np.save('data_trim', data_trim)
            ##np.save('staircase_arg', staircase_arg)
            ##tmp = np.array([self.points_per_stair, self.stair_step_tresh])
            ##np.save('tmp', tmp)
            #data_ordered, time_stair, data_stair, arg_stair \
                #= cython_func.order_staircase(time_trim, data_trim,
                #staircase_arg, self.points_per_stair, self.stair_step_tresh)
        #except ImportError:
        data_ordered, time_stair, data_stair, arg_stair \
            = self.order_staircase(time_trim, data_trim, staircase_arg)

        # convert the arg_stair to a flat set and replace start/stop pairs
        # with all indices in between. Now we can select all stair values
        # in the raw dataset
        arg_st_fl = np.empty(data_raw.shape, dtype=np.int)
        i = 0
        for k in range(arg_stair.shape[1]):
            #print '%6i %6i' % (arg_stair[0,k],arg_stair[1,k])
            tmp = np.arange(arg_stair[0,k], arg_stair[1,k]+1, 1, dtype=np.int)
            #print tmp, '->', i, ':', i+len(tmp)
            arg_st_fl[i:i+len(tmp)] = tmp
            i += len(tmp)
        # remove the unused elements from the array
        arg_st_fl = arg_st_fl[:i]

        # -------------------------------------------------
        # plotting of smoothed signal and stairs
        # -------------------------------------------------
        ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 2)
        ax1.plot(time, data, label='data smooth', alpha=0.6)
        # add the results of the filtering technique

        ax1.plot(time[N-1:], data_masked[N-1:], 'r', label='data masked')
#        ax1.plot(time[N-1:], data_filt[N-1:], 'g', label='data_filt')
        # also include the selected stair data
        figlabel = '%i stairs' % data_stair.shape[0]
        ax1.plot(time_stair, data_stair, 'ko', label=figlabel, alpha=0.4)
        ax1.grid(True)
        # the legend, on or off?
        #leg1 = ax1.legend(loc='upper left')
        #leg1.get_frame().set_alpha(0.5)
        # -------------------------------------------------
        # plotting derivatives on right axis
        # -------------------------------------------------
        ax1b = ax1.twinx()
#        ax1b.plot(time[N:]-delay,data_s_dt[N:],alpha=0.2,label='data_s_dt')
        ax1b.plot(time[N:], data_filt_dt[N:], 'r', alpha=0.35,
                  label='data\_filt\_dt')
        majorFormatter = FormatStrFormatter('%8.1e')
        ax1b.yaxis.set_major_formatter(majorFormatter)
#        ax1b.plot(time[N:], data_masked_dt[N:], 'b', alpha=0.2,
#                  label='data_masked_dt')
#        ax1b.plot(time[N-1:]-delay, filtered_x_dt[N-1:], alpha=0.2)
#        leg1b = ax1b.legend(loc='best')
#        leg1b.get_frame().set_alpha(0.5)
#        ax1b.grid(True)

        # -------------------------------------------------
        # 3rd plot to check if the raw stair signal is ok
        # -------------------------------------------------

        ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 3)
        ax1.plot(time[arg_st_fl], data_raw[arg_st_fl], 'k+', label='rawstair',
                 alpha=0.1)
        ax1.plot(time[N-1:], data_masked[N-1:], 'r', label='data masked')
        ax1.set_xlabel('time [s]')

        # -------------------------------------------------
        # the power spectral density
        # -------------------------------------------------
#        ax3 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 3)
#        Pxx, freqs = ax3.psd(data, Fs=sample_rate, label='data smooth')
##        Pxx, freqs = ax3.psd(data_dt, Fs=sample_rate, label='data_dt')
##        Pxx, freqs = ax3.psd(data_filt_dt[N-1:], Fs=sample_rate,
##                             label='data_filt_dt')
#        ax3.legend()
##        print Pxx.shape, freqs.shape

        plot.save_fig()

        # -------------------------------------------------
        # get amplitudes of the stair edges
        # -------------------------------------------------

#        # max step
#        data_trim_dt_sort = data_trim_dt.sort()[0]
#        # estimate at what kind of a delta we are looking for when changing
#        # stairs
#        data_dt_std = data_trim_dt.std()
#        data_dt_mean = (np.abs(data_trim_dt)).mean()
#
#        time_data_dt = np.transpose(np.array([time, data_filt_dt]))
#        data_filt_dt_amps = HawcPy.dynprop().amplitudes(time_data_dt, h=1e-3)
#
#        print '=== nr amplitudes'
#        print len(data_filt_dt_amps)
#        print data_filt_dt_amps

        # -------------------------------------------------
        # save the data
        # -------------------------------------------------

        filename = runid + '-time_stair'
        np.savetxt(self.pprpath + filename, time_stair)
        filename = runid + '-data_stair'
        np.savetxt(self.pprpath + filename, data_stair)

        # in order to maintain backwards compatibility, save the arguments
        # of the stair to self
        self.arg_st_fl = arg_st_fl # flat, contains all indices on the stairs
        # start/stop indices for stair k = arg_stair[0,k], arg_stair[1,k]
        self.arg_stair = arg_stair


        return time_stair, data_stair
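
Note: the heart of setup_filter is the stair selection: keep only samples
whose local derivative stays below a threshold, then average each remaining
plateau. The selection step in isolation, on a synthetic staircase (the 20%
threshold mirrors the default above; the signal is made up):

import numpy as np

t = np.linspace(0.0, 10.0, 2001)
data = np.floor(t)                        # clean staircase test signal

data_dt = np.empty_like(data)
data_dt[0] = np.nan
data_dt[1:] = np.abs(np.diff(data))

dt_treshold = 0.2 * np.nanmax(np.abs(data_dt))
steady = np.abs(data_dt) <= dt_treshold   # True on the flat parts
time_trim, data_trim = t[steady], data[steady]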
Example #18
    def setup_filter(self, respath, run, **kwargs):
        """
        Load the calibration runs and convert voltage signal to yaw angles
        """

        # specify the window of the staircase
        #start, end = 30100, -30001
        start = kwargs.get('start', None)
        end = kwargs.get('end', None)
        figpath = kwargs.get('figpath', None)
#        figfile = kwargs.get('figfile', None)
        dt_treshold = kwargs.get('dt_treshold', None)
#        plot_data = kwargs.get('plot_data', False)
#        respath = kwargs.get('respath', None)
#        run = kwargs.get('run', None)

        # load the dspace mat file
        dspace = ojfresult.DspaceMatFile(respath + run)
        # the yaw channel
        ch = 6
        # or a more robust way of determining the channel number
        ch = dspace.labels_ch['Yaw Laser']

        # sample rate of the signal
        sample_rate = calc_sample_rate(dspace.time)

        # file name based on the run file
        figfile = dspace.matfile.split('/')[-1] + '_ch' + str(ch)

        # prepare the data
        time = dspace.time[start:end]
        # the actual yaw signal
        data = dspace.data[start:end,ch]

        # -------------------------------------------------
        # smoothen the signal with some splines
        # -------------------------------------------------
        # NOTE: the smoothing will make the transitions also smoother. This
        # is not good. The edges of the stair need to be steep!
#        smoothen = UnivariateSpline(dspace.time, dspace.data[:,ch], s=2)
#        data_s_full = smoothen(dspace.time)
#        # first the derivatices
#        data_s_dt = data_s_full[start+1:end+1]-data_s_full[start:end]
#        # than cut it off
#        data_s = data_s_full[start:end]

        # -------------------------------------------------
        # local derivatives of the yaw signal and filtering
        # -------------------------------------------------
        data_dt = dspace.data[start+1:end+1,ch]-dspace.data[start:end,ch]
        # filter the local derivatives
        filt = Filters()
        data_filt, N, delay = filt.fir(time, data, ripple_db=20,
                        freq_trans_width=0.5, cutoff_hz=0.3, plot=False,
                        figpath=figpath, figfile=figfile + 'filter_design',
                        sample_rate=sample_rate)

        data_filt_dt = np.ndarray(data_filt.shape)
        data_filt_dt[1:] = data_filt[1:] - data_filt[0:-1]
        data_filt_dt[0] = np.nan

        # -------------------------------------------------
        # smoothen the signal with some splines
        # -------------------------------------------------
#        smoothen = UnivariateSpline(time, data_filt, s=2)
#        data_s = smoothen(time)
#        # first the derivatices
#        data_s_dt = np.ndarray(data_s.shape)
#        data_s_dt[1:] = data_s[1:]-data_s[:-1]
#        data_s_dt[0] = np.nan

        # -------------------------------------------------
        # filter values above a certain threshold
        # ------------------------------------------------
        # only keep values which are steady, meaning dt signal is low!

        # based upon the filtering, only select data points for which the
        # filtered derivative is below a certain threshold
        staircase_i = np.abs(data_filt_dt) >= dt_treshold
        # make a copy of the original signal and fill in Nans on the selected
        # values
        data_reduced = data.copy()
        data_reduced[staircase_i] = np.nan
        data_reduced_dt = np.ndarray(data_reduced.shape)
        data_reduced_dt[1:] = np.abs(data_reduced[1:] - data_reduced[:-1])
        data_reduced_dt[0] = np.nan

        nonnan_i = ~np.isnan(data_reduced_dt)
        dt_noise_treshold = data_reduced_dt[nonnan_i].max()
        print ' dt_noise_treshold ', dt_noise_treshold

        # remove all the nan values
        data_trim = data_reduced[~np.isnan(data_reduced)]
        time_trim = time[~np.isnan(data_reduced)]
#        # figure out which dt's are above the treshold
#        data_trim2 = data_trim.copy()
#        data_trim2.sort()
#        data_trim2.
#        # where the dt of the reduced format is above the noise treshold,
#        # we have a stair
#        data_trim_dt = np.abs(data_trim[1:] - data_trim[:-1])
#        argstairs = data_trim_dt.__gt__(dt_noise_treshold)
#        data_trim2 = data_trim_dt.copy()
#        data_trim_dt.sort()
#        data_trim_dt.__gt__(dt_noise_treshold)

        # -------------------------------------------------
        # read the average value over each stair (time and data)
        # ------------------------------------------------
        data_ordered, time_stair, data_stair = self.order_staircase(time_trim,
                                        data_trim, dt_noise_treshold*4.)

        # -------------------------------------------------
        # setup plot
        # -------------------------------------------------
        labels = np.ndarray(3, dtype='<U100')
        labels[0] = dspace.labels[ch]
        labels[1] = 'yawchan derivative'
        labels[2] = 'psd'

        plot = plotting.A4Tuned()
        title = figfile.replace('_', ' ')
        plot.setup(figpath+figfile+'_filter', nr_plots=2, grandtitle=title,
                   figsize_y=20, wsleft_cm=2., wsright_cm=2.5)

        # -------------------------------------------------
        # plotting of signal
        # -------------------------------------------------
        ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 1)
        ax1.plot(time, data, label='data')
        # add the results of the filtering technique
        ax1.plot(time[N-1:], data_reduced[N-1:], 'r', label='data red')
#        ax1.plot(time[N-1:], data_filt[N-1:], 'g', label='data_filt')
        # also include the selected stair data
        label = '%i stairs' % data_stair.shape[0]
        ax1.plot(time_stair, data_stair, 'ko', label=label, alpha=0.2)
        ax1.grid(True)
        ax1.legend(loc='lower left')
        # -------------------------------------------------
        # plotting derivatives on right axis
        # -------------------------------------------------
        ax1b = ax1.twinx()
#        ax1b.plot(time[N:]-delay,data_s_dt[N:],alpha=0.2,label='data_s_dt')
        ax1b.plot(time[N:], data_filt_dt[N:], 'r', alpha=0.2,
                  label='data filt dt')
#        ax1b.plot(time[N:], data_reduced_dt[N:], 'b', alpha=0.2,
#                  label='data_reduced_dt')
#        ax1b.plot(time[N-1:]-delay, filtered_x_dt[N-1:], alpha=0.2)
        ax1b.legend()
        ax1b.grid(True)

        # -------------------------------------------------
        # the power spectral density
        # -------------------------------------------------
        ax3 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 2)
        Pxx, freqs = ax3.psd(data, Fs=sample_rate, label='data')
        Pxx, freqs = ax3.psd(data_dt, Fs=sample_rate, label='data dt')
#        Pxx, freqs = ax3.psd(data_s_dt, Fs=sample_rate, label='data_s_dt')
        Pxx, freqs = ax3.psd(data_filt_dt[N-1:], Fs=sample_rate,
                             label='data filt dt')
        ax3.legend()
#        print Pxx.shape, freqs.shape
        plot.save_fig()

        # -------------------------------------------------
        # get amplitudes of the stair edges
        # -------------------------------------------------

#        # max step
#        data_trim_dt_sort = data_trim_dt.sort()[0]
#        # estimate at what kind of a delta we are looking for when changing
#        # stairs
#        data_dt_std = data_trim_dt.std()
#        data_dt_mean = (np.abs(data_trim_dt)).mean()
#
#        time_data_dt = np.transpose(np.array([time, data_filt_dt]))
#        data_filt_dt_amps = HawcPy.dynprop().amplitudes(time_data_dt, h=1e-3)
#
#        print '=== nr amplitudes'
#        print len(data_filt_dt_amps)
#        print data_filt_dt_amps

        return time_stair, data_stair
Example #19
class PosAlgorithm():
    def __init__(self,testDf,
                 mainDf,segmentsStepsDf,
                 correlator,
                 spread = 15,
                 timeStep = 1,
                 ):
        # original database
        self.SmoothedDf = pd.io.parsers.read_csv(mainDf,index_col = 'index')
        # source for section imitation
        self.testDf = pd.io.parsers.read_csv(testDf,index_col = 'index')
        self.segmentsStepsDf = pd.io.parsers.read_csv(segmentsStepsDf)
        self.powerCorrelator = correlator
        # output database containing predicted points
        self.predicted_df = None
        # by default the number of unpredicted segments is 0
        self.unpredicted = 0
        # the dictionary of coefficients of correlation
        self.corrCoeffs = {}
        # range of indexes in the test data frame:
        # the span between indexes of the grabbed section.
        # In other words, roughly the user's waiting time in seconds
        self.spread = spread
        # the constant time step between rows in the database
        self.timeStep = timeStep
        # The number of laccids grabbed by the user. For the "byLacCidMod"
        # algorithm it must be more than 2; otherwise it works like the
        # "byLacCid" algorithm.
        # self.numLC = numLC
        self.filters = Filters()

    def initGrabbedSet(self):
        # initialize the random segment (sets self.randSeg)
        self.generateRandomSegment()
        self.grabbedDf = self.getTestSection()

        #self.truthPoint = self.randomSampling(self.grabbedDf,numsamples = 1)
        self.truthPoint = self.grabbedDf.tail(1)
        self.trueSegment = self.truthPoint['segment'].unique().all()
    def generateRandomSegment(self):
        """
        Generate the segment where the user is located.
        :return:
        """

        # additional criterias
        """
        uniqueLc = self.testDf.groupby(['segment'])['laccid'].unique()
        # get rows with number of laccids more than ...
        byLc = uniqueLc[uniqueLc.apply(len)>=self.numLC]
        segments = list(byLc.keys())
        # get rows with number of races more than ...
        byRaces = self.testDf.groupby('segment')['race_id'].unique().apply(len)
        segments2 = list(byRaces[byRaces>self.numRaces].keys())
        # find the intersection of founded sets
        Segments = set(segments).intersection(segments2)
        """
        # generate test segment
        # simple random
        segLens = self.testDf.groupby(['segment']).apply(len)
        self.randSeg = segLens[segLens>self.spread].sample(1).keys()
        #print self.randSeg
    def getTestSection(self):
        """
        Get the dataframe grabbed by the user.
        :return:
        """
        #self.randSeg = ['074-075']
        df = self.testDf [self.testDf['segment'].isin(self.randSeg)]

        self.analyzedDf = df.copy()
        # filtrate
        self.analyzedDf = self.filters.medianFilter(self.analyzedDf)

        # Note! change 'ratio' to 'TimeStamp' for real situation or remove this sorting!
        #grouped = self.analyzedDf.groupby('ratio').sort('ratio')

        # generate test slice
        #firstStamp = 41.0
        firstStamp = random.sample(self.analyzedDf[self.analyzedDf.TimeStamp < max(self.analyzedDf.TimeStamp) - self.spread].TimeStamp,1)[0]
        print " : " + str(firstStamp)
        self.analyzedDf.loc[:,'grabbed'] = np.nan
        self.analyzedDf = self.analyzedDf.sort('ratio')
        #lastIx = self.analyzedDf[self.analyzedDf.TimeStamp == self.analyzedDf.TimeStamp[firstIx] + self.spread].index
        self.analyzedDf.loc[self.analyzedDf[(self.analyzedDf.TimeStamp>=firstStamp)&(self.analyzedDf.TimeStamp<=firstStamp+self.spread)].index,'grabbed'] = 1
        #self.analyzedDf.loc[i:i+self.spread,'grabbed'] = 1
        self.analyzedDf['grabbed'] = self.analyzedDf['grabbed'].fillna(0)
        grabbed_df = self.analyzedDf[self.analyzedDf['grabbed'] == 1]
        grabbed_df = grabbed_df.sort(['TimeStamp','laccid'])
        #grabbed_df['index'] = range(0,len(grabbed_df))
        return grabbed_df
    def predict(self,alg,useSmoothed):
        """
        Initialize the positioning prediction algorithm.
        :param alg: keyword for the algorithm
        :return:
        """
        self.corrCoeffs = {}
        if alg == "r":
            self.randomSampling(self.SmoothedDf)
        if alg == "lc":
            self.byLacCid()
        if alg == "lcM":
            self.byLacCidMod()
            #self.()
        if alg == "pc":
            self.byPowerCorr(useSmoothed = useSmoothed)


    def randomSampling(self,df,numsamples = 50):
        """
        Generate subset from input dataframe.
        :param df: dataframe to analyse
        :param numsamples: the number of samples
        :return:
        """
        rows = random.sample(df.index,numsamples)
        self.predictedDf = df.ix[rows]
        self.predicted_segments = self.predictedDf['segment'].unique()
        return self.predictedDf
    def byLacCid(self):
        """
        Use the LAC and CID identifiers of the base station only.
        :return:
        """
        self.grabbed_lc =  self.grabbedDf['laccid'].unique()
        self.predictedDf = self.SmoothedDf[self.SmoothedDf['laccid'].isin(self.grabbed_lc)]
        self.predicted_segments = self.predictedDf['segment'].unique()
        if self.predictedDf[self.predictedDf['segment'].isin(self.truthPoint['segment'].unique())].empty:
            self.unpredicted = 1
            print self.truthPoint
    def byLacCidMod(self):
        predictedInfo = pd.DataFrame()
        check = True
        laccids = self.grabbedDf.laccid.unique()
        if len(laccids) > 1:
            actives = self.grabbedDf.Active.unique()
            uniqueLevels = {'before':self.spread,'after':self.spread}
            changedLcs = self.extractChanges()
            if changedLcs:
                predictedInfo = self.findChanges(changedLcs,uniqueLevels)
            else:
                if len(actives) > 1:
                    predictedInfo = self.findActives(uniqueLevels)
            if not predictedInfo.empty:
                predictedDf = self.reduceByChanges(predictedInfo)
                self.predictedDf = predictedDf.sort(columns = ['segment','ratio','laccid'])
            if predictedInfo.empty:
                self.unpredicted = 1
        else:
            check = False
        return check
    def reduceByChanges(self,predictedInfo):
        predictedDf = pd.DataFrame()
        grouped = self.predictedDf.groupby('segment')
        for seg,gr in grouped:
            segInfo = predictedInfo[predictedInfo.segment == seg]
            for ix,row in segInfo.iterrows():
                #it might be more than one if segment contains several "change points"
                _gr = gr[(gr.ratio>=row['left'])&(gr.ratio<=row['right'])]
                predictedDf = pd.concat([predictedDf,_gr])
        predictedDf = predictedDf.drop_duplicates()
        return predictedDf
    def extractChanges(self):
        grouped = self.grabbedDf.groupby(['TimeStamp'])
        LcsPrev = np.array([])
        changed = []
        for ts,gr in grouped:
            uniqueLcs = gr.laccid.unique()
            if len(LcsPrev) > 0 and sorted(list(LcsPrev)) != sorted(list(uniqueLcs)):
                changed.append({'prev': list(LcsPrev), 'next': list(uniqueLcs)})
            LcsPrev = uniqueLcs
        return changed
    def findChanges(self,changedLcs,uniqueLevels):

        grouped = self.predictedDf.groupby(['segment','ratio'])
        predictedInfo = pd.DataFrame()
        LcsPrev = np.array([])
        ix = 0
        #LcsNext = None
        for pair in changedLcs:
            for (seg,rat),gr in grouped:
                if len(LcsPrev)>0:
                    uniqueLcsNext = gr.laccid.unique()
                    if set(pair['next']).issubset(uniqueLcsNext) and not set(pair['next']).issubset(LcsPrev):
                        leftDelta,rightDelta = self.findDiff(seg,uniqueLevels)
                        row = pd.DataFrame({'segment':seg,'left':prevPoint-leftDelta,'right':rat+rightDelta},index = [ix])
                        predictedInfo = pd.concat([predictedInfo,row])
                        ix+=1
                    LcsPrev = np.array([])
                if not len(LcsPrev)>0:
                    uniqueLcsPrev = gr.laccid.unique()
                    if set(pair['prev']).issubset(uniqueLcsPrev):
                        LcsPrev = uniqueLcsPrev
                        prevPoint = rat
                    else:
                        LcsPrev = np.array([])
                        prevPoint = None
        return predictedInfo


    def findActives(self,uniqueLevels):
        lcGrouped = self.grabbedDf.groupby('TimeStamp').\
            filter(lambda x : len(x)>1).groupby('TimeStamp').\
            apply(lambda x: np.unique(x['laccid']))
        laccidsAll = np.unique(lcGrouped.to_dict().values())

        filtered = self.predictedDf.groupby(['segment','ratio']).filter(lambda x : len(x)>1)

        activeGroup = filtered.groupby(['segment','ratio'])
        activePoints = activeGroup['laccid'].apply(np.unique)
        d = activePoints.apply(lambda x: sorted(list(x)) == sorted(laccidsAll)).to_dict()
        predictedFrame = pd.DataFrame([key for key in d.keys() if d[key]],columns = ['segment','ratio']).sort(['segment','ratio'])
        predictedInfo = self.extractBounds(predictedFrame,uniqueLevels)

        return predictedInfo
    def extractBounds(self,frame,uniqueLevels = 'default',clip = True):
        """
        Extract minimum and maximum ratios from the frame by each segment and clip predicted earlier frame by them.
        :param frame: frame contains "active points" with 2 fields : segment and ratio {pd.DataFrame}
        :param uniqueLevels: length of boundaries by which is need to clip (seconds) {int}
        :param clip: if need to clip or not {boolean}
        :return: clipped dataFrame {pd.DataFrame}
        """
        if uniqueLevels == 'default':
            uniqueLevels = {'after':0,'before':0}
        leftDelta,rightDelta = 0,0
        grouped = frame.groupby('segment')
        Predicted = pd.DataFrame()
        for seg,gr in grouped:
            _gr = pd.DataFrame({'segment':[seg]})
            if not clip:
                leftDelta,rightDelta = self.findDiff(seg,uniqueLevels)
            _gr.loc[:,'left'],_gr.loc[:,'right'] = min(gr['ratio'])-leftDelta,max(gr['ratio'])+rightDelta
            Predicted = pd.concat([Predicted,_gr])
        return Predicted
    def findDiff(self,seg,spread):
        #frame = frame.sort(['segment','ratio','laccid'])
        #diffs = np.diff(frame['ratio'],1)

        interpStep = self.segmentsStepsDf[self.segmentsStepsDf.segment == seg].interpStep.values[0]
            #diffs[diffs!=0][0]
        left,right = interpStep*spread['before'],interpStep*spread['after']
        return left,right

    def byLacCidMod2(self):
        """
        Use the information from neighbours laccids.
        :return:
        """
        #Note! Attach probability according  length of founded laccids for each step.
        # For example,probability for sublist with length 4 more than siblist with length 2,
        # because this means that in the first case 4 cell's stations were founded correctly, when
        # in the second case only 2. But it might be lack of the data in origin database.
        predicted_segments =[]
        # get the predicted frame and segments according to the base laccid algorithm
        #self.byLacCid()
        self.unpredicted = 0
        # iterate over combinations of the grabbed laccids, longest first
        for step in range(len(self.grabbed_lc),1,-1):
            # check all combinations
            for sublist in itertools.combinations(self.grabbed_lc,step):
                predicted_subDf = self.predictedDf[self.predictedDf['laccid'].isin(sublist)]
                segments = predicted_subDf['segment'].unique()
                # find the right segments for this combination
                for seg in segments:
                    seg_subDf = predicted_subDf[predicted_subDf['segment'] == seg]
                    lc_subList = seg_subDf['laccid'].unique()
                    if set(sublist).issubset(lc_subList):
                        predicted_segments.append(seg)
            if predicted_segments:
                break
        # if something was found, reduce the selection of predicted segments.
        if predicted_segments:
            self.predictedDf = self.predictedDf[self.predictedDf['segment'].isin(predicted_segments)]
        # if no segments were found, keep the segments from the base algorithm.
        else:
            self.unpredicted = 1

    def byPowerCorr(self,
                    useSmoothed = False):
        """
        The input segment should contains varying of signal. Only in this case
        Suppose that user's telephone grabbed not only the base station but neighbours too
        it is possible to identify truth position
        :return: predicted data frame.
        """
        self.unpredicted = 0
        self.resultsDf = pd.DataFrame()
        predictedDf = pd.DataFrame()
        fullPredicted = pd.DataFrame()

        # dataFrame containing control rows.
        ReducingTypes = {'byAbs':'maxLimit','byCorr':'localMaxima'}
        # 1. Split phone data on base step's sections.
        if useSmoothed:
            #self.interpPowers = self.grabbedDf.groupby(['laccid'])['Power'].apply(list).to_dict()
            self.interpPowers = list(self.grabbedDf['Power'])
        else:
            self.interpolateByTimeStep()
        # 2. Compare powers of the grabbed log with powers from the database:
        # a) If the variance of the grabbed log is close to zero --> compare means of the absolute Power values.
        # b) Else --> compare the correlation coefficients:
        #       If corrCoeff < 0 : drop these indexes from the predicted dataFrame
        #       If corrCoeff > 0 : find local maxima in the list of corrCoeffs and
        #                          drop all the others from the predicted dataFrame
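        #    e.g. a flat grabbed trace like [-80, -80, -80] has near-zero variance,
        #    so its mean absolute power is compared against the database ('byAbs');
        #    a varying trace like [-80, -62, -75] is compared via correlation ('byCorr').
        #    (The traces here are illustrative values, not project data.)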

        absMeans = self.powerCorrelator.analyzeLC(self.grabbedDf.groupby(['laccid'])['Power'].apply(list).to_dict())
        # Extract indexes iteratively
        powersDf = self.predictedDf.groupby(['segment'])
        first,last = 0,0
        for (seg,SegLcGroup) in powersDf:
            #analyzedSection = self.interpPowers[lc]
            analyzedSection = self.interpPowers
            if len(self.grabbed_lc) == 1:
                method = self.powerCorrelator.checkPredictionMethod(self.grabbed_lc[0], absMeans)
            else:
                method = 'byCorr'
            redType = ReducingTypes[method]
            predictedPart,allPredicted,last = self.powerCorrelator.loopThroughLaccid(SegLcGroup,method,analyzedSection,redType,return_all=True,last = last)

            predictedPart['sliceNumber'] = range(first,last)
            first = last
            predictedDf = pd.concat([predictedDf,predictedPart])
            fullPredicted = pd.concat([fullPredicted,allPredicted])

        if not predictedDf.empty:
            controlCheck = 'controls' not in predictedDf.columns.values
            if controlCheck:
                print ""
            self.predictedDf = predictedDf
            self.fullPredicted = fullPredicted
        else:
            self.unpredicted = 1

    def interpolateByTimeStep(self):
        """
        Linear interpolation of grabbed log by the constant.
        :return: the dictionary were key is the LAC-CID
        and value is the array of interpolated powers
        """
        self.interpPowers = {}
        old = self.grabbedDf.groupby(['laccid'])['TimeStamp']\
            .apply(lambda x: list((x -min(x))/1000))

        new = self.grabbedDf.groupby(['laccid'])['TimeStamp']\
            .apply(lambda x: range(0,max(x -min(x))/1000+1,self.timeStep))
        for lc in old.keys():
            self.interpPowers[lc] = np.interp(new[lc],
                                         old[lc],
                                         self.grabbedDf.loc[self.grabbedDf['laccid'] == lc, 'Power'])
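
# --- illustration, not part of the original class ---
# interpolateByTimeStep() resamples each laccid's power trace onto a regular
# time grid with np.interp. A minimal standalone sketch of the same idea,
# with hypothetical sample data, assuming only numpy:
import numpy as np

def resample_power_trace(timestamps_ms, powers, time_step=1):
    """Linearly resample an irregular power trace onto a regular grid (seconds).

    Assumes timestamps_ms is sorted ascending.
    """
    t = (np.asarray(timestamps_ms, dtype=float) - min(timestamps_ms)) / 1000.0
    grid = np.arange(0, t.max() + time_step, time_step)
    return grid, np.interp(grid, t, powers)

# hypothetical samples at 0 s, 1.5 s and 4 s, resampled onto a 1 s grid:
# grid, p = resample_power_trace([0, 1500, 4000], [-80.0, -75.0, -90.0])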
Example #20
class Tracker(object):
  """
  This is the main program which gives a high-level view
  of all the running subsystems. It connects camera input with
  output in form of "actions" (such as keyboard shortcuts on the users behalf).
  This is done by locating a hand in an image and detecting features,
  like the number of fingers, and trying to match that data with a
  known gesture.
  """

  def __init__(self):
    """
    Configuration
    """

    # Camera settings
    self.FRAME_WIDTH = 341
    self.FRAME_HEIGHT = 256
    self.flip_camera = True # Mirror image
    self.camera = cv2.VideoCapture(1)

    # ...you can also use a test video for input
    #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov"
    #self.camera = cv2.VideoCapture(video)
    #self.skip_input(400) # Skip to an interesting part of the video

    if not self.camera.isOpened():
        print "couldn't load webcam"
        return
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH)
    #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT)

    self.filters_dir = "filters/" # Filter settings in trackbar
    self.filters_file = "filters_default"

    # Load filter settings
    current_config = self.filters_dir + self.filters_file
    self.filters = Filters(current_config)

    # No actions will be triggered in test mode
    # (can be used to adjust settings at runtime)
    self.test_mode = False

    # Create a hand detector
    # In fact, this is a wrapper for many detectors
    # to increase detection confidence
    self.detector = Detector(self.filters.config)

    # Knowledge base for all detectors
    self.kb = KB()
    # Create gesture recognizer.
    # A gesture consists of a motion and a hand state.
    self.gesture = Gesture()

    # The action module executes keyboard and mouse commands
    self.action = Action()

    # Show output of detectors
    self.output = Output()

    self.run()

  def run(self):
    """
    In each step: Read the input image and keys,
    process it and react on it (e.g. with an action).
    """
    while True:
      img = self.get_input()
      hand = self.process(img)
      ref = self.action.get_reference_point()
      self.output.show(img, hand, ref)

  def process(self, img):
    """
    Process input
    """
    # Run detection
    hand = self.detector.detect(img)
    # Store result in knowledge base
    self.kb.update(hand)
    if not self.test_mode:
      # Try to interpret it as a gesture
      self.interprete(hand)
    return hand

  def interprete(self, hand):
    """
    Try to interpret the input as a gesture
    """
    self.gesture.add_hand(hand)
    operation = self.gesture.detect_gesture()
    self.action.execute(operation)

  def get_input(self):
    """
    Get input from camera and keyboard
    """
    self.get_key()
    _, img = self.camera.read()
    img = cv2.resize(img, (self.FRAME_WIDTH, self.FRAME_HEIGHT))
    if self.flip_camera:
      img = cv2.flip(img, 1)
    return img


  def get_key(self):
    """
    Read keyboard input
    """
    key = cv2.waitKey(self.filters.config["wait_between_frames"])
    if key == ord('+'):
      # Reduce program speed
      self.filters.config["wait_between_frames"] += 500
    if key == ord('-'):
      # Increase program speed
      if self.filters.config["wait_between_frames"] >= 500:
        self.filters.config["wait_between_frames"] -= 500
    #if key == ord('s'):
    # Save config
    #  self.filters.save()
    if key == ord('r'):
      # Reset all detectors
      self.detector.reset()
      self.action.reset()
    if key == ord('d'):
      # Make a screenshot
      self.output.make_screenshot()
    if key == ord('p') or key == ord(' '):
      # Pause
      cv2.waitKey()
    if key == ord('t'):
      # Test mode
      self.test_mode = not self.test_mode
    if key == ord('1'):
      self.output.toggle_estimate()
    if key == ord('2'):
      self.output.toggle_detectors()
    if key == ord('3'):
      self.output.toggle_skin()
    if key == ord('f'):
      self.toggle_filters()
    if key == 63235: # Right arrow
      self.skip_input(20)
    if key == 27 or key == ord('q'):
      # Abort program on ESC or q
      exit()

  def toggle_filters(self):
    """
    Load the next filter settings
    """
    self.filters_file = self.next_filters_file()
    current_config = self.filters_dir + self.filters_file
    self.filters.set_config(current_config)

  def next_filters_file(self):
    """
    Get the next filter settings
    """
    filters = listdir(self.filters_dir)
    for i, f in enumerate(filters):
      if f == self.filters_file:
        return filters[(i+1) % len(filters)]

  def skip_input(self, x=1):
    """
    Skip to a different part of a video sequence.
    """
    for i in range(0,x):
      self.camera.grab()
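
# A minimal entry point, assumed rather than shown in this listing: since
# Tracker.__init__ wires up the camera, detectors and output and then calls
# self.run(), starting the whole pipeline is a single constructor call.
if __name__ == "__main__":
  Tracker()  # blocks in run() until the user quits with ESC or 'q'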
Example #21
if __name__ == "__main__":

    im = Image.open("images/ens.jpg")
    width = im.size[0]
    height = im.size[1]
    # im = im.convert("L")
    pix = im.load()

    # Preparing the convolution
    convoluted_im = Image.new("RGB", (width, height))
    # convoluted_im = Image.new("L", (width, height))
    convoluted_pix = convoluted_im.load()

    # Importing the filters
    F = Filters()
    kernels = [F.generic(), F.box_blur(), F.gauss(), F.sobel(), F.laplace(), F.horizontal_sobel()]
    names = ["generic", "box_blur", "gauss", "sobel_v", "laplace", "sobel_h"]
    for name, f in zip(names, kernels):
        # convolution(pix, convoluted_pix, width, height, f)
        rgb_convolution(pix, convoluted_pix, width, height, f)
        convoluted_im.save("images/ens_%sfilter.jpg" % name)

        # im.show()
        # convoluted_im.show()

        # Generic Pillow filter methods:
        # im1 = im.filter(ImageFilter.MedianFilter(3))
        # im1.show()
        # im2 = im.filter(ImageFilter.GaussianBlur(0))
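
# rgb_convolution() itself is not shown in this listing. A per-channel 3x3
# convolution over PIL pixel-access objects, consistent with the call above,
# might look like this sketch (an assumption, not the original code):
def rgb_convolution3(src, dst, width, height, kernel):
    """Apply a 3x3 kernel to each RGB channel, skipping the 1-pixel border."""
    for y in range(1, height - 1):
        for x in range(1, width - 1):
            acc = [0.0, 0.0, 0.0]
            for ky in (-1, 0, 1):
                for kx in (-1, 0, 1):
                    w = kernel[ky + 1][kx + 1]  # kernel assumed to be a 3x3 list of lists
                    px = src[x + kx, y + ky]
                    for c in range(3):
                        acc[c] += w * px[c]
            # clamp each channel into the displayable 0..255 range
            dst[x, y] = tuple(int(min(max(a, 0), 255)) for a in acc)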
Example #22
File: client.py Project: wezu/a4p
class Client(DirectObject):
    """
    Client class handles GUI/input, audio and rendering
    """
    def __init__(self):
        log.debug('Starting Client')
        #open a window... but first set all the needed props
        wp=self.loadWindowProperties()
        #open the window
        base.openMainWindow(props = wp)
        #base.setBackgroundColor(0.06, 0.1, 0.12, 1)
        base.setBackgroundColor(0.0, 0.0, 0.0, 1)
        base.disableMouse()
        base.enableParticles()

        #needed to determine what window event fired
        self.window_focused=base.win.getProperties().getForeground()
        self.window_x=base.win.getXSize()
        self.window_y=base.win.getYSize()
        self.window_minimized=base.win.getProperties().getMinimized()

        #filter manager, post process
        self.filters=Filters()

        #audio sound effects (sfx) + music
        self.audio=Audio()
        self.audio.setMusic('background')
        self.audio.playMusic()

        #light manager
        self.lights=LightManager()

        #setup the user interface (gui+key/mouse bind)
        self.ui=UserInterface()

        #skybox
        self.sun_and_sky=Skybox(self.lights)

        #player (character) droid
        self.droid=PCDroid(self.ui)

        #some vars used later
        self.map_name=None
        self.loading_status=set()
        self.level_root=render.attachNewNode('level_root')
        self.level_root.hide()
        self.is_in_game=False

        #events
        base.win.setCloseRequestEvent('exit-event')
        self.accept('exit-event',self.onClientExit)
        self.accept( 'window-event', self.onWindowEvent)
        self.accept( 'window-reset', self.onWindowReset)
        self.accept( 'client-mouselock', self.setMouseLock)
        self.accept( 'load-level', self.onLevelLoad)
        self.accept( 'loading-done', self.onLoadingDone)
        self.accept( 'reload-shaders', self.onShaderReload)
        self.accept( 'client-set-team', self.onTeamChange)
        self.accept( 'client-quit', self.onQuit)
        # Task
        taskMgr.add(self.update, 'client_update')

        log.debug('Client started')

    def doSomeStuffTsk(self, task):
        x=deque(range(5000))
        for i in xrange(999):
           random.shuffle(x)
           #print i, x[0]
        #print 'done'
        return task.done

    def setMouseLock(self, lock):
        wp = WindowProperties.getDefault()
        if lock:
            wp.setMouseMode(WindowProperties.M_confined)
        else:
            wp.setMouseMode(WindowProperties.M_relative)
        if not cfg['use-os-cursor']:
            wp.setCursorHidden(True)
        base.win.requestProperties(wp)

    def loadWindowProperties(self):
        #check if we can open a fullscreen window at the requested size
        if cfg['fullscreen']:
            mods=[]
            for mode in base.pipe.getDisplayInformation().getDisplayModes():
                mods.append([mode.width, mode.height])
            if list(cfg['win-size']) not in mods:
                cfg['fullscreen']=False
                log.warning('Can not open fullscreen window at '+str(cfg['win-size']))

        #the window props should be set by this time, but make sure
        wp = WindowProperties.getDefault()
        try:
            wp.setUndecorated(cfg['undecorated'])
            wp.setFullscreen(cfg['fullscreen'])
            wp.setSize(cfg['win-size'][0],cfg['win-size'][1])
            wp.setFixedSize(cfg['win-fixed-size'])
        except Exception:
            log.warning('Failed to set window properties, Traceback:')
            for error in traceback.format_exc().splitlines()[1:]:
                log.warning(error.strip())

        #these probably won't be in the config (?)
        wp.setOrigin(-2,-2)
        wp.setTitle('A4P')
        if not cfg['use-os-cursor']:
            wp.setCursorHidden(True)
        return wp

    def loadLevel(self, task):
        log.debug('Client loading level...')
        with open(path+'maps/'+self.map_name+'.json') as f:
            values=json.load(f)
        #set the time
        self.sun_and_sky.setTime(values['level']['time'])
        #self.sun_and_sky.show()
        #load visible objects
        for id, obj in enumerate(values['objects']):
            mesh=loader.loadModel(path+obj['model'])
            mesh.reparentTo(self.level_root)
            mesh.setPosHpr(tuple(obj['pos']), tuple(obj['hpr']))
            mesh.setTag('id_'+str(id), str(id)) #we may need to find this mesh later to link it to a Bullet object
            for name, value in obj['shader_inputs'].items():
                if isinstance(value, basestring):
                    mesh.setShaderInput(str(name), loader.loadTexture(path+value))
                if isinstance(value, float):
                    mesh.setShaderInput(str(name), value)
                if isinstance(value, list):
                    if len(value) == 2:
                        mesh.setShaderInput(str(name), Vec2(value[0], value[1]))
                    elif len(value) == 3:
                        mesh.setShaderInput(str(name), Vec3(value[0], value[1], value[2]))
                    elif len(value) == 4:
                        mesh.setShaderInput(str(name), Vec4(value[0], value[1], value[2], value[3]))
            mesh.setShader(Shader.load(Shader.SLGLSL, obj['vertex_shader'],obj['fragment_shader']))
        #set the music
        self.audio.setMusic(values['level']['music'])
        #self.level_root.prepareScene(base.win.getGsg())
        messenger.send('loading-done', ['client'])
        return task.done

    #events
    def onQuit(self):
        self.level_root.removeNode()
        self.level_root=render.attachNewNode('level_root')
        self.level_root.hide()
        if self.ui.is_zoomed:
            self.ui.zoom()
        self.sun_and_sky.hide()
        self.droid.disable()
        self.ui.unbindKeys()
        self.ui.in_game_menu.hide()
        self.ui.main_menu.show()
        self.audio.setMusic('background')
        self.loading_status=set()
        self.is_in_game=False
        messenger.send('world-clear-level')


    def onTeamChange(self, team):
        self.droid.setTeam(team)

    def onShaderReload(self):
        log.debug('Client: Reloading shaders')
        for mesh in self.level_root.getChildren():
            shader=mesh.getShader()
            v_shader=shader.getFilename(Shader.ST_vertex)
            f_shader=shader.getFilename(Shader.ST_fragment)
            mesh.setShader(Shader.load(Shader.SLGLSL, v_shader,f_shader))
        self.ui.main_menu.setShader(path+'shaders/gui_v.glsl', path+'shaders/gui_f.glsl')
        self.filters.reset()

    def onLoadingDone(self, target):
        log.debug(str(target)+' loading done')
        self.loading_status.add(target)
        if self.loading_status == set(['client', 'server', 'world']):
            self.ui.main_menu.hide()
            self.level_root.show()
            self.sun_and_sky.show()
            self.ui.bindKeys()
            self.droid.node.setPos(render, 20,0,2)
            self.droid.lockCamera()
            self.droid.model.show()
            self.droid.rig.show()
            self.droid.gun.show()
            self.ui.in_game_menu.showElements('hud_')
            self.ui.hideSoftCursor()
            self.ui.is_main_menu=False
            self.is_in_game=True
            messenger.send('world-link-objects', [self.droid.node, 'pc_droid_node'])

    def onLevelLoad(self, map_name):
        self.map_name=map_name
        #wait 1.0 sec for the loading animation to finish, in case loading takes < 1.0 sec.
        taskMgr.doMethodLater(1.0, self.loadLevel, 'client_loadLevel_task', taskChain = 'background_chain')
        #taskMgr.add(self.loadLevel, 'client_loadLevel_task', taskChain = 'background_chain')
        #the client needs to load/setup:
        # -visible geometry
        # -environment (skybox/dome + sunlight direction + fog + ???)
        # -water plane
        # -unmovable (point)light sources
        # -unmovable vfx
        # -the player droid

    def onClientExit(self):
        log.debug('Client exit')
        self.audio.cleanup()
        app.exit()

    def onWindowReset(self):
        wp=self.loadWindowProperties()
        base.win.requestProperties(wp)

    def onWindowMinimize(self):
        self.window_minimized=base.win.getProperties().getMinimized()
        log.debug('window-event: Minimize is '+str(self.window_minimized))

    def onWindowFocus(self):
        self.window_focused=base.win.getProperties().getForeground()
        log.debug('window-event: Focus set to '+str(self.window_focused))
        if self.is_in_game:
            self.ui.in_game_menu.showMenu(self.window_focused)
        if not self.window_focused:
            self.ui.cursor_pos=(0,0,0)
        if cfg['pause-on-focus-lost']:
            if not self.window_focused:
                self.audio.pauseMusic()
                base.win.setActive(False)
            else:
                self.audio.resumeMusic()
                base.win.setActive(True)

    def onWindowResize(self):
        self.window_x=base.win.getXSize()
        self.window_y=base.win.getYSize()
        log.debug('window-event: Resize')
        self.filters.update()
        self.ui.updateGuiNodes()

    def onWindowEvent(self,window=None):
        if window is not None: # window is None if panda3d is not started
            if self.window_x!=base.win.getXSize() or self.window_y!=base.win.getYSize():
                self.onWindowResize()
            elif window.getProperties().getMinimized() !=  self.window_minimized:
                self.onWindowMinimize()
            elif window.getProperties().getForeground() !=  self.window_focused:
                self.onWindowFocus()

    #tasks
    def update(self, task):
        dt = globalClock.getDt()
        render.setShaderInput('camera_pos', base.cam.getPos(render))
        return task.cont
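
# --- illustration, not part of the original project ---
# onWindowEvent() demultiplexes Panda3D's single 'window-event' by diffing the
# current window state against cached values. The same pattern, reduced to
# plain Python over a hypothetical state dict:
def classify_window_event(cached, current):
    """Return which aspect of the window changed ('resize', 'minimize',
    'focus'), or None, comparing the current state against the cached one."""
    if (current['x'], current['y']) != (cached['x'], cached['y']):
        return 'resize'
    if current['minimized'] != cached['minimized']:
        return 'minimize'
    if current['focused'] != cached['focused']:
        return 'focus'
    return None

# classify_window_event({'x': 800, 'y': 600, 'minimized': False, 'focused': True},
#                       {'x': 1024, 'y': 768, 'minimized': False, 'focused': True})
# -> 'resize'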
Example #23
 def setUp(self):
     self.filters = Filters()