def test_stair_freeyaw(self):
    """Low-pass filter, smooth and down-sample the free yaw measurements."""
    fname = '/home/dave/Repositories/public/0_davidovitch/'
    fname += 'freeyaw-ojf-wt-tests/data/calibrated/DataFrame/'
    fname += '0212_run_064_9.0ms_dc1_freeyawplaying_stiffblades'
    fname += '_coning_pwm1000_highrpm.h5'
    res = pd.read_hdf(fname, 'table')

    time = res.time.values
    sps = 1.0 / np.diff(time).mean()

    ff = Filters()

    cutoff_hz = 1.0
    order = 2
    Wn = cutoff_hz*2.0/sps
    B, A = sp.signal.butter(order, Wn, output='ba')
    yawf = sp.signal.filtfilt(B, A, res.yaw_angle.values)

    # YAW
    plt.figure('yaw')
    plt.plot(res.time, res.yaw_angle, 'r-')
    plt.plot(res.time, yawf, 'b-')
    B, A = sp.signal.butter(order, 1.0*2.0/sps, output='ba')
    yawf2 = sp.signal.filtfilt(B, A, res.yaw_angle.values)
    plt.plot(res.time, yawf2, 'k--')

    # RPM
    data = res.rpm.values
    data_f = ff.butter_lowpass(sps, data, order=2, cutoff_hz=1.0)
    plt.figure('rpm')
    plt.plot(res.time, data, 'r-')
    plt.plot(res.time, data_f, 'b-')
#    filtered_x, N, delay = ff.fir(time, res.rpm, cutoff_hz=1.0,
#                                  freq_trans_width=1.0, ripple_db=50.0)
#    plt.plot(res.time, filtered_x, 'k--')
    smooth_window = 2.0
    ws = int(smooth_window*sps)
    data_s = ff.smooth(res.rpm, window_len=ws, window='hanning')
    NN = len(data_s) - len(time)
    data_s = data_s[NN:]
#    time_s = time[NN:]
    plt.plot(time+(smooth_window/2.0), data_s, 'k--')

    # down-sample (and up again) in order not to break the plotting further down
    time_down = np.arange(time[0], time[-1], 0.1)
    data_f_down = sp.interpolate.griddata(time, data_f, time_down)
    plt.plot(time_down, data_f_down, 'm-', alpha=0.7)

#    # and upsampling again
#    data = sp.interpolate.griddata(time_down, data_down, time)

    slope, intercept, r_value, p_value, std_err \
        = sp.stats.linregress(data_f_down, y=time_down)
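# The test above relies on a Filters.butter_lowpass helper that is not shown.
# A minimal sketch of what such a method could look like, assuming only
# scipy.signal; the name, signature and zero-phase filtfilt choice mirror the
# call sites above but are otherwise assumptions, not the author's code:
import numpy as np
import scipy.signal


class Filters(object):

    def butter_lowpass(self, sps, data, order=2, cutoff_hz=1.0):
        """Zero-phase Butterworth low-pass filter of a 1D signal.

        sps is the sample rate in Hz; cutoff_hz is the -3 dB corner
        frequency, normalized to the Nyquist frequency (sps/2) for Wn.
        """
        Wn = cutoff_hz * 2.0 / sps
        B, A = scipy.signal.butter(order, Wn, output='ba')
        # filtfilt runs the filter forward and backward, cancelling phase lag
        return scipy.signal.filtfilt(B, A, data)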
class WordsToBeKilled(unittest.TestCase):

    def setUp(self):
        self.filters = Filters()

    def tearDown(self):
        self.filters = None

    def testKnownWords(self):
        known_words = [u'知识']
        for w in known_words:
            self.assertTrue(self.filters.is_known_word(w))

    def testNonChineseword(self):
        non_chinese = [u'Chinese', u'[', u',', u' ']
        for w in non_chinese:
            self.assertTrue(self.filters.is_not_chinese_word(w))

    def testAA(self):
        aa_words = [u'天天', u'年年']
        for w in aa_words:
            self.assertTrue(self.filters.is_AA(w))

    def testNumber(self):
        number_words = [u'千万', u'几个亿', u'一九三八', u'十三万二千五百三十五']
        for w in number_words:
            self.assertTrue(self.filters.is_number(w))
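# The Filters predicates exercised by the tests above are not shown. A
# minimal sketch of two of them, assuming plain unicode string input; the
# real implementation may differ (e.g. is_known_word would consult a
# dictionary, and is_number would need a CJK numeral parser):
class Filters(object):

    def is_AA(self, word):
        # 'AA' words are two-character reduplications such as 天天 or 年年
        return len(word) == 2 and word[0] == word[1]

    def is_not_chinese_word(self, word):
        # True if any character falls outside the CJK Unified Ideographs block
        return any(not (u'\u4e00' <= ch <= u'\u9fff') for ch in word)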
def __build_resp_layers(self):
    target_image = self.__grayscale
    image_size = (target_image.width, target_image.height)
    for s in Surf.SCALES:
        Dxx = cv.CloneImage(target_image)
        Dyy = cv.CloneImage(target_image)
        Dxy = cv.CloneImage(target_image)

        # Calculating convolutions
        cv.Filter2D(target_image, Dxx, Filters.fast_hessian(s, FastHessianType.DXX))
        cv.Filter2D(target_image, Dyy, Filters.fast_hessian(s, FastHessianType.DYY))
        cv.Filter2D(target_image, Dxy, Filters.fast_hessian(s, FastHessianType.DXY))

        l = s / 3.0
        scale_factor1 = 1
        scale_factor2 = 1

        temp = cv.CloneImage(Dyy)
        cv.ConvertScale(Dyy, temp, scale=(1.0 / scale_factor1))
        #cv.SaveImage('D:\\hess\\dyy'+str(s)+'.jpg', temp)
        #cv.SaveImage('D:\\hess\\dyy'+str(s)+'.jpg', Dyy)
        #cv.SaveImage('D:\\hess\\dxy'+str(s)+'.jpg', Dxy)

        # Calculating hessian
        HessMatrS = cv.CreateImage(image_size, cv.IPL_DEPTH_64F, 1)
        DxxMultDyy = cv.CloneImage(target_image)
        cv.Mul(Dxx, Dyy, DxxMultDyy, 1.0 / scale_factor1**2)
        DxySquared = cv.CloneImage(target_image)
        cv.Mul(Dxy, Dxy, DxySquared,
               Surf.HESSIAN_RELATIVE_WEIGHT**2 / scale_factor2**2)
        cv.Sub(DxxMultDyy, DxySquared, HessMatrS)
        cv.SaveImage('D:\\hess\\hessian'+str(s)+'.jpg', HessMatrS)
        self.__responses.append(HessMatrS)

        # Calculating laplacian (trace of the hessian matrix)
        traceS = cv.CreateImage(image_size, cv.IPL_DEPTH_64F, 1)
        cv.Add(Dxx, Dyy, traceS)
        self.__traces.append(traceS)
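# What the OpenCV calls above compute per scale is SURF's approximated
# determinant of the Hessian, det(H) ~= Dxx*Dyy - (w*Dxy)**2 with w ~= 0.9
# (HESSIAN_RELATIVE_WEIGHT). The same response expressed directly with numpy
# arrays, assuming Dxx, Dyy, Dxy are float64 filter responses of equal shape:
import numpy as np

def hessian_response(Dxx, Dyy, Dxy, weight=0.9):
    """Blob response: det peaks at blob centres; sign of trace gives polarity."""
    det = Dxx * Dyy - (weight * Dxy) ** 2   # approximated determinant
    trace = Dxx + Dyy                       # laplacian, used for the sign
    return det, trace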
def __init__(self):
    log.debug('Starting Client')
    # open a window... but first set all the needed props
    wp = self.loadWindoProperites()
    # open the window
    base.openMainWindow(props=wp)
    #base.setBackgroundColor(0.06, 0.1, 0.12, 1)
    base.setBackgroundColor(0.0, 0.0, 0.0, 1)
    base.disableMouse()
    base.enableParticles()

    # needed to determine what window event fired
    self.window_focused = base.win.getProperties().getForeground()
    self.window_x = base.win.getXSize()
    self.window_y = base.win.getYSize()
    self.window_minimized = base.win.getProperties().getMinimized()

    # filter manager, post process
    self.filters = Filters()
    # audio sound effects (sfx) + music
    self.audio = Audio()
    self.audio.setMusic('background')
    self.audio.playMusic()
    # light manager
    self.lights = LightManager()
    # setup the user interface (gui + key/mouse bind)
    self.ui = UserInterface()
    # skybox
    self.sun_and_sky = Skybox(self.lights)
    # player (character) droid
    self.droid = PCDroid(self.ui)

    # some vars used later
    self.map_name = None
    self.loading_status = set()
    self.level_root = render.attachNewNode('level_root')
    self.level_root.hide()
    self.is_in_game = False

    # events
    base.win.setCloseRequestEvent('exit-event')
    self.accept('exit-event', self.onClientExit)
    self.accept('window-event', self.onWindowEvent)
    self.accept('window-reset', self.onWindowReset)
    self.accept('client-mouselock', self.setMouseLock)
    self.accept('load-level', self.onLevelLoad)
    self.accept('loading-done', self.onLoadingDone)
    self.accept('reload-shaders', self.onShaderReload)
    self.accept('client-set-team', self.onTeamCahnge)
    self.accept('client-quit', self.onQuit)

    # Task
    taskMgr.add(self.update, 'client_update')
    log.debug('Client started')
def analyze(settings):
    start_time = time.time()
    f = Filters(settings['code'], settings['filters'])
    tree = Tree(settings['path'], f.filters(), settings['antifilters'], settings)
    tree.output()
    time_sec = time.time() - start_time
    print("\nanalyze time : {0:2.5f}sec".format(time_sec))
def prewittEdgeDetector(self, img, shape=(5, 5)):
    filt = Filters()   # renamed from `filter` to avoid shadowing the builtin
    op = Operations()
    sx = filt.prewittKernel(shape, axis=0)
    dx = op.doConvolution(sx, img)
    sy = filt.prewittKernel(shape, axis=1)
    dy = op.doConvolution(sy, img)
    # gradient magnitude: sqrt(dx**2 + dy**2)
    delta = np.power(np.add(np.power(dx, 2), np.power(dy, 2)), 0.5)
    return delta
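# For comparison, the same edge map can be produced without the helper
# classes, using scipy.ndimage's built-in Prewitt derivative. This is an
# independent sketch, not part of the code above:
import numpy as np
from scipy import ndimage

def prewitt_magnitude(img):
    dx = ndimage.prewitt(img.astype(float), axis=0)  # derivative along rows
    dy = ndimage.prewitt(img.astype(float), axis=1)  # derivative along columns
    return np.hypot(dx, dy)                          # gradient magnitude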
def __init__(self, label):
    self.label = label      # label to display image
    self.image = None       # PIL image modified for display
    self.backup = None      # PIL image backup (base for zoom)
    self.imageTk = None     # PIL TK image displayed
    self.filename = None    # filename
    self.zooming = 0        # zoom in range [min..max]
    self.min = 0            # minimum size of image
    self.max = 0            # maximum size of image
    self.f = Filters()
def __init__(self, m_length, error_rate, db_host, db_port,
             dbname, dbcollections, path=None, debug=False):
    self.logdupes = True
    self.debug = debug
    self.logger = logging.getLogger(__name__)
    self.fingerprints = Filters(m_length, error_rate)
    self.logger.info("created bloomfilter({},{}) <----- wangyf"
                     .format(m_length, error_rate))
    self.mongodb = MongoDBClient(db_host, db_port, dbname, dbcollections)
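# Sizing note for the bloom filter above: for a target capacity n and false
# positive rate p, the standard formulas give the bit-array length m and the
# optimal number of hash functions k. A small helper to pick m_length and
# error_rate consistently (generic math, independent of the Filters class):
import math

def bloom_parameters(n, p):
    m = math.ceil(-n * math.log(p) / (math.log(2) ** 2))  # bits needed
    k = max(1, round((m / n) * math.log(2)))              # hash functions
    return m, k

# e.g. 1e6 fingerprints at 0.1% false positives -> about 14.4M bits, 10 hashes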
def __init__(self, constspeed, carbranch, light):
    # load data
    try:
        file_path = open('filename_path', 'r').readline().split('\n')[0]
        self.file = open(file_path, 'w')
    except Exception:
        print("cannot find file")
        sys.exit(0)
    self.file.write(
        "x,y,z,speed,acceleration,curvature,"
        "curvature_change_rate,time,theta,gear,s,left_turn,right_turn\n")
    self.cars = 0
    self.carcurvature = 0
    self.startmoving = False
    self.terminating = False
    self.constspeed = constspeed
    self.acc_filter = Filters(10)
    self.recordlight = light == 't'
    if carbranch == 'lincoln':
        print("current branch is lincoln")
        self.maxsteer = 470
        self.steerratio = 16
        self.wheelbase = 2.85
    elif carbranch == 'byd':
        print("current branch is byd")
        self.maxsteer = 540
        self.steerratio = 17.5
        self.wheelbase = 2.67
    elif carbranch == 'kinglong':
        print("current branch is kinglong")
        self.maxsteer = 540
        self.steerratio = 17.2
        self.wheelbase = 2.75
    elif carbranch == 'h7phev':
        print("current branch is h7phev")
        self.maxsteer = 540
        self.steerratio = 15.33
        self.wheelbase = 2.96
    elif carbranch == 'daimler':
        print("current branch is daimler")
        self.maxsteer = 38.49
        self.steerratio = 1.0
        self.wheelbase = 3.43
    elif carbranch == 'hongqi':
        print("current branch is hongqi")
        self.maxsteer = 480
        self.steerratio = 16.18
        self.wheelbase = 2.75
    else:
        print("car branch does not exist")
        print("Usage: python recorder_path_cyber.py -b "
              "lincoln/byd/kinglong/h7phev/daimler/hongqi")
        sys.exit(0)
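# The maxsteer / steerratio / wheelbase triplet above is what a kinematic
# bicycle model needs to turn a steering-wheel angle into path curvature.
# The recorder's actual computation is not shown here; this is a sketch of
# the textbook relation it presumably uses:
import math

def curvature_from_steering(steering_wheel_deg, steerratio, wheelbase):
    """curvature [1/m] = tan(front wheel angle) / wheelbase."""
    front_wheel_rad = math.radians(steering_wheel_deg / steerratio)
    return math.tan(front_wheel_rad) / wheelbase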
def opportunities():
    '''display search results to volunteer'''
    if 'filters' in request.cookies:
        cookie = request.cookies.get('filters')  # grabs cookie
        filters = cookie.split("/")              # splits cookie into list
        index = int(filters[0])                  # grabs index from list
        cat = filters[1]                         # grabs categories from list
        categories = cat.split("-")
        avail = filters[2]                       # grabs available days
        availability = avail.split("-")          # splits into list
        zipcode = filters[3]                     # grabs zipcode from list
        distance = filters[4]                    # grabs distance from list
        search = Filters(
            categories=categories,
            availability=availability,
            zipcode=zipcode,
            distance=distance
        )  # creates filter with given category and availability
        opps = search.search()  # grabs list of opportunities
        error = check_opps(opps)
        if error:
            flash(error)
            return redirect('/filters')
        opp = opps[index]                    # picks out the opp at index
        index = increment(index, len(opps))  # increments index
        event_date = readable_date(opp.startDateTime)
        event_time = readable_times(opp.startDateTime, opp.duration)
        resp = make_response(
            render_template('volunteer/opportunities.html', opp=opp,
                            event_date=event_date, event_time=event_time,
                            json=json, title="Voluntr | Browse Opportunities",
                            is_production=is_production)
        )  # tells the cookie what to load while it sets itself
        resp.set_cookie('filters', str(index) + "/" + cat + "/" + avail +
                        "/" + zipcode + "/" + distance)  # preps cookie for setting
        return resp  # sets cookie and displays page
    return redirect("/filters")  # redirects to filters if no cookie exists
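# The view above round-trips its state through one "/"-delimited cookie:
# index/categories/availability/zipcode/distance, with "-" separating items
# inside a field. A self-contained sketch of that encoding, assuming the
# field values themselves never contain "/" or "-":
def encode_filters(index, categories, availability, zipcode, distance):
    return "/".join([str(index), "-".join(categories),
                     "-".join(availability), zipcode, distance])

def decode_filters(cookie):
    index, cat, avail, zipcode, distance = cookie.split("/")
    return int(index), cat.split("-"), avail.split("-"), zipcode, distance

# encode_filters(0, ["env", "food"], ["sat", "sun"], "02139", "10")
# -> "0/env-food/sat-sun/02139/10"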
def __init__(self): """ Configuration """ # Camera settings self.FRAME_WIDTH = 341 self.FRAME_HEIGHT = 256 self.flip_camera = True # Mirror image self.camera = cv2.VideoCapture(1) # ...you can also use a test video for input #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov" #self.camera = cv2.VideoCapture(video) #self.skip_input(400) # Skip to an interesting part of the video if not self.camera.isOpened(): print "couldn't load webcam" return #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH) #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT) self.filters_dir = "filters/" # Filter settings in trackbar self.filters_file = "filters_default" # Load filter settings current_config = self.filters_dir + self.filters_file self.filters = Filters(current_config) # No actions will be triggered in test mode # (can be used to adjust settings at runtime) self.test_mode = False # Create a hand detector # In fact, this is a wrapper for many detectors # to increase detection confidence self.detector = Detector(self.filters.config) # Knowledge base for all detectors self.kb = KB() # Create gesture recognizer. # A gesture consists of a motion and a hand state. self.gesture = Gesture() # The action module executes keyboard and mouse commands self.action = Action() # Show output of detectors self.output = Output() self.run()
def __new_flight__(self, flight_id: int, username: str, depart: str, dest: str,
                   depart_date: str, flight_type: str,
                   return_date: Optional[str] = None):
    """
    Creates a basic flight instance, its table, and its dict.
    :param flight_id:
    :param username:
    :param depart:
    :param dest:
    :param depart_date:
    :param flight_type:
    :param return_date:
    :return:
    COMMENT: need to increment flight_id by 1 in load_app
    """
    self.username = username
    self.flight_id = flight_id
    self.table_name = 'tb_' + str(flight_id)
    self.flight_type = flight_type
    self.depart = depart
    self.dest = dest
    self.depart_date = depart_date
    self.return_date = return_date

    # Create table and dicts
    mydb = mysql.connector.connect(host='localhost', user='******',
                                   passwd='flightplanner',
                                   database='FP_database')
    mycursor = mydb.cursor()
    set_up_command = ('CREATE TABLE {0} (Flight_id int, Date varchar(255), '
                      'Min_eco int, Min_bus int, Avg_econ int, Avg_bus int, '
                      'Track_date varchar(255));').format(self.table_name)
    mycursor.execute(set_up_command)
    mydb.commit()
    mycursor.close()
    self.info_dict = dict()
    self.filters = Filters()
    self.filters.require_filters()
    self.notifications = Notification()
    self.notifications.require_notifications()
def init_components(self):
    self.packages = api.get_packages()
    (total, installed, updates) = api.get_stats(self.packages)
    self.categories = api.get_sections(self.packages)

    self.searchbar = AlpsUISearchBar()
    self.toolbar = AlpsUIToolBar(self.searchbar)
    self.toolbar_container = self.toolbar.layout()
    self.packagelist = PackageList(self)
    self.filters = Filters(self.on_filter)
    self.scrolledwindow = self.wrap_scrollbar(self.packagelist.list)
    self.statusbar = AlpsUIStatusBar()
    self.category_list = Categories(self.categories, self.on_category_change)

    self.packagelist.set_packages(self.packages)
    self.toolbar.init_statusbar(self.statusbar)
    self.toolbar.set_mainframe(self)
    self.searchbar.set_category_list(self.category_list)
    self.searchbar.set_package_list(self.packagelist)
    self.statusbar.set_status_text(0, total)
    self.statusbar.set_status_text(1, installed)
    self.statusbar.set_status_text(2, updates)
def filter_obj_alt():
    from filters import Filters
    return Filters(analytical_threshold=0.02,
                   abs_analytical_threshold=11,
                   min_mapq=60,
                   max_mapq=254,
                   remove_deletions=True)
def __init__(self, bam, bed, info, out,
             analytical_threshold=ANALYTICAL_THRESHOLD):
    self.bam = bam
    self.bed = bed
    self.info = info
    self.out = out
    self.analytical_threshold = analytical_threshold

    # Init
    self.bed_obj = Bed(self.bed)
    self.info_obj = Info(self.info)
    self.mh_dict = {}
    self.filters_obj = Filters(
        analytical_threshold=self.analytical_threshold,
        abs_analytical_threshold=self.ABSOLUTE_ANALYTICAL_THRESHOLD,
        min_mapq=self.MIN_MAPPING_QUALITY,
        max_mapq=self.MAX_MAPPING_QUALITY,
        remove_deletions=self.REMOVE_DELETIONS)
    self.short_reads_count = 0
    self.total_reads_count = 0
    self.number_of_contributors_overall = 0
def getReviewersAndWatchers(db, repository, commits=None, changesets=None,
                            reviewfilters=None, applyfilters=True,
                            applyparentfilters=False):
    """getReviewersAndWatchers(db, commits=None, changesets=None) -> tuple

    Returns a tuple containing two dictionaries, each mapping file IDs to
    dictionaries mapping user IDs to sets of changeset IDs. The first
    dictionary defines the reviewers of each file, the second dictionary
    defines the watchers of each file. For any changes in a file for which
    no reviewer is identified, None is used as a key in the dictionary
    instead of a real user ID."""

    if changesets is None:
        changesets = []
        changeset_utils.createChangesets(db, repository, commits)
        for commit in commits:
            changesets.extend(changeset_utils.createChangeset(
                db, None, repository, commit, do_highlight=False))

    cursor = db.cursor()

    filters = Filters()
    filters.setFiles(db, list(getFileIdsFromChangesets(changesets)))

    if applyfilters:
        filters.load(db, repository=repository, recursive=applyparentfilters)

    if reviewfilters:
        filters.addFilters(reviewfilters)

    reviewers = {}
    watchers = {}

    for changeset in changesets:
        author_user_ids = changeset.child.author.getUserIds(db) if changeset.child else set()

        cursor.execute("SELECT DISTINCT file FROM fileversions WHERE changeset=%s",
                       (changeset.id,))

        for (file_id,) in cursor:
            reviewers_found = False

            for user_id, (filter_type, delegate) in filters.listUsers(file_id).items():
                if filter_type == 'reviewer':
                    if user_id not in author_user_ids:
                        reviewer_user_ids = [user_id]
                    elif delegate:
                        reviewer_user_ids = []
                        for delegate_user_name in delegate.split(","):
                            delegate_user = dbutils.User.fromName(db, delegate_user_name)
                            reviewer_user_ids.append(delegate_user.id)
                    else:
                        reviewer_user_ids = []

                    for reviewer_user_id in reviewer_user_ids:
                        reviewers.setdefault(file_id, {}).setdefault(reviewer_user_id, set()).add(changeset.id)
                        reviewers_found = True
                else:
                    watchers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset.id)

            if not reviewers_found:
                reviewers.setdefault(file_id, {}).setdefault(None, set()).add(changeset.id)

    return reviewers, watchers
def url_to_filtered_image(image_url, fil_str, intensity):
    """Takes an image URL, a filter name and an intensity, and returns the
    filtered image as a base64 byte string."""
    filename = "image.jpg"
    image = url_to_image(image_url)

    # Instantiate the filter class and look up the requested filter by name
    fil = Filters()
    for i, item in enumerate(fil.filter_names):
        if item == fil_str:
            break

    # User-selected filter index and intensity
    num_filter = i
    num_param = intensity

    # Apply the filter function to the image
    image = fil.filter_funcs[num_filter](image, num_param)

    # Save image to disk
    cv2.imwrite(filename, image)

    # Encode into an in-memory buffer and convert to base64
    retval, buff = cv2.imencode('.jpg', image)
    base64_bytes = base64.b64encode(buff.tostring())  # newer numpy prefers buff.tobytes()

    # Return the filtered image
    return base64_bytes
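# Round-trip usage sketch for the function above: decode the returned base64
# string back into an OpenCV image (cv2 and numpy assumed available; the
# 'sepia' filter name is hypothetical and depends on Filters.filter_names):
import base64
import numpy as np
import cv2

def base64_to_image(base64_bytes):
    raw = base64.b64decode(base64_bytes)
    arr = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(arr, cv2.IMREAD_COLOR)

# img = base64_to_image(url_to_filtered_image(url, 'sepia', 5))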
def flood_image(original_image_path, flooded_output_path):
    """
    Floods background of the image, starting at seeds that are obtained at the
    edges in unknown areas. After filling, unknown areas should only be inside
    particles.
    :param original_image_path: Path to input image
    :param flooded_output_path: Path to flooded image
    :return: Image where background is BG_COLOR and rest is either
             UNKNOWN_COLOR or FG_COLOR.
    """
    img = cv2.imread(original_image_path, 0)
    sure_fg = Filters.threshold(img, 130, False)

    # equalize histogram
    # equalized_img = ImageUtilities.histeq(img)
    # plt.subplot(121), plt.imshow(img, cmap='gray')
    # plt.title('img'), plt.xticks([]), plt.yticks([])
    # plt.subplot(122), plt.imshow(equalized_img, cmap='gray')
    # plt.title('equalized_img'), plt.xticks([]), plt.yticks([])
    # plt.show()

    # Scharr filter
    grad = Filters.scharr(img)
    grad_8 = np.uint8(grad)
    grad_bg = Filters.threshold(grad_8, 4, True)
    grad_bg[np.all([grad_bg == 0, img < 55], axis=0)] = 255
    sure_bg = grad_bg - sure_fg
    sure_bg_filtered = Filters.median_filter(sure_bg, 3)
    # NOTE: the next two lines write the same thresholded gradient under two
    # names; the first was probably meant to save grad_8 (the raw gradient).
    cv2.imwrite('Gradient.tif', grad_bg)
    cv2.imwrite('Gradient_Background.tif', grad_bg)
    cv2.imwrite('Sure_Background_Filtered.tif', sure_bg_filtered)
    sure_bg_inv = Filters.threshold(sure_bg_filtered, 2, True)
    sure_bg_inv = (sure_bg_inv // 255) + 1
    sure_bg_flood = np.copy(sure_bg_inv)
    seeds = find_seeds(sure_bg_flood)
    flood_fill(seeds, sure_bg_flood, UNKNOWN_COLOR, BG_COLOR)
    cv2.imwrite(flooded_output_path, sure_bg_flood)
    return sure_bg_flood
def test_linregress(self):
    """Compare windowed linear regression slopes against simple
    differentiation on down-sampled, low-pass filtered rpm and yaw signals."""
    fname = '/home/dave/Repositories/public/0_davidovitch/'
    fname += 'freeyaw-ojf-wt-tests/data/calibrated/DataFrame/'
    fname += '0212_run_064_9.0ms_dc1_freeyawplaying_stiffblades'
    fname += '_coning_pwm1000_highrpm.h5'
    res = pd.read_hdf(fname, 'table')

    time = res.time.values
    sps = 1.0 / np.diff(time).mean()
    freq_down = 0.1
    window = 4.0

    ff = Filters()

    data = res.rpm.values
    data_f = ff.butter_lowpass(sps, data, order=2, cutoff_hz=1.0)
    time_down = np.arange(time[0], time[-1], freq_down)
    data_f_down = sp.interpolate.griddata(time, data_f, time_down)
    regress = ff.linregress(time_down, data_f_down, int(window/freq_down))
    diff = np.diff(data_f_down) / freq_down

    plt.figure('rpm')
    plt.plot(time, data, 'r-')
    plt.plot(time_down, data_f_down, 'k--')
    plt.twinx()
    plt.plot(time_down[:-int(window/freq_down)], np.abs(regress[:,0]), 'b--')
    plt.plot(time_down[:-1], np.abs(diff), 'g--')
    plt.ylim([0, 5])
    plt.grid()

    data = res.yaw_angle.values
    data_f = ff.butter_lowpass(sps, data, order=2, cutoff_hz=1.0)
    data_f_down = sp.interpolate.griddata(time, data_f, time_down)
    regress = ff.linregress(time_down, data_f_down, int(window/freq_down))
    diff = np.diff(data_f_down) / freq_down

    plt.figure('yaw')
    plt.plot(time, data, 'r-')
    plt.plot(time_down, data_f_down, 'k--')
    plt.twinx()
    plt.plot(time_down[:-int(window/freq_down)], np.abs(regress[:,0]), 'b--')
    plt.plot(time_down[:-1], np.abs(diff), 'g--')
    plt.ylim([0, 5])
    plt.grid()
def swcli(resource, filter_cmds=None, cache=True):
    api = Swapi(cache)
    resources = api.get_root()
    if resource not in resources:
        raise Exception(
            'Invalid resource name: \'%s\'. Available resources: [%s]'
            % (resource, ', '.join(resources)))
    data = api.get_resource(resource)
    if filter_cmds:
        filters = Filters(api)
        for f in filter_cmds:
            data = filters.filter_data(data, field_name=f[0],
                                       operator=f[1], value=f[2])
    return data
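# Usage sketch for swcli: each filter command is a (field, operator, value)
# triple applied in sequence. The field and operator spellings below are
# illustrative guesses at what Filters.filter_data accepts, not confirmed
# by the source:
# people = swcli('people', filter_cmds=[('height', '>', '180'),
#                                       ('eye_color', '==', 'blue')])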
def track_image(original_image_path, flooded_background):
    img = cv2.imread(original_image_path, 0)
    sure_fg = Filters.threshold(img, 180, False)
    tracked_edges = np.copy(flooded_background)
    contours = track_all_particles(sure_fg, tracked_edges)
    nparr = np.asarray(contours)
    np.save("contours.npy", nparr)
    return contours
def compare_filter(noise_name, var_list=None, noise_data=None):
    if noise_data is None:
        dist = getattr(noise, noise_name)()
        cv.namedWindow(noise_name, cv.WINDOW_NORMAL)
        cv.imshow(noise_name, dist)
    else:
        dist = noise_data
    if not var_list:
        return
    my_filter = Filters(dist)
    out = None
    for key, value in var_list.items():
        if not value:
            out = my_filter.core(mode=key)
        else:
            out = my_filter.core(mode=key, **value)
        cv.namedWindow(key, cv.WINDOW_NORMAL)
        cv.imshow(key, out)
    cv.waitKey(0)
    return out
def repeat_median():
    dist = noise.salt_and_pepper(salt_vs_pepper=0.1)
    cv.namedWindow('salt_and_pepper', cv.WINDOW_NORMAL)
    cv.imshow('salt_and_pepper', dist)

    my_filter = Filters(dist)
    out = my_filter.core(mode='median_filter')
    cv.namedWindow('1', cv.WINDOW_NORMAL)
    cv.imshow('1', out)

    my_filter1 = Filters(out)
    out1 = my_filter1.core(mode='median_filter')
    cv.namedWindow('2', cv.WINDOW_NORMAL)
    cv.imshow('2', out1)

    my_filter2 = Filters(out1)
    out2 = my_filter2.core(mode='median_filter')
    cv.namedWindow('3', cv.WINDOW_NORMAL)
    cv.imshow('3', out2)
    cv.waitKey(0)
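# The three hand-unrolled passes above generalize naturally to a loop; a
# sketch reusing the same Filters/noise API with a configurable pass count:
def repeat_median_n(passes=3):
    out = noise.salt_and_pepper(salt_vs_pepper=0.1)
    cv.namedWindow('salt_and_pepper', cv.WINDOW_NORMAL)
    cv.imshow('salt_and_pepper', out)
    for i in range(1, passes + 1):
        out = Filters(out).core(mode='median_filter')  # re-filter previous output
        cv.namedWindow(str(i), cv.WINDOW_NORMAL)
        cv.imshow(str(i), out)
    cv.waitKey(0)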
def __init__(self):
    # Main parameters
    # self.path = os.getcwd()
    # self.dataset_path = os.path.join(self.path, 'dataset')
    # self.output_path = os.path.join(self.path, 'out')
    self.dataset_path = '/media/marcos/Dados/Projects/Datasets/Exams'
    self.output_path = '/media/marcos/Dados/Projects/Results/Qualificacao/AlgoPPL/frames'
    self.labe_output_path = '/media/marcos/Dados/Projects/Results/Qualificacao/AlgoPPL/label'
    self.exams = ['benchmark_final.avi']
    # self.exams = os.listdir(self.dataset_path)
    self.detail_presentation = True
    self.save_output = True
    self.execute_invisible = True
    self.save_csv = True
    self.sleep_pause = 3
    self.filters = Filters(self.detail_presentation)
def __load_flight__(self, flight_id: int, flight: Flight):
    """
    loads a flight object by obtaining info from the general DB
    :param flight_id:
    :param flight:
    :return:
    """
    info_tuple = self.flights_db.flights_dict[flight_id]
    # load into flight object
    flight.username = info_tuple[0]
    flight.flight_type = info_tuple[1]
    flight.depart = info_tuple[2]
    flight.dest = info_tuple[3]
    flight.depart_date = info_tuple[4]
    flight.return_date = info_tuple[5]
    flight.table_name = 'tb_' + str(flight_id)
    flight.flight_id = flight_id

    temp_filter = Filters()
    temp_filter.insert_day_filter(info_tuple[6])
    temp_filter.insert_depart_time_filter(info_tuple[7])
    temp_filter.insert_max_duration_filter(info_tuple[8])
    temp_filter.insert_price_amount_filter(info_tuple[9])
    flight.filters = temp_filter

    temp_noti = Notification()
    if info_tuple[10] != "NULL":
        limit_min = info_tuple[10].split(';')[0]
        limit_max = info_tuple[10].split(';')[1]  # was split(':'); the field is ';'-delimited
        temp_noti.insert_amount(limit_min, limit_max)
    if info_tuple[11] != "NULL":
        temp_noti.insert_diff(int(info_tuple[11]))
    if info_tuple[12] != "NULL":
        days_since_reset = info_tuple[12].split(';')[0]
        inc_num = info_tuple[12].split(';')[1]
        direction = info_tuple[12].split(';')[2]
        temp_noti.insert_trend(days_since_reset, inc_num, direction)
    flight.notifications = temp_noti
def suggestion_gui(root, account_data, account_found):
    utility.clear_root(root)

    title = tk.Label(root, text="VMedia: Suggestions", font=("arial", 28, "bold"),
                     fg=fg_col, bg=bg_col)
    title.place(relx=0.5, rely=0.05, anchor=tk.CENTER)
    utility.underline(title)

    table = create_table(root)
    table.place(relx=0.1, rely=0.2, relwidth=0.40, relheight=0.75)

    scroll_bar_y = tk.Scrollbar(root, command=table.yview)
    scroll_bar_y.place(relx=0.5, rely=0.2, relheight=0.75)

    select_button = tk.Button(root, text="Select media", font=("arial", 10, "bold"),
                              bg=button_col,
                              command=lambda: select_media(root, table, likes_to_save,
                                                           search_bar.get(), filter_obj))
    select_button.place(relx=0.5, rely=0.1, relwidth=0.04, relheight=0.025,
                        anchor=tk.CENTER)

    search_bar = tk.Entry(root, relief=tk.GROOVE, bd=2, font=("arial", 13))
    search_bar.place(relx=0.1, rely=0.15, relwidth=0.68, relheight=0.025)

    search_button = tk.Button(root, text="Search", font=("arial", 10, "bold"),
                              bg=button_col,
                              command=lambda: search(table, search_bar.get(), filter_obj))
    search_button.place(relx=0.8, rely=0.15, relwidth=0.04, relheight=0.025)

    show_all_button = tk.Button(root, text="Show All", font=("arial", 10, "bold"),
                                bg=button_col,
                                command=lambda: search(table, ""))
    show_all_button.place(relx=0.85, rely=0.15, relwidth=0.04, relheight=0.025)

    exit_button = tk.Button(root, text="Exit", font=("arial", 10, "bold"),
                            bg=button_col,
                            command=lambda: updating_account_data(account_found,
                                                                  likes_to_save))
    exit_button.place(relx=0.8, rely=0.05, relwidth=0.09, relheight=0.05)

    filters_label = tk.Label(root, text="Filters:", font=("arial", 25, "bold"),
                             fg=fg_col, bg=bg_col)
    filters_label.place(relx=0.60, rely=0.25)
    utility.underline(filters_label)

    filter_obj = Filters()
    filters(root, filter_obj)

    if account_data == ['']:
        account_data = []
    # A second list is made so media already used to calculate score
    # do not need to be checked again
    likes_to_save: list[int] = [int(x) for x in account_data]
    global global_likes_to_save
    global_likes_to_save = likes_to_save

    media_data_to_set_scores = suggestion_algorithm_single_use(likes_to_save)
    ordered_media_classes = main_algorithm(media_data_to_set_scores)
    insert_media_table(table, ordered_media_classes)
class Main:

    def __init__(self):
        # Main parameters
        self.dataset_path = os.getcwd() + "\\dataset"
        self.output_path = os.getcwd() + "\\identified"
        self.name_output = "frame"
        self.exams = os.listdir(self.dataset_path)
        self.detail_presentation = True
        self.save_output = True
        self.sleep_pause = 3
        self.filters = Filters(self.detail_presentation)

    def start_process(self):
        exam_jump_process = 0
        exam_process = 0
        for exam in self.exams:
            if exam_process >= exam_jump_process:
                movie = cv2.VideoCapture("{}/{}".format(self.dataset_path, exam))
                self.pupil_process(movie)
            exam_process += 1

    def pupil_process(self, exam):
        number_frame = 0
        while True:
            _, frame = exam.read()
            if frame is None:
                break

            presentation, final = self.filters.pupil_analysis(frame)

            cv2.namedWindow('Training', cv2.WINDOW_NORMAL)
            cv2.imshow('Training', presentation)

            if self.save_output:
                name_output = "%s/%s_%03d.png" % (self.output_path,
                                                  self.name_output, number_frame)
                img_save = presentation if self.detail_presentation else final
                cv2.imwrite(name_output, img_save)

            if cv2.waitKey(1) & 0xFF == ord('p'):  # Pause
                time.sleep(self.sleep_pause)

            number_frame += 1

        exam.release()
        cv2.destroyAllWindows()  # was missing the call parentheses
class WordExtractor(object):

    def __init__(self, output_file, get_word_freq=None):
        self.get_word_freq = get_word_freq
        self.new_words = wordb.open(output_file)
        self.filters = Filters()
        self.n_killed = 0
        self.n_added = 0

    def __call__(self, words):
        self.process_words(words, threshold=2560000)

    def process_files(self, files):  # was missing `self`
        """process files in batch
        """
        for fn in files:
            with codecs.open(fn, 'r', 'utf-8') as f:
                self.process_file(f)

    def process_file(self, input_file):
        """process segmented file
        """
        words = set()
        for line in input_file:
            # was words.add(set(...)): sets are unhashable, update() merges
            words.update(line.split(u'/'))
        self.process_words(words)

    def process_words(self, words, threshold=30000):
        for word in words:
            if self.filters.keep(word) and word not in self.new_words:
                if self.get_word_freq:
                    freq = self.get_word_freq(word)
                    if freq > threshold:
                        logging.info("%s\tadded into db" % word)
                        self.new_words[word] = freq
                else:
                    logging.info("%s\tadded into db" % word)
                    self.new_words[word] = 1
                self.n_added += 1
            else:
                self.n_killed += 1
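# Usage sketch for WordExtractor, assuming a corpus frequency lookup is
# available (lookup_freq and the file names here are hypothetical):
# extractor = WordExtractor('new_words.db', get_word_freq=lookup_freq)
# extractor.process_files(['segmented_corpus_1.txt', 'segmented_corpus_2.txt'])
# print('added %d, filtered out %d' % (extractor.n_added, extractor.n_killed))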
def prepareToCorrelation(self, df, by):
    """
    prepare dataframe for the correlation algorithm
    :param df: input df {pd.DataFrame}
    :param by: group by column
    :return: df : input dataframe
             originFrame : frames to move
             analyzedFrameUpdated : stable main frame containing the most reliable coordinates
             fewDataFrame : lack-of-data frame
             readyToCorr : whether the data frame is ready to be pushed to the correlation algorithm
    """
    _df = df.copy()
    fewDataFrame = pd.DataFrame()
    if by == 'User':
        self.startPoint, noises = Filters.noisyUser(_df, by, col='rawPower')
        #df = self.filters.rollingMean(df,'Power',by = by,noises = noises)
    if by == 'race_id':
        self.startPoint = _df['race_id'].iloc[0]
    analyzedFrame, originFrame = self.splitFrameByMinLen(_df, by)
    analyzedFrameUpdated, readyToCorr = self.checkBoundaries(originFrame, analyzedFrame, by)
    if not readyToCorr:
        _df.drop(analyzedFrame.index, inplace=True)
        fewDataFrame = analyzedFrame
    return _df, originFrame, analyzedFrameUpdated, fewDataFrame, readyToCorr
def sort(self, elem_links, url):
    fex = Faup()
    f = Filters()
    f.load()
    self.r.switchDB(1)
    extend = True
    domainfilter = True
    schemefilter = True
    try:
        for link in elem_links:
            new_url = link
            self.r.switchDB(2)
            if not self.r.get(new_url) and new_url:
                self.r.switchDB(1)
                if not self.r.get(new_url):
                    fex.decode(new_url)
                    domain = fex.get_host()
                    if f.isfilteredscheme(fex.get_scheme()):
                        self.r.switchDB(2)
                        self.r.put(new_url, new_url)
                        schemefilter = False
                    if f.isfiltereddomains(domain):
                        self.r.switchDB(2)
                        self.r.put(new_url, new_url)
                        domainfilter = False
                    if f.isfilteredextention(fex.get_resource_path()):
                        extend = False
                        self.r.switchDB(2)
                        self.r.put(new_url, new_url)
                    if extend and domainfilter and schemefilter:
                        self.r.switchDB(1)
                        self.r.rpush('crawl', new_url)
                        self.queue.append(new_url)
    except TypeError as e:
        print("TypeError: %s" % e)
def getReviewersAndWatchers(db, repository, commits=None, changesets=None,
                            reviewfilters=None, applyfilters=True,
                            applyparentfilters=False, parentfiltersonly=False):
    """getReviewersAndWatchers(db, commits=None, changesets=None) -> tuple

    Returns a tuple containing two dictionaries, each mapping file IDs to
    dictionaries mapping user IDs to sets of changeset IDs. The first
    dictionary defines the reviewers of each file, the second dictionary
    defines the watchers of each file. For any changes in a file for which
    no reviewer is identified, None is used as a key in the dictionary
    instead of a real user ID."""

    if changesets is None:
        changesets = []
        changeset_utils.createChangesets(db, repository, commits)
        for commit in commits:
            changesets.extend(changeset_utils.createChangeset(
                db, None, repository, commit, do_highlight=False))

    cursor = db.cursor()

    filters = Filters()

    if applyfilters:
        if parentfiltersonly:
            filters.load(db, repository=repository.parent, recursive=True)
        else:
            filters.load(db, repository=repository, recursive=applyparentfilters)

    if reviewfilters:
        filters.addFilters(db, reviewfilters, sort=True)

    reviewers = {}
    watchers = {}

    for changeset in changesets:
        author_user_id = changeset.child.author.getUserId(db) if changeset.child else None

        cursor.execute("SELECT DISTINCT file FROM fileversions WHERE changeset=%s",
                       (changeset.id,))

        for (file_id,) in cursor:
            reviewers_found = False

            for user_id, (filter_type, delegate) in filters.listUsers(db, file_id).items():
                if not isinstance(user_id, int):
                    # was a Python 2 `raise Exception, repr(...)`
                    raise Exception(repr(filters.listUsers(db, file_id)))

                if filter_type == 'reviewer':
                    if author_user_id != user_id:
                        reviewer_user_ids = [user_id]
                    elif delegate:
                        reviewer_user_ids = []
                        for delegate_user_name in delegate.split(","):
                            delegate_user = dbutils.User.fromName(db, delegate_user_name)
                            if delegate_user:
                                reviewer_user_ids.append(delegate_user.id)
                            else:
                                raise Exception(repr((user_id, delegate_user_name, file_id)))
                    else:
                        reviewer_user_ids = []

                    for reviewer_user_id in reviewer_user_ids:
                        reviewers.setdefault(file_id, {}).setdefault(reviewer_user_id, set()).add(changeset.id)
                        reviewers_found = True
                else:
                    watchers.setdefault(file_id, {}).setdefault(user_id, set()).add(changeset.id)

            if not reviewers_found:
                reviewers.setdefault(file_id, {}).setdefault(None, set()).add(changeset.id)

    return reviewers, watchers
def addReviewFilters(db, creator, user, review, reviewer_paths, watcher_paths):
    cursor = db.cursor()
    cursor.execute(
        "INSERT INTO reviewassignmentstransactions (review, assigner) VALUES (%s, %s) RETURNING id",
        (review.id, creator.id))
    transaction_id = cursor.fetchone()[0]

    def add(filter_type, paths):
        for path in paths:
            cursor.execute(
                """SELECT id, type
                     FROM reviewfilters
                    WHERE review=%s
                      AND uid=%s
                      AND path=%s""",
                (review.id, user.id, path))
            row = cursor.fetchone()

            if row:
                old_filter_id, old_filter_type = row
                if old_filter_type == filter_type:
                    continue
                else:
                    cursor.execute(
                        """DELETE FROM reviewfilters
                            WHERE id=%s""",
                        (old_filter_id,))
                    cursor.execute(
                        """INSERT INTO reviewfilterchanges (transaction, uid, path, type, created)
                           VALUES (%s, %s, %s, %s, false)""",
                        (transaction_id, user.id, path, old_filter_type))

            cursor.execute(
                """INSERT INTO reviewfilters (review, uid, path, type, creator)
                   VALUES (%s, %s, %s, %s, %s)""",
                (review.id, user.id, path, filter_type, creator.id))
            cursor.execute(
                """INSERT INTO reviewfilterchanges (transaction, uid, path, type, created)
                   VALUES (%s, %s, %s, %s, true)""",
                (transaction_id, user.id, path, filter_type))

    add("reviewer", reviewer_paths)
    add("watcher", watcher_paths)

    filters = Filters()
    filters.setFiles(db, review=review)
    filters.load(db, review=review, user=user)

    if user not in review.reviewers and user not in review.watchers \
            and user not in review.owners:
        cursor.execute(
            """INSERT INTO reviewusers (review, uid, type)
               VALUES (%s, %s, 'manual')""",
            (review.id, user.id,))

    delete_files = set()
    insert_files = set()

    if watcher_paths:
        # Unassign changes currently assigned to the affected user.
        cursor.execute(
            """SELECT reviewfiles.id, reviewfiles.file
                 FROM reviewfiles
                 JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
                WHERE reviewfiles.review=%s
                  AND reviewuserfiles.uid=%s""",
            (review.id, user.id))

        for review_file_id, file_id in cursor:
            if not filters.isReviewer(user.id, file_id):
                delete_files.add(review_file_id)

    if reviewer_paths:
        # Assign changes currently not assigned to the affected user.
        cursor.execute(
            """SELECT reviewfiles.id, reviewfiles.file
                 FROM reviewfiles
                 JOIN changesets ON (changesets.id=reviewfiles.changeset)
                 JOIN commits ON (commits.id=changesets.child)
                 JOIN gitusers ON (gitusers.id=commits.author_gituser)
      LEFT OUTER JOIN usergitemails ON (usergitemails.email=gitusers.email
                                    AND usergitemails.uid=%s)
      LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id
                                      AND reviewuserfiles.uid=%s)
                WHERE reviewfiles.review=%s
                  AND usergitemails.uid IS NULL
                  AND reviewuserfiles.uid IS NULL""",
            (user.id, user.id, review.id))

        for review_file_id, file_id in cursor:
            if filters.isReviewer(user.id, file_id):
                insert_files.add(review_file_id)

    if delete_files:
        cursor.executemany(
            "DELETE FROM reviewuserfiles WHERE file=%s AND uid=%s",
            izip(delete_files, repeat(user.id)))
        cursor.executemany(
            "INSERT INTO reviewassignmentchanges (transaction, file, uid, assigned) VALUES (%s, %s, %s, false)",
            izip(repeat(transaction_id), delete_files, repeat(user.id)))

    if insert_files:
        cursor.executemany(
            "INSERT INTO reviewuserfiles (file, uid) VALUES (%s, %s)",
            izip(insert_files, repeat(user.id)))
        cursor.executemany(
            "INSERT INTO reviewassignmentchanges (transaction, file, uid, assigned) VALUES (%s, %s, %s, true)",
            izip(repeat(transaction_id), insert_files, repeat(user.id)))

    return generateMailsForAssignmentsTransaction(db, transaction_id)
def createReview(db, user, repository, commits, branch_name, summary, description,
                 from_branch_name=None, via_push=False, reviewfilters=None,
                 applyfilters=True, applyparentfilters=False, recipientfilters=None):
    cursor = db.cursor()

    if via_push:
        applyparentfilters = bool(user.getPreference(db, 'review.applyUpstreamFilters'))

    branch = dbutils.Branch.fromName(db, repository, branch_name)

    if branch is not None:
        raise OperationFailure(
            code="branchexists",
            title="Invalid review branch name",
            message="""\
<p>There is already a branch named <code>%s</code> in the repository.  You have
to select a different name.</p>

<p>If you believe the existing branch was created during an earlier (failed)
attempt to create this review, you can try to delete it from the repository
using the command<p>

<pre>  git push &lt;remote&gt; :%s</pre>

<p>and then press the "Submit Review" button on this page again."""
            % (htmlutils.htmlify(branch_name), htmlutils.htmlify(branch_name)),
            is_html=True)

    if not commits:
        raise OperationFailure(
            code="nocommits",
            title="No commits specified",
            message="You need at least one commit to create a review.")

    commitset = log_commitset.CommitSet(commits)
    heads = commitset.getHeads()

    if len(heads) != 1:
        # There is really no plausible way for this error to occur.
        raise OperationFailure(
            code="disconnectedtree",
            title="Disconnected tree",
            message=("The specified commits do not form a single connected "
                     "tree.  Creating a review of them is not supported."))

    head = heads.pop()

    if len(commitset.getTails()) != 1:
        tail_id = None
    else:
        tail_id = gitutils.Commit.fromSHA1(
            db, repository, commitset.getTails().pop()).getId(db)

    if not via_push:
        try:
            repository.createBranch(branch_name, head.sha1)
        except gitutils.GitCommandError as error:
            raise OperationFailure(
                code="branchfailed",
                title="Failed to create review branch",
                message=("<p><b>Output from git:</b></p>"
                         "<code style='padding-left: 1em'>%s</code>"
                         % htmlutils.htmlify(error.output)),
                is_html=True)

    createChangesetsForCommits(db, commits)

    try:
        cursor.execute(
            "INSERT INTO branches (repository, name, head, tail, type) VALUES (%s, %s, %s, %s, 'review') RETURNING id",
            [repository.id, branch_name, head.getId(db), tail_id])

        branch_id = cursor.fetchone()[0]
        reachable_values = [(branch_id, commit.getId(db)) for commit in commits]

        cursor.executemany(
            "INSERT INTO reachable (branch, commit) VALUES (%s, %s)",
            reachable_values)

        cursor.execute(
            "INSERT INTO reviews (type, branch, state, summary, description, applyfilters, applyparentfilters) VALUES ('official', %s, 'open', %s, %s, %s, %s) RETURNING id",
            (branch_id, summary, description, applyfilters, applyparentfilters))

        review = dbutils.Review.fromId(db, cursor.fetchone()[0])

        cursor.execute(
            "INSERT INTO reviewusers (review, uid, owner) VALUES (%s, %s, TRUE)",
            (review.id, user.id))

        if reviewfilters is not None:
            cursor.executemany(
                """INSERT INTO reviewfilters (review, uid, path, type, creator)
                   VALUES (%s, %s, %s, %s, %s)""",
                [(review.id, filter_user_id, filter_path, filter_type, user.id)
                 for filter_user_id, filter_path, filter_type, filter_delegate
                 in reviewfilters])

        is_opt_in = False

        if recipientfilters is not None:
            cursor.executemany(
                "INSERT INTO reviewrecipientfilters (review, uid, include) VALUES (%s, %s, %s)",
                [(review.id, filter_user_id, filter_include)
                 for filter_user_id, filter_include in recipientfilters])

            for filter_user_id, filter_include in recipientfilters:
                if filter_user_id is None and not filter_include:
                    is_opt_in = True

        addCommitsToReview(db, user, review, commits, new_review=True)

        if from_branch_name is not None:
            cursor.execute(
                "UPDATE branches SET review=%s WHERE repository=%s AND name=%s",
                (review.id, repository.id, from_branch_name))

        # Reload to get list of changesets added by addCommitsToReview().
        review = dbutils.Review.fromId(db, review.id)

        pending_mails = []
        recipients = review.getRecipients(db)
        for to_user in recipients:
            pending_mails.extend(
                mail.sendReviewCreated(db, user, to_user, recipients, review))

        if not is_opt_in:
            recipient_by_id = dict((to_user.id, to_user) for to_user in recipients)

            cursor.execute(
                """SELECT userpreferences.uid, userpreferences.repository,
                          userpreferences.filter, userpreferences.integer
                     FROM userpreferences
          LEFT OUTER JOIN filters ON (filters.id=userpreferences.filter)
                    WHERE userpreferences.item='review.defaultOptOut'
                      AND userpreferences.uid=ANY (%s)
                      AND (userpreferences.filter IS NULL
                        OR filters.repository=%s)
                      AND (userpreferences.repository IS NULL
                        OR userpreferences.repository=%s)""",
                (recipient_by_id.keys(), repository.id, repository.id))

            user_settings = {}
            has_filter_settings = False

            for user_id, repository_id, filter_id, integer in cursor:
                settings = user_settings.setdefault(user_id, [None, None, {}])
                value = bool(integer)

                if repository_id is None and filter_id is None:
                    settings[0] = value
                elif repository_id is not None:
                    settings[1] = value
                else:
                    settings[2][filter_id] = value
                    has_filter_settings = True

            if has_filter_settings:
                filters = Filters()
                filters.setFiles(db, review=review)

            for user_id, (global_default, repository_default,
                          filter_settings) in user_settings.items():
                to_user = recipient_by_id[user_id]
                opt_out = None

                if repository_default is not None:
                    opt_out = repository_default
                elif global_default is not None:
                    opt_out = global_default

                if filter_settings:
                    # Policy:
                    #
                    # If all of the user's filters that matched files in the
                    # review have review.defaultOptOut enabled, then opt out.
                    # When determining this, any review filters of the user's
                    # that match files in the review count as filters that don't
                    # have the review.defaultOptOut enabled.
                    #
                    # If any of the user's filters that matched files in the
                    # review have review.defaultOptOut disabled, then don't opt
                    # out.  When determining this, review filters are ignored.
                    #
                    # Otherwise, ignore the filter settings, and go with either
                    # the user's per-repository or global setting (as set
                    # above.)

                    filters.load(db, review=review, user=to_user)

                    # A set of filter ids.  If None is in the set, the user has
                    # one or more review filters in the review.  (These do not
                    # have ids.)
                    active_filters = filters.getActiveFilters(to_user)

                    for filter_id in active_filters:
                        if filter_id is None:
                            continue
                        elif filter_id in filter_settings:
                            if not filter_settings[filter_id]:
                                opt_out = False
                                break
                        else:
                            break
                    else:
                        if None not in active_filters:
                            opt_out = True

                if opt_out:
                    cursor.execute(
                        """INSERT INTO reviewrecipientfilters (review, uid, include)
                           VALUES (%s, %s, FALSE)""",
                        (review.id, to_user.id))

        db.commit()

        mail.sendPendingMails(pending_mails)

        return review
    except:
        if not via_push:
            repository.run("branch", "-D", branch_name)
        raise
def assignChanges(db, user, review, commits=None, changesets=None, update=False):
    cursor = db.cursor()

    if changesets is None:
        assert commits is not None

        changesets = []

        for commit in commits:
            changesets.extend(
                changeset_utils.createChangeset(db, user, review.repository, commit))

    applyfilters = review.applyfilters
    applyparentfilters = review.applyparentfilters

    reviewers, watchers = getReviewersAndWatchers(
        db, review.repository, changesets=changesets,
        reviewfilters=review.getReviewFilters(db),
        applyfilters=applyfilters,
        applyparentfilters=applyparentfilters)

    cursor.execute("SELECT uid FROM reviewusers WHERE review=%s", (review.id,))

    reviewusers = set([user_id for (user_id,) in cursor])
    reviewusers_values = set()
    reviewuserfiles_values = set()

    reviewuserfiles_existing = {}

    if update:
        cursor.execute(
            """SELECT reviewuserfiles.uid, reviewfiles.changeset, reviewfiles.file
                 FROM reviewfiles
                 JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
                WHERE reviewfiles.review=%s""",
            (review.id,))
        for user_id, changeset_id, file_id in cursor:
            reviewuserfiles_existing[(user_id, changeset_id, file_id)] = True

    new_reviewers = set()
    new_watchers = set()

    cursor.execute(
        """SELECT DISTINCT uid
             FROM reviewfiles
             JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
            WHERE review=%s""",
        (review.id,))
    old_reviewers = set([user_id for (user_id,) in cursor])

    for file_id, file_users in reviewers.items():
        for user_id, user_changesets in file_users.items():
            if user_id:
                new_reviewers.add(user_id)

                if user_id not in reviewusers:
                    reviewusers.add(user_id)
                    reviewusers_values.add((review.id, user_id))
                for changeset_id in user_changesets:
                    if (user_id, changeset_id, file_id) not in reviewuserfiles_existing:
                        reviewuserfiles_values.add(
                            (user_id, review.id, changeset_id, file_id))

    for file_id, file_users in watchers.items():
        for user_id, user_changesets in file_users.items():
            if user_id:
                if user_id not in reviewusers:
                    new_watchers.add(user_id)
                    reviewusers.add(user_id)
                    reviewusers_values.add((review.id, user_id))

    new_reviewers -= old_reviewers
    new_watchers -= old_reviewers | new_reviewers

    cursor.executemany("INSERT INTO reviewusers (review, uid) VALUES (%s, %s)",
                       reviewusers_values)
    cursor.executemany(
        "INSERT INTO reviewuserfiles (file, uid) SELECT id, %s FROM reviewfiles WHERE review=%s AND changeset=%s AND file=%s",
        reviewuserfiles_values)

    if configuration.extensions.ENABLED:
        cursor.execute(
            """SELECT id, uid, extension, path
                 FROM extensionhookfilters
                WHERE repository=%s""",
            (review.repository.id,))

        rows = cursor.fetchall()

        if rows:
            if commits is None:
                commits = set()
                for changeset in changesets:
                    commits.add(changeset.child)
                commits = list(commits)

            filters = Filters()
            filters.setFiles(db, list(getFileIdsFromChangesets(changesets)))

            for filter_id, user_id, extension_id, path in rows:
                filters.addFilter(user_id, path, None, None, filter_id)

            for filter_id, file_ids in filters.matched_files.items():
                extensions.role.filterhook.queueFilterHookEvent(
                    db, filter_id, review, user, commits, file_ids)

    return new_reviewers, new_watchers
class PosAlgorithm(): def __init__(self,testDf, mainDf,segmentsStepsDf, correlator, spread = 15, timeStep = 1, ): # original database self.SmoothedDf = pd.io.parsers.read_csv(mainDf,index_col = 'index') # source for section imitation self.testDf = pd.io.parsers.read_csv(testDf,index_col = 'index') self.segmentsStepsDf = pd.io.parsers.read_csv(segmentsStepsDf) self.powerCorrelator = correlator # output database contained predicted points self.predicted_df = None # by default the number of unpredicted segments is 0 self.unpredicted = 0 # the dictionary of coefficients of correlation self.corrCoeffs = {} # range of indexes at the test data frame # range between indexes of grabbed section. # Other words it is just about the time of user's waiting in seconds self.spread = spread # the time step as constant step between rows at the database self.timeStep = timeStep # The number of laccids,grabbed by user. For "byLacCidMod" algorithm it must be more then 2. # Otherwise, it will works as "byLacCid" algorithm # self.numLC = numLC self.filters = Filters() def initGrabbedSet(self): # initialize self.segments variable self.generateRandomSegment() self.grabbedDf = self.getTestSection() #self.truthPoint = self.randomSampling(self.grabbedDf,numsamples = 1) self.truthPoint = self.grabbedDf.tail(1) self.trueSegment = self.truthPoint['segment'].unique().all() def generateRandomSegment(self): """ Generate segment where user is located. :return: """ # additional criterias """ uniqueLc = self.testDf.groupby(['segment'])['laccid'].unique() # get rows with number of laccids more than ... byLc = uniqueLc[uniqueLc.apply(len)>=self.numLC] segments = list(byLc.keys()) # get rows with number of races more than ... byRaces = self.testDf.groupby('segment')['race_id'].unique().apply(len) segments2 = list(byRaces[byRaces>self.numRaces].keys()) # find the intersection of founded sets Segments = set(segments).intersection(segments2) """ # generate test segment # simple random segLens = self.testDf.groupby(['segment']).apply(len) self.randSeg = segLens[segLens>self.spread].sample(1).keys() #print self.randSeg def getTestSection(self): """ Get the dataframe grabbed by user. :return: """ #self.randSeg = ['074-075'] df = self.testDf [self.testDf['segment'].isin(self.randSeg)] self.analyzedDf = df.copy() # filtrate self.analyzedDf = self.filters.medianFilter(self.analyzedDf) # Note! change 'ratio' to 'TimeStamp' for real situation or remove this sorting! #grouped = self.analyzedDf.groupby('ratio').sort('ratio') # generate test slice #firstStamp = 41.0 firstStamp = random.sample(self.analyzedDf[self.analyzedDf.TimeStamp < max(self.analyzedDf.TimeStamp) - self.spread].TimeStamp,1)[0] print " : " + str(firstStamp) self.analyzedDf.loc[:,'grabbed'] = np.nan self.analyzedDf = self.analyzedDf.sort('ratio') #lastIx = self.analyzedDf[self.analyzedDf.TimeStamp == self.analyzedDf.TimeStamp[firstIx] + self.spread].index self.analyzedDf.loc[self.analyzedDf[(self.analyzedDf.TimeStamp>=firstStamp)&(self.analyzedDf.TimeStamp<=firstStamp+self.spread)].index,'grabbed'] = 1 #self.analyzedDf.loc[i:i+self.spread,'grabbed'] = 1 self.analyzedDf['grabbed'] = self.analyzedDf['grabbed'].fillna(0) grabbed_df = self.analyzedDf[self.analyzedDf['grabbed'] == 1] grabbed_df = grabbed_df.sort(['TimeStamp','laccid']) #grabbed_df['index'] = range(0,len(grabbed_df)) return grabbed_df def predict(self,alg,useSmoothed): """ initialize the algorithm of postiioning prediction. 
:param alg: keyword for algoruthm :return: """ self.corrCoeffs = {} if alg == "r": self.randomSampling(self.SmoothedDf) if alg == "lc": self.byLacCid() if alg == "lcM": self.byLacCidMod() #self.() if alg == "pc": self.byPowerCorr(useSmoothed = useSmoothed) def randomSampling(self,df,numsamples = 50): """ Generate subset from input dataframe. :param df: dataframe to analyse :param numsamples: the number of samples :return: """ rows = random.sample(df.index,numsamples) self.predictedDf = df.ix[rows] self.predicted_segments = self.predictedDf['segment'].unique() return self.predictedDf def byLacCid(self): """ Use Lac and Cid identifiers of Base station only. :return: """ self.grabbed_lc = self.grabbedDf['laccid'].unique() self.predictedDf = self.SmoothedDf[self.SmoothedDf['laccid'].isin(self.grabbed_lc)] self.predicted_segments = self.predictedDf['segment'].unique() if self.predictedDf[self.predictedDf['segment'].isin(self.truthPoint['segment'].unique())].empty == True: self.unpredicted = 1 print self.truthPoint def byLacCidMod(self): predictedInfo = pd.DataFrame() check = True laccids = self.grabbedDf.laccid.unique() if laccids.__len__()>1: actives = self.grabbedDf.Active.unique() uniqueLevels = {'before':self.spread,'after':self.spread} changedLcs = self.extractChanges() if changedLcs: predictedInfo = self.findChanges(changedLcs,uniqueLevels) else: if actives.__len__()>1: predictedInfo = self.findActives(uniqueLevels) if not predictedInfo.empty: predictedDf = self.reduceByChanges(predictedInfo) self.predictedDf = predictedDf.sort(columns = ['segment','ratio','laccid']) if predictedInfo.empty: self.unpredicted = 1 else: check = False return check def reduceByChanges(self,predictedInfo): predictedDf = pd.DataFrame() grouped = self.predictedDf.groupby('segment') for seg,gr in grouped: segInfo = predictedInfo[predictedInfo.segment == seg] for ix,row in segInfo.iterrows(): #it might be more than one if segment contains several "change points" _gr = gr[(gr.ratio>=row['left'])&(gr.ratio<=row['right'])] predictedDf = pd.concat([predictedDf,_gr]) predictedDf = predictedDf.drop_duplicates() return predictedDf def extractChanges(self): grouped = self.grabbedDf.groupby(['TimeStamp']) LcsPrev = np.array([]) changed = [] for ts,gr in grouped: uniqueLcs = gr.laccid.unique() if len(LcsPrev)>0: uniqueLcsNext = uniqueLcs if sorted(list(LcsPrev))!=sorted(list(uniqueLcsNext)): changed.append({'prev':list(LcsPrev),'next':list(uniqueLcsNext)}) LcsPrev = uniqueLcs if not len(LcsPrev)>0: LcsPrev = uniqueLcs return changed def findChanges(self,changedLcs,uniqueLevels): grouped = self.predictedDf.groupby(['segment','ratio']) predictedInfo = pd.DataFrame() LcsPrev = np.array([]) ix = 0 #LcsNext = None for pare in changedLcs: for (seg,rat),gr in grouped: if len(LcsPrev)>0: uniqueLcsNext = gr.laccid.unique() if (pare['next'] in uniqueLcsNext)&(pare['next'] not in LcsPrev): leftDelta,rightDelta = self.findDiff(seg,uniqueLevels) row = pd.DataFrame({'segment':seg,'left':prevPoint-leftDelta,'right':rat+rightDelta},index = [ix]) predictedInfo = pd.concat([predictedInfo,row]) ix+=1 LcsPrev = np.array([]) if not (LcsPrev)>0: uniqueLcsPrev = gr.laccid.unique() if pare['prev'] in uniqueLcsPrev: LcsPrev = uniqueLcsPrev prevPoint = rat else: LcsPrev = np.array([]) prevPoint = None return predictedInfo def findActives(self,uniqueLevels): lcGrouped = self.grabbedDf.groupby('TimeStamp').\ filter(lambda x : len(x)>1).groupby('TimeStamp').\ apply(lambda x: np.unique(x['laccid'])) laccidsAll = np.unique(lcGrouped.to_dict().values()) 
filtered = self.predictedDf.groupby(['segment','ratio']).filter(lambda x : len(x)>1) activeGroup = filtered.groupby(['segment','ratio']) activePoints = activeGroup['laccid'].apply(np.unique) d = activePoints.apply(lambda x: sorted(list(x)) == sorted(laccidsAll)).to_dict() predictedFrame = pd.DataFrame([key for key in d.keys() if d[key] == True],columns = ['segment','ratio']).sort(['segment','ratio']) predictedInfo = self.extractBounds(predictedFrame,uniqueLevels) return predictedInfo def extractBounds(self,frame,uniqueLevels = 'default',clip = True): """ Extract minimum and maximum ratios from the frame by each segment and clip predicted earlier frame by them. :param frame: frame contains "active points" with 2 fields : segment and ratio {pd.DataFrame} :param uniqueLevels: length of boundaries by which is need to clip (seconds) {int} :param clip: if need to clip or not {boolean} :return: clipped dataFrame {pd.DataFrame} """ if uniqueLevels == 'default': uniqueLevels = {'after':0,'before':0} leftDelta,rightDelta = 0,0 grouped = frame.groupby('segment') Predicted = pd.DataFrame() for seg,gr in grouped: _gr = pd.DataFrame({'segment':[seg]}) if not clip: leftDelta,rightDelta = self.findDiff(seg,uniqueLevels) _gr.loc[:,'left'],_gr.loc[:,'right'] = min(gr['ratio'])-leftDelta,max(gr['ratio'])+rightDelta Predicted = pd.concat([Predicted,_gr]) return Predicted def findDiff(self,seg,spread): #frame = frame.sort(['segment','ratio','laccid']) #diffs = np.diff(frame['ratio'],1) interpStep = self.segmentsStepsDf[self.segmentsStepsDf.segment == seg].interpStep.values[0] #diffs[diffs!=0][0] left,right = interpStep*spread['before'],interpStep*spread['after'] return left,right def byLacCidMod2(self): """ Use the information from neighbours laccids. :return: """ #Note! Attach probability according length of founded laccids for each step. # For example,probability for sublist with length 4 more than siblist with length 2, # because this means that in the first case 4 cell's stations were founded correctly, when # in the second case only 2. But it might be lack of the data in origin database. predicted_segments =[] # get predicted frame and segments according base laccid algorithm #self.byLacCid() self.unpredicted = 0 # iterate by laccids at grabbed list of laccids. for step in range(len(self.grabbed_lc),1,-1): # check all combinations for sublist in itertools.combinations(self.grabbed_lc,step): predicted_subDf = self.predictedDf[self.predictedDf['laccid'].isin(sublist)] segments = predicted_subDf['segment'].unique() # find the right segments for this combination for seg in segments: seg_subDf = predicted_subDf[predicted_subDf['segment'] == seg] lc_subList = seg_subDf['laccid'].unique() if (set(sublist).issubset(set(lc_subList))) == True: predicted_segments.append(seg) if predicted_segments!=[]: break # if something founded - reduce the selection of predicted segments. if predicted_segments!=[]: self.predictedDf = self.predictedDf[self.predictedDf['segment'].isin(predicted_segments)] # if no segments - use the segments from base algorithm. else: self.unpredicted = 1 def byPowerCorr(self, useSmoothed = False): """ The input segment should contains varying of signal. Only in this case Suppose that user's telephone grabbed not only the base station but neighbours too it is possible to identify truth position :return: predicted data frame. """ self.unpredicted = 0 self.resultsDf = pd.DataFrame() predictedDf = pd.DataFrame() fullPredicted = pd.DataFrame() # dataFrame contained control Rows. 
    ReducingTypes = {'byAbs':'maxLimit','byCorr':'localMaxima'}
    # 1. Split the phone data into base step sections.
    if useSmoothed:
        #self.interpPowers = self.grabbedDf.groupby(['laccid'])['Power'].apply(list).to_dict()
        self.interpPowers = list(self.grabbedDf['Power'])
    else:
        self.interpolateByTimeStep()
    # 2. Compare the powers of the grabbed log with the powers from the database:
    #    a) if the variance of the grabbed log is close to zero --> compare the
    #       means of the lists of absolute Power values;
    #    b) else --> compare the correlation coefficients:
    #       if corrCoeff < 0: extract these indexes from the predicted dataFrame;
    #       if corrCoeff > 0: find the local maxima in the list of corrCoeffs and
    #       extract all of the others from the predicted dataFrame.
    absMeans = self.powerCorrelator.analyzeLC(self.grabbedDf.groupby(['laccid'])['Power'].apply(list).to_dict())
    # Extract indexes iteratively
    powersDf = self.predictedDf.groupby(['segment'])
    first,last = 0,0
    for (seg,SegLcGroup) in powersDf:
        #analyzedSection = self.interpPowers[lc]
        analyzedSection = self.interpPowers
        if len(self.grabbed_lc) == 1:
            method = self.powerCorrelator.checkPredictionMethod(self.grabbed_lc[0], absMeans)
        else:
            method = 'byCorr'
        redType = ReducingTypes[method]
        predictedPart,allPredicted,last = self.powerCorrelator.loopThroughLaccid(
            SegLcGroup,method,analyzedSection,redType,return_all=True,last = last)
        predictedPart['sliceNumber'] = range(first,last)
        first = last
        predictedDf = pd.concat([predictedDf,predictedPart])
        fullPredicted = pd.concat([fullPredicted,allPredicted])
    if not predictedDf.empty:
        if 'controls' not in predictedDf.columns.values:
            print 'warning: predictedDf contains no control rows'
        self.predictedDf = predictedDf
        self.fullPredicted = fullPredicted
    else:
        self.unpredicted = 1

def interpolateByTimeStep(self):
    """
    Linear interpolation of the grabbed log on a constant time step.
    :return: a dictionary where the key is the LAC-CID and the value is
             the array of interpolated powers
    """
    self.interpPowers = {}
    old = self.grabbedDf.groupby(['laccid'])['TimeStamp']\
        .apply(lambda x: list((x - min(x))/1000))
    new = self.grabbedDf.groupby(['laccid'])['TimeStamp']\
        .apply(lambda x: range(0, max(x - min(x))/1000 + 1, self.timeStep))
    for lc in old.keys():
        self.interpPowers[lc] = np.interp(new[lc], old[lc],
                                          self.grabbedDf.loc[self.grabbedDf['laccid'] == lc, 'Power'])
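# --- illustrative sketch (not part of the original module) ---
# A minimal, self-contained example of the resampling idea behind
# interpolateByTimeStep: np.interp maps an irregularly sampled power trace
# onto a regular time grid. The numbers below are made up for illustration.
import numpy as np

t_old = [0.0, 1.3, 2.9, 4.1]    # seconds since the first sample
power = [-71, -75, -69, -73]    # signal power readings at those times
t_new = range(0, 5, 1)          # regular grid with timeStep = 1 s
print np.interp(t_new, t_old, power)
# -> [-71. , -74.08, -72.38, -69.33, -72.67] (approximately)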
class Tracker(object):
    """
    This is the main program which gives a high-level view of all the
    running subsystems. It connects camera input with output in form of
    "actions" (such as keyboard shortcuts on the user's behalf). This is
    done by locating a hand in an image and detecting features, like the
    number of fingers, and trying to match that data with a known gesture.
    """
    def __init__(self):
        """ Configuration """

        # Camera settings
        self.FRAME_WIDTH = 341
        self.FRAME_HEIGHT = 256
        self.flip_camera = True # Mirror image
        self.camera = cv2.VideoCapture(1)

        # ...you can also use a test video for input
        #video = "/Users/matthiasendler/Code/snippets/python/tracker/final/assets/test_video/10.mov"
        #self.camera = cv2.VideoCapture(video)
        #self.skip_input(400) # Skip to an interesting part of the video

        if not self.camera.isOpened():
            print "couldn't load webcam"
            return
        #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, self.FRAME_WIDTH)
        #self.camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, self.FRAME_HEIGHT)

        self.filters_dir = "filters/" # Filter settings in trackbar
        self.filters_file = "filters_default"

        # Load filter settings
        current_config = self.filters_dir + self.filters_file
        self.filters = Filters(current_config)

        # No actions will be triggered in test mode
        # (can be used to adjust settings at runtime)
        self.test_mode = False

        # Create a hand detector
        # In fact, this is a wrapper for many detectors
        # to increase detection confidence
        self.detector = Detector(self.filters.config)

        # Knowledge base for all detectors
        self.kb = KB()

        # Create gesture recognizer.
        # A gesture consists of a motion and a hand state.
        self.gesture = Gesture()

        # The action module executes keyboard and mouse commands
        self.action = Action()

        # Show output of detectors
        self.output = Output()

        self.run()

    def run(self):
        """
        In each step: read the input image and keys, process them and
        react to the result (e.g. with an action).
""" while True: img = self.get_input() hand = self.process(img) ref = self.action.get_reference_point() self.output.show(img, hand, ref) def process(self, img): """ Process input """ # Run detection hand = self.detector.detect(img) # Store result in knowledge base self.kb.update(hand) if not self.test_mode: # Try to interprete as gesture self.interprete(hand) return hand def interprete(self, hand): """ Try to interprete the input as a gesture """ self.gesture.add_hand(hand) operation = self.gesture.detect_gesture() self.action.execute(operation) def get_input(self): """ Get input from camera and keyboard """ self.get_key() _, img = self.camera.read() img = cv2.resize(img, (self.FRAME_WIDTH, self.FRAME_HEIGHT)) if self.flip_camera: img = cv2.flip(img, 1) return img def get_key(self): """ Read keyboard input """ key = cv2.waitKey(self.filters.config["wait_between_frames"]) if key == ord('+'): # Reduce program speed self.filters.config["wait_between_frames"] += 500 if key == ord('-'): # Increase program speed if self.filters.config["wait_between_frames"] >= 500: self.filters.config["wait_between_frames"] -= 500 #if key == ord('s'): # Save config # self.filters.save() if key == ord('r'): # Reset all detectors self.detector.reset() self.action.reset() if key == ord('d'): # Make a screenshot self.output.make_screenshot() if key == ord('p') or key == ord(' '): # Pause cv2.waitKey() if key == ord('t'): # Test mode self.test_mode = not self.test_mode if key == ord('1'): self.output.toggle_estimate() if key == ord('2'): self.output.toggle_detectors() if key == ord('3'): self.output.toggle_skin() if key == ord('f'): self.toggle_filters() if key == 63235: # Right arrow self.skip_input(20) if key == 27 or key == ord('q'): # Abort program on ESC, q or space exit() def toggle_filters(self): """ Load the next filter settings """ self.filters_file = self.next_filters_file() current_config = self.filters_dir + self.filters_file self.filters.set_config(current_config) def next_filters_file(self): """ Get the next filter settings """ filters = listdir(self.filters_dir) for i, f in enumerate(filters): if f == self.filters_file: return filters[(i+1) % len(filters)] def skip_input(self, x=1): """ Skip to a different part of a video sequence. """ for i in range(0,x): self.camera.grab()
def marrHildrethDetector(self, img, slope_threshold, shape=(5,5), sigma=5):
    """ Marr-Hildreth edge detection: convolve the image with a
    Laplacian-of-Gaussian (LoG) kernel. """
    # note: slope_threshold is currently unused in this step
    filters = Filters()
    op = Operations()
    log = filters.laplaceOfGaussianKernel(shape, sigma)
    detect = op.doConvolution(log, img)
    return detect
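# --- illustrative sketch (not part of the original module) ---
# laplaceOfGaussianKernel is defined elsewhere in Filters; a plausible
# minimal construction of a Laplacian-of-Gaussian kernel could look like
# this (the normalisation details are an assumption, not the project's code):
import numpy as np

def log_kernel(shape=(5, 5), sigma=5.0):
    m, n = [(s - 1) / 2.0 for s in shape]
    y, x = np.ogrid[-m:m + 1, -n:n + 1]
    r2 = x * x + y * y
    # LoG(x, y) ~ (r^2 - 2*sigma^2) / sigma^4 * exp(-r^2 / (2*sigma^2))
    k = (r2 - 2.0 * sigma ** 2) / sigma ** 4 * np.exp(-r2 / (2.0 * sigma ** 2))
    return k - k.mean()  # force zero sum so flat regions give no response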
def setup_filter(self, time, data, **kwargs):
    """
    Load the calibration runs and convert the voltage signal to yaw angles

    Parameters
    ----------

    time : ndarray(k)

    data : ndarray(k)

    Returns
    -------

    time_stair : ndarray(n)
        Average time stamp over the stair step

    data_stair : ndarray(n)
        Average value of the selected stair step
    """
    # time and data should both be 1D and have the same shape!
    assert time.shape == data.shape

    runid = kwargs.get('runid', self.runid)
    # smoothen method: splines or moving average
    smoothen = kwargs.get('smoothen', 'splines')
    # what is the window of the moving average in seconds
    smooth_window = kwargs.get('smooth_window', 2)

    # specify the window of the staircase
    #start, end = 30100, -30001
    start = kwargs.get('start', 0)
    end = kwargs.get('end', len(time))
    dt = kwargs.get('dt', 1)
    cutoff_hz = kwargs.get('cutoff_hz', None)
    self.points_per_stair = kwargs.get('points_per_stair', 20)
    # the minimum required change in dt (or dt2) before a new stair is assumed
    self.stair_step_tresh = kwargs.get('stair_step_tresh', 1)
    #plot_data = kwargs.get('plot_data', False)
    #respath = kwargs.get('respath', None)
    #run = kwargs.get('run', None)

    # sample rate of the signal
    sample_rate = calc_sample_rate(time)

    # prepare the data
    time = time[start:end]
    # the actual raw signal
    data = data[start:end]

    # -------------------------------------------------
    # Progress plotting
    # -------------------------------------------------
    if self.plt_progress:
        plt.figure()
        Pxx, freqs = plt.psd(data, Fs=sample_rate, label='data')
        plt.show()
        plt.figure()
        plt.plot(time, data, label='raw data')

    # -------------------------------------------------
    # setup plot
    # -------------------------------------------------
    #labels = np.ndarray(3, dtype='<U100')
    #labels[0] = label
    #labels[1] = 'yawchan derivative'
    #labels[2] = 'psd'
    # remove any underscores for latex printing
    grandtitle = self.figfile.replace('_', '\_')
    plot = plotting.A4Tuned(scale=1.5)
    plot.setup(self.figpath+self.figfile+'_filter', nr_plots=3,
               grandtitle=grandtitle, wsleft_cm=1.5, wsright_cm=1.8,
               hspace_cm=1.2, size_x_perfig=10, size_y_perfig=5,
               wsbottom_cm=1.0, wstop_cm=1.5)

    # -------------------------------------------------
    # plotting original and smoothed signal
    # -------------------------------------------------
    ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 1)
    ax1.plot(time, data, 'b', label='raw data', alpha=0.6)
    data_raw = data.copy()

    # -------------------------------------------------
    # signal frequency filtering, if applicable
    # -------------------------------------------------
    # filter the local derivatives if applicable
    if cutoff_hz:
        filt = Filters()
        data_filt, N, delay = filt.fir(time, data, ripple_db=20,
                        freq_trans_width=0.5, cutoff_hz=cutoff_hz,
                        figpath=self.figpath,
                        figfile=self.figfile + 'filter_design',
                        sample_rate=sample_rate, plot=False)

        if self.plt_progress:
            # add the results of the filtering technique
            plt.plot(time[N-1:], data_filt[N-1:], 'r', label='freq filt')

        data = data_filt
        time = time[N-1:]#-delay
    else:
        N = 1

    # -------------------------------------------------------
    # smoothen the signal with some splines or moving average
    # -------------------------------------------------------
    # NOTE: the smoothing will make the transitions also smoother. This
    # is not good. The edges of the stair need to be steep! For the
    # binary data this is actually a good thing, since the dt's are
    # almost always the same between time steps. We would otherwise
    # need a dt based on several time steps.
    if smoothen == 'splines':
        print 'start applying spline ...',
        uni_spline = UnivariateSpline(time, data)
        data = uni_spline(time)
        print 'done!'
        NN = 0 # no time shift due to filtering?

        if self.plt_progress:
            plt.plot(time, data, label='spline data')

    elif smoothen == 'moving':
        print 'start calculating moving average ...',
        filt = Filters()
        # take a 2 second window, calculate the number of samples per window
        ws = int(smooth_window*sample_rate)
        data = filt.smooth(data, window_len=ws, window='hanning')
        NN = len(data) - len(time)
        data = data[NN:]
        print 'done!'

        if self.plt_progress:
            plt.plot(time, data, label='moving average')

    else:
        raise ValueError, 'smoothen method should be moving or splines'

    # -------------------------------------------------
    # additional smoothing: downsampling
    # -------------------------------------------------
    # downsample, and up again in order not to break the plotting further down
    time_down = np.arange(time[0], time[-1], 0.1)
    data_down = sp.interpolate.griddata(time, data, time_down)
    # and upsampling again
    data = sp.interpolate.griddata(time_down, data_down, time)

    # -------------------------------------------------
    # plotting original and smoothed signal
    # -------------------------------------------------
    ax1.plot(time, data, 'r', label='data smooth')
    ax1.grid(True)
    leg1 = ax1.legend(loc='best')
    leg1.get_frame().set_alpha(0.5)
    ax1.set_title('smoothing method: ' + smoothen)

    # -------------------------------------------------
    # local derivatives of the signal and filtering
    # -------------------------------------------------
    data_dt = np.ndarray(data.shape)
    data_dt[1:] = data[1:] - data[0:-1]
    data_dt[0] = np.nan
    data_dt = np.abs(data_dt)

    # frequency filter was applied here originally
    data_filt_dt = data_dt

    # if no threshold is given, just take 20% of the max value
    dt_max = np.nanmax(np.abs(data_filt_dt))*0.2
    dt_treshold = kwargs.get('dt_treshold', dt_max)

    # -------------------------------------------------
    # filter dt or dt2 above a certain threshold?
    # -------------------------------------------------
    # only keep values which are steady, meaning the dt signal is low!
    if dt == 2:
        tmp = np.ndarray(data_filt_dt.shape)
        tmp[1:] = data_filt_dt[1:] - data_filt_dt[0:-1]
        tmp[0] = np.nan
        data_filt_dt = tmp

    # based upon the filtering, only select data points for which the
    # filtered derivative is below a certain threshold
    staircase_i = np.abs(data_filt_dt).__ge__(dt_treshold)
    # reduce to 1D
    staircase_arg = np.argwhere(np.abs(data_filt_dt) <= dt_treshold).flatten()

    # -------------------------------------------------
    # replace values with too high dt with NaN
    # -------------------------------------------------
    # ---------------------------------
    # METHOD version 1, slower because of the staircase_arg computation above
    data_masked = data.copy()
    data_masked[staircase_i] = np.nan
    data_masked_dt = data_filt_dt.copy()
    data_masked_dt[staircase_i] = np.nan
    data_trim = data[staircase_arg]
    time_trim = time[staircase_arg]
    print 'max in data_masked_dt:', np.nanmax(data_masked_dt)
    # ---------------------------------
    # METHOD version 2, faster if staircase_arg is not required!
    ## make a copy of the original signal and fill in NaNs on the selected
    ## values
    #data_masked = data.copy()
    #data_masked[staircase_i] = np.nan
    #
    #data_masked_dt = data_filt_dt.copy()
    #data_masked_dt[staircase_i] = np.nan
    #
    ## remove all the nan values
    #data_trim = data_masked[np.isnan(data_masked).__invert__()]
    #time_trim = time[np.isnan(data_masked).__invert__()]
    #
    #dt_noise_treshold = np.nanmax(data_masked_dt)
    #print 'max in data_masked_dt', dt_noise_treshold
    # ---------------------------------

    ## figure out which dt's are above the threshold
    #data_trim2 = data_trim.copy()
    #data_trim2.sort()
    #data_trim2.
    ## where the dt of the masked format is above the noise threshold,
    ## we have a stair
    #data_trim_dt = np.abs(data_trim[1:] - data_trim[:-1])
    #argstairs = data_trim_dt.__gt__(dt_noise_treshold)
    #data_trim2 = data_trim_dt.copy()
    #data_trim_dt.sort()
    #data_trim_dt.__gt__(dt_noise_treshold)

    # -------------------------------------------------
    # intermediate checking of the signal
    # -------------------------------------------------
    if self.plt_progress:
        # add the results of the filtering technique
        plt.plot(time[N-1:], data_masked[N-1:], 'rs', label='data red')
        plt.legend(loc='best')
        plt.grid(True)
        plt.twinx()
        #plt.plot(time, data_filt_dt, label='data_filt_dt')
        plt.plot(time, data_masked_dt, 'm', label='data\_masked\_dt',
                 alpha=0.4)
        plt.legend(loc='best')
        plt.show()
        print 'saving plt_progress:',
        print self.figpath+'filter_design_progress.png'
        plt.savefig(self.figpath+'filter_design_progress.png')

    # -------------------------------------------------
    # check if the filtering was sane
    # -------------------------------------------------
    print 'data      :', data.shape
    print 'data_trim :', data_trim.shape
    print 'trim ratio:', len(data)/float(len(data_trim))
    # there should be at least one True value
    assert staircase_i.any()
    # they can't all be True, then the filtering is too heavy
    if len(data_trim) < len(data)*0.01:
        msg = 'dt_treshold is too low, not enough data left'
        raise ValueError, msg
    # if no data is filtered at all, filtering is too conservative
    elif len(data_trim) > len(data)*0.95:
        msg = 'dt_treshold is too high, too much data left'
        raise ValueError, msg
    # if the data array is too big, abort on memory concerns
    if len(data_trim) > 200000:
        msg = 'too many data points for stair case analysis (cfr memory)'
        raise ValueError, msg

    # -------------------------------------------------
    # read the average value over each stair (time and data)
    # -------------------------------------------------
    #try:
        ##np.save('time_trim', time_trim)
        ##np.save('data_trim', data_trim)
        ##np.save('staircase_arg', staircase_arg)
        ##tmp = np.array([self.points_per_stair, self.stair_step_tresh])
        ##np.save('tmp', tmp)
        #data_ordered, time_stair, data_stair, arg_stair \
            #= cython_func.order_staircase(time_trim, data_trim,
            #staircase_arg, self.points_per_stair, self.stair_step_tresh)
    #except ImportError:
    data_ordered, time_stair, data_stair, arg_stair \
        = self.order_staircase(time_trim, data_trim, staircase_arg)

    # convert arg_stair to a flat array and replace the start/stop pairs
    # with all the indices in between. Now we can select all stair values
    # in the raw dataset.
    arg_st_fl = np.empty(data_raw.shape, dtype=np.int)
    i = 0
    for k in range(arg_stair.shape[1]):
        #print '%6i %6i' % (arg_stair[0,k], arg_stair[1,k])
        tmp = np.arange(arg_stair[0,k], arg_stair[1,k]+1, 1, dtype=np.int)
        #print tmp, '->', i, ':', i+len(tmp)
        arg_st_fl[i:i+len(tmp)] = tmp
        i += len(tmp)
    # remove the unused elements from the array
    arg_st_fl = arg_st_fl[:i]

    # -------------------------------------------------
    # plotting of the smoothed signal and the stairs
    # -------------------------------------------------
    ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 2)
    ax1.plot(time, data, label='data smooth', alpha=0.6)
    # add the results of the filtering technique
    ax1.plot(time[N-1:], data_masked[N-1:], 'r', label='data masked')
    #ax1.plot(time[N-1:], data_filt[N-1:], 'g', label='data_filt')
    # also include the selected stair data
    figlabel = '%i stairs' % data_stair.shape[0]
    ax1.plot(time_stair, data_stair, 'ko', label=figlabel, alpha=0.4)
    ax1.grid(True)
    # the legend, on or off?
    #leg1 = ax1.legend(loc='upper left')
    #leg1.get_frame().set_alpha(0.5)

    # -------------------------------------------------
    # plotting derivatives on the right axis
    # -------------------------------------------------
    ax1b = ax1.twinx()
    #ax1b.plot(time[N:]-delay, data_s_dt[N:], alpha=0.2, label='data_s_dt')
    ax1b.plot(time[N:], data_filt_dt[N:], 'r', alpha=0.35,
              label='data\_filt\_dt')
    majorFormatter = FormatStrFormatter('%8.1e')
    ax1b.yaxis.set_major_formatter(majorFormatter)
    #ax1b.plot(time[N:], data_masked_dt[N:], 'b', alpha=0.2,
    #          label='data_masked_dt')
    #ax1b.plot(time[N-1:]-delay, filtered_x_dt[N-1:], alpha=0.2)
    #leg1b = ax1b.legend(loc='best')
    #leg1b.get_frame().set_alpha(0.5)
    #ax1b.grid(True)

    # -------------------------------------------------
    # 3rd plot to check if the raw stair signal is ok
    # -------------------------------------------------
    ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 3)
    ax1.plot(time[arg_st_fl], data_raw[arg_st_fl], 'k+', label='rawstair',
             alpha=0.1)
    ax1.plot(time[N-1:], data_masked[N-1:], 'r', label='data masked')
    ax1.set_xlabel('time [s]')

    # -------------------------------------------------
    # the power spectral density
    # -------------------------------------------------
    #ax3 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 3)
    #Pxx, freqs = ax3.psd(data, Fs=sample_rate, label='data smooth')
    ##Pxx, freqs = ax3.psd(data_dt, Fs=sample_rate, label='data_dt')
    ##Pxx, freqs = ax3.psd(data_filt_dt[N-1:], Fs=sample_rate,
    ##                     label='data_filt_dt')
    #ax3.legend()
    ##print Pxx.shape, freqs.shape

    plot.save_fig()

    # -------------------------------------------------
    # get amplitudes of the stair edges
    # -------------------------------------------------
    ## max step
    #data_trim_dt_sort = data_trim_dt.sort()[0]
    ## estimate at what kind of a delta we are looking for when changing
    ## stairs
    #data_dt_std = data_trim_dt.std()
    #data_dt_mean = (np.abs(data_trim_dt)).mean()
    #
    #time_data_dt = np.transpose(np.array([time, data_filt_dt]))
    #data_filt_dt_amps = HawcPy.dynprop().amplitudes(time_data_dt, h=1e-3)
    #
    #print '=== nr amplitudes'
    #print len(data_filt_dt_amps)
    #print data_filt_dt_amps

    # -------------------------------------------------
    # save the data
    # -------------------------------------------------
    filename = runid + '-time_stair'
    np.savetxt(self.pprpath + filename, time_stair)
    filename = runid + '-data_stair'
    np.savetxt(self.pprpath + filename, data_stair)

    # in order to maintain backwards compatibility, save the arguments
    # of the stair to self
    self.arg_st_fl = arg_st_fl  # flat, contains all indices on the stairs
    # start/stop indices for stair k: arg_stair[0,k], arg_stair[1,k]
    self.arg_stair = arg_stair

    return time_stair, data_stair
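# --- illustrative sketch (not part of the original module) ---
# The core of the stair detection above, on a toy signal: compute the local
# derivative, then keep only the samples where it stays below a threshold,
# i.e. the steady "stair" samples.
import numpy as np

data = np.array([0., 0., 0., 5., 5., 5., 9., 9.])
data_dt = np.empty_like(data)
data_dt[1:] = np.abs(data[1:] - data[:-1])
data_dt[0] = 0.0
steady = data_dt <= 1.0   # plays the role of dt_treshold
print data[steady]        # -> [ 0.  0.  0.  5.  5.  9.], the flat stair parts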
def setup_filter(self, respath, run, **kwargs):
    """
    Load the calibration runs and convert the voltage signal to yaw angles
    """
    # specify the window of the staircase
    #start, end = 30100, -30001
    start = kwargs.get('start', None)
    end = kwargs.get('end', None)
    figpath = kwargs.get('figpath', None)
    #figfile = kwargs.get('figfile', None)
    dt_treshold = kwargs.get('dt_treshold', None)
    #plot_data = kwargs.get('plot_data', False)
    #respath = kwargs.get('respath', None)
    #run = kwargs.get('run', None)

    # load the dspace mat file
    dspace = ojfresult.DspaceMatFile(respath + run)
    # the yaw channel
    ch = 6
    # or a more robust way of determining the channel number
    ch = dspace.labels_ch['Yaw Laser']

    # sample rate of the signal
    sample_rate = calc_sample_rate(dspace.time)

    # file name based on the run file
    figfile = dspace.matfile.split('/')[-1] + '_ch' + str(ch)

    # prepare the data
    time = dspace.time[start:end]
    # the actual yaw signal
    data = dspace.data[start:end,ch]

    # -------------------------------------------------
    # smoothen the signal with some splines
    # -------------------------------------------------
    # NOTE: the smoothing will make the transitions also smoother. This
    # is not good. The edges of the stair need to be steep!
    #smoothen = UnivariateSpline(dspace.time, dspace.data[:,ch], s=2)
    #data_s_full = smoothen(dspace.time)
    ## first the derivatives
    #data_s_dt = data_s_full[start+1:end+1] - data_s_full[start:end]
    ## then cut it off
    #data_s = data_s_full[start:end]

    # -------------------------------------------------
    # local derivatives of the yaw signal and filtering
    # -------------------------------------------------
    data_dt = dspace.data[start+1:end+1,ch] - dspace.data[start:end,ch]

    # filter the local derivatives
    filt = Filters()
    data_filt, N, delay = filt.fir(time, data, ripple_db=20,
                    freq_trans_width=0.5, cutoff_hz=0.3, plot=False,
                    figpath=figpath, figfile=figfile + 'filter_design',
                    sample_rate=sample_rate)

    data_filt_dt = np.ndarray(data_filt.shape)
    data_filt_dt[1:] = data_filt[1:] - data_filt[0:-1]
    data_filt_dt[0] = np.nan

    # -------------------------------------------------
    # smoothen the signal with some splines
    # -------------------------------------------------
    #smoothen = UnivariateSpline(time, data_filt, s=2)
    #data_s = smoothen(time)
    ## first the derivatives
    #data_s_dt = np.ndarray(data_s.shape)
    #data_s_dt[1:] = data_s[1:] - data_s[:-1]
    #data_s_dt[0] = np.nan

    # -------------------------------------------------
    # filter values above a certain threshold
    # -------------------------------------------------
    # only keep values which are steady, meaning the dt signal is low!
    # based upon the filtering, only select data points for which the
    # filtered derivative is below a certain threshold
    staircase_i = np.abs(data_filt_dt).__ge__(dt_treshold)

    # make a copy of the original signal and fill in NaNs on the selected
    # values
    data_reduced = data.copy()
    data_reduced[staircase_i] = np.nan
    data_reduced_dt = np.ndarray(data_reduced.shape)
    data_reduced_dt[1:] = np.abs(data_reduced[1:] - data_reduced[:-1])
    data_reduced_dt[0] = np.nan
    nonnan_i = np.isnan(data_reduced_dt).__invert__()
    dt_noise_treshold = data_reduced_dt[nonnan_i].max()
    print ' dt_noise_treshold ', dt_noise_treshold

    # remove all the nan values
    data_trim = data_reduced[np.isnan(data_reduced).__invert__()]
    time_trim = time[np.isnan(data_reduced).__invert__()]

    ## figure out which dt's are above the threshold
    #data_trim2 = data_trim.copy()
    #data_trim2.sort()
    #data_trim2.
    ## where the dt of the reduced format is above the noise threshold,
    ## we have a stair
    #data_trim_dt = np.abs(data_trim[1:] - data_trim[:-1])
    #argstairs = data_trim_dt.__gt__(dt_noise_treshold)
    #data_trim2 = data_trim_dt.copy()
    #data_trim_dt.sort()
    #data_trim_dt.__gt__(dt_noise_treshold)

    # -------------------------------------------------
    # read the average value over each stair (time and data)
    # -------------------------------------------------
    data_ordered, time_stair, data_stair = self.order_staircase(time_trim,
                                    data_trim, dt_noise_treshold*4.)

    # -------------------------------------------------
    # setup plot
    # -------------------------------------------------
    labels = np.ndarray(3, dtype='<U100')
    labels[0] = dspace.labels[ch]
    labels[1] = 'yawchan derivative'
    labels[2] = 'psd'
    plot = plotting.A4Tuned()
    title = figfile.replace('_', ' ')
    plot.setup(figpath+figfile+'_filter', nr_plots=2, grandtitle=title,
               figsize_y=20, wsleft_cm=2., wsright_cm=2.5)

    # -------------------------------------------------
    # plotting of the signal
    # -------------------------------------------------
    ax1 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 1)
    ax1.plot(time, data, label='data')
    # add the results of the filtering technique
    ax1.plot(time[N-1:], data_reduced[N-1:], 'r', label='data red')
    #ax1.plot(time[N-1:], data_filt[N-1:], 'g', label='data_filt')
    # also include the selected stair data
    label = '%i stairs' % data_stair.shape[0]
    ax1.plot(time_stair, data_stair, 'ko', label=label, alpha=0.2)
    ax1.grid(True)
    ax1.legend(loc='lower left')

    # -------------------------------------------------
    # plotting derivatives on the right axis
    # -------------------------------------------------
    ax1b = ax1.twinx()
    #ax1b.plot(time[N:]-delay, data_s_dt[N:], alpha=0.2, label='data_s_dt')
    ax1b.plot(time[N:], data_filt_dt[N:], 'r', alpha=0.2,
              label='data filt dt')
    #ax1b.plot(time[N:], data_reduced_dt[N:], 'b', alpha=0.2,
    #          label='data_reduced_dt')
    #ax1b.plot(time[N-1:]-delay, filtered_x_dt[N-1:], alpha=0.2)
    ax1b.legend()
    ax1b.grid(True)

    # -------------------------------------------------
    # the power spectral density
    # -------------------------------------------------
    ax3 = plot.fig.add_subplot(plot.nr_rows, plot.nr_cols, 2)
    Pxx, freqs = ax3.psd(data, Fs=sample_rate, label='data')
    Pxx, freqs = ax3.psd(data_dt, Fs=sample_rate, label='data dt')
    #Pxx, freqs = ax3.psd(data_s_dt, Fs=sample_rate, label='data_s_dt')
    Pxx, freqs = ax3.psd(data_filt_dt[N-1:], Fs=sample_rate,
                         label='data filt dt')
    ax3.legend()
    #print Pxx.shape, freqs.shape

    plot.save_fig()

    # -------------------------------------------------
    # get amplitudes of the stair edges
    # -------------------------------------------------
    ## max step
    #data_trim_dt_sort = data_trim_dt.sort()[0]
    ## estimate at what kind of a delta we are looking for when changing
    ## stairs
    #data_dt_std = data_trim_dt.std()
    #data_dt_mean = (np.abs(data_trim_dt)).mean()
    #
    #time_data_dt = np.transpose(np.array([time, data_filt_dt]))
    #data_filt_dt_amps = HawcPy.dynprop().amplitudes(time_data_dt, h=1e-3)
    #
    #print '=== nr amplitudes'
    #print len(data_filt_dt_amps)
    #print data_filt_dt_amps

    return time_stair, data_stair
class Client(DirectObject):
    """
    Client class: handles GUI/input, audio and rendering
    """
    def __init__(self):
        log.debug('Starting Client')
        #open a window... but first set all the needed props
        wp=self.loadWindoProperites()
        #open the window
        base.openMainWindow(props = wp)
        #base.setBackgroundColor(0.06, 0.1, 0.12, 1)
        base.setBackgroundColor(0.0, 0.0, 0.0, 1)
        base.disableMouse()
        base.enableParticles()

        #needed to determine what window event fired
        self.window_focused=base.win.getProperties().getForeground()
        self.window_x=base.win.getXSize()
        self.window_y=base.win.getYSize()
        self.window_minimized=base.win.getProperties().getMinimized()

        #filter manager, post process
        self.filters=Filters()

        #audio sound effects (sfx) + music
        self.audio=Audio()
        self.audio.setMusic('background')
        self.audio.playMusic()

        #light manager
        self.lights=LightManager()

        #setup the user interface (gui+key/mouse bind)
        self.ui=UserInterface()

        #skybox
        self.sun_and_sky=Skybox(self.lights)

        #player (character) droid
        self.droid=PCDroid(self.ui)

        #some vars used later
        self.map_name=None
        self.loading_status=set()
        self.level_root=render.attachNewNode('level_root')
        self.level_root.hide()
        self.is_in_game=False

        #events
        base.win.setCloseRequestEvent('exit-event')
        self.accept('exit-event',self.onClientExit)
        self.accept( 'window-event', self.onWindowEvent)
        self.accept( 'window-reset', self.onWindowReset)
        self.accept( 'client-mouselock', self.setMouseLock)
        self.accept( 'load-level', self.onLevelLoad)
        self.accept( 'loading-done', self.onLoadingDone)
        self.accept( 'reload-shaders', self.onShaderReload)
        self.accept( 'client-set-team', self.onTeamCahnge)
        self.accept( 'client-quit', self.onQuit)

        # Task
        taskMgr.add(self.update, 'client_update')
        log.debug('Client started')

    def doSomeStuffTsk(self, task):
        x=deque(range(5000))
        for i in xrange(999):
            random.shuffle(x)
            #print i, x[0]
        #print 'done'
        return task.done

    def setMouseLock(self, lock):
        wp = WindowProperties.getDefault()
        if lock:
            wp.setMouseMode(WindowProperties.M_confined)
        else:
            wp.setMouseMode(WindowProperties.M_relative)
        if not cfg['use-os-cursor']:
            wp.setCursorHidden(True)
        base.win.requestProperties(wp)

    def loadWindoProperites(self):
        #check if we can open a fullscreen window at the requested size
        if cfg['fullscreen']:
            mods=[]
            for mode in base.pipe.getDisplayInformation().getDisplayModes():
                mods.append([mode.width, mode.height])
            if list(cfg['win-size']) not in mods:
                cfg['fullscreen']=False
                log.warning('Cannot open fullscreen window at '+str(cfg['win-size']))

        #the window props should be set by this time, but make sure
        wp = WindowProperties.getDefault()
        try:
            wp.setUndecorated(cfg['undecorated'])
            wp.setFullscreen(cfg['fullscreen'])
            wp.setSize(cfg['win-size'][0],cfg['win-size'][1])
            wp.setFixedSize(cfg['win-fixed-size'])
        except:
            log.warning('Failed to set window properties, Traceback:')
            for error in traceback.format_exc().splitlines()[1:]:
                log.warning(error.strip())

        #these probably won't be in the config (?)
        wp.setOrigin(-2,-2)
        wp.setTitle('A4P')
        if not cfg['use-os-cursor']:
            wp.setCursorHidden(True)
        return wp

    def loadLevel(self, task):
        log.debug('Client loading level...')
        with open(path+'maps/'+self.map_name+'.json') as f:
            values=json.load(f)

        #set the time
        self.sun_and_sky.setTime(values['level']['time'])
        #self.sun_and_sky.show()

        #load visible objects
        for obj_id, obj in enumerate(values['objects']):
            mesh=loader.loadModel(path+obj['model'])
            mesh.reparentTo(self.level_root)
            mesh.setPosHpr(tuple(obj['pos']), tuple(obj['hpr']))
            #we may need to find this mesh later to link it to a Bullet object
            mesh.setTag('id_'+str(obj_id), str(obj_id))
            for name, value in obj['shader_inputs'].items():
                if isinstance(value, basestring):
                    mesh.setShaderInput(str(name), loader.loadTexture(path+value))
                if isinstance(value, float):
                    mesh.setShaderInput(str(name), value)
                if isinstance(value, list):
                    if len(value) == 2:
                        mesh.setShaderInput(str(name), Vec2(value[0], value[1]))
                    elif len(value) == 3:
                        mesh.setShaderInput(str(name), Vec3(value[0], value[1], value[2]))
                    elif len(value) == 4:
                        mesh.setShaderInput(str(name), Vec4(value[0], value[1], value[2], value[3]))
            mesh.setShader(Shader.load(Shader.SLGLSL, obj['vertex_shader'],obj['fragment_shader']))

        #set the music
        self.audio.setMusic(values['level']['music'])
        #self.level_root.prepareScene(base.win.getGsg())
        messenger.send('loading-done', ['client'])
        return task.done

    #events
    def onQuit(self):
        self.level_root.removeNode()
        self.level_root=render.attachNewNode('level_root')
        self.level_root.hide()
        if self.ui.is_zoomed:
            self.ui.zoom()
        self.sun_and_sky.hide()
        self.droid.disable()
        self.ui.unbindKeys()
        self.ui.in_game_menu.hide()
        self.ui.main_menu.show()
        self.audio.setMusic('background')
        self.loading_status=set()
        self.is_in_game=False
        messenger.send('world-clear-level')

    def onTeamCahnge(self, team):
        self.droid.setTeam(team)

    def onShaderReload(self):
        log.debug('Client: Reloading shaders')
        for mesh in self.level_root.getChildren():
            shader=mesh.getShader()
            v_shader=shader.getFilename(Shader.ST_vertex)
            f_shader=shader.getFilename(Shader.ST_fragment)
            mesh.setShader(Shader.load(Shader.SLGLSL, v_shader,f_shader))
        self.ui.main_menu.setShader(path+'shaders/gui_v.glsl', path+'shaders/gui_f.glsl')
        self.filters.reset()

    def onLoadingDone(self, target):
        log.debug(str(target)+' loading done')
        self.loading_status.add(target)
        if self.loading_status == set(['client', 'server', 'world']):
            self.ui.main_menu.hide()
            self.level_root.show()
            self.sun_and_sky.show()
            self.ui.bindKeys()
            self.droid.node.setPos(render, 20,0,2)
            self.droid.lockCamera()
            self.droid.model.show()
            self.droid.rig.show()
            self.droid.gun.show()
            self.ui.in_game_menu.showElements('hud_')
            self.ui.hideSoftCursor()
            self.ui.is_main_menu=False
            self.is_in_game=True
            messenger.send('world-link-objects', [self.droid.node, 'pc_droid_node'])

    def onLevelLoad(self, map_name):
        self.map_name=map_name
        #wait 1.0 sec for the loading animation to finish, in case loading takes < 1.0 sec
        taskMgr.doMethodLater(1.0, self.loadLevel, 'client_loadLevel_task', taskChain = 'background_chain')
        #taskMgr.add(self.loadLevel, 'client_loadLevel_task', taskChain = 'background_chain')

        #the client needs to load/setup:
        # -visible geometry
        # -environment (skybox/dome + sunlight direction + fog + ???)
        # -water plane
        # -unmovable (point)light sources
        # -unmovable vfx
        # -the player droid

    def onClientExit(self):
        log.debug('Client exit')
        self.audio.cleanup()
        app.exit()

    def onWindowReset(self):
        wp=self.loadWindoProperites()
        base.win.requestProperties(wp)

    def onWindowMinimize(self):
        self.window_minimized=base.win.getProperties().getMinimized()
        log.debug('window-event: Minimize is '+str(self.window_minimized))

    def onWindowFocus(self):
        self.window_focused=base.win.getProperties().getForeground()
        log.debug('window-event: Focus set to '+str(self.window_focused))
        if self.is_in_game:
            self.ui.in_game_menu.showMenu(self.window_focused)
        if not self.window_focused:
            self.ui.cursor_pos=(0,0,0)
        if cfg['pause-on-focus-lost']:
            if not self.window_focused:
                self.audio.pauseMusic()
                base.win.setActive(False)
            else:
                self.audio.resumeMusic()
                base.win.setActive(True)

    def onWindowResize(self):
        self.window_x=base.win.getXSize()
        self.window_y=base.win.getYSize()
        log.debug('window-event: Resize')
        self.filters.update()
        self.ui.updateGuiNodes()

    def onWindowEvent(self,window=None):
        if window is not None: #window is None if panda3d is not yet started
            if self.window_x!=base.win.getXSize() or self.window_y!=base.win.getYSize():
                self.onWindowResize()
            elif window.getProperties().getMinimized() != self.window_minimized:
                self.onWindowMinimize()
            elif window.getProperties().getForeground() != self.window_focused:
                self.onWindowFocus()

    #tasks
    def update(self, task):
        dt = globalClock.getDt()
        render.setShaderInput('camera_pos', base.cam.getPos(render))
        return task.cont
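# --- illustrative sketch (not part of the original module) ---
# onWindowEvent above multiplexes one Panda3D "window-event" into the
# resize/minimize/focus handlers by caching the last known window
# properties. The same dispatch pattern, stripped of Panda3D:
class WindowWatcher(object):
    def __init__(self, size, minimized, focused):
        self.size, self.minimized, self.focused = size, minimized, focused

    def dispatch(self, size, minimized, focused):
        # compare against the cached state to decide which event fired
        if size != self.size:
            self.size = size
            return 'resize'
        elif minimized != self.minimized:
            self.minimized = minimized
            return 'minimize'
        elif focused != self.focused:
            self.focused = focused
            return 'focus'
        return None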
def addReviewFilters(db, creator, user, review, reviewer_directory_ids, reviewer_file_ids, watcher_directory_ids, watcher_file_ids): cursor = db.cursor() cursor.execute("INSERT INTO reviewassignmentstransactions (review, assigner) VALUES (%s, %s) RETURNING id", (review.id, creator.id)) transaction_id = cursor.fetchone()[0] def add(filter_type, directory_ids, file_ids): for directory_id, file_id in izip(directory_ids, file_ids): cursor.execute("""SELECT id, type FROM reviewfilters WHERE review=%s AND uid=%s AND directory=%s AND file=%s""", (review.id, user.id, directory_id, file_id)) row = cursor.fetchone() if row: old_filter_id, old_filter_type = row if old_filter_type == filter_type: continue else: cursor.execute("""DELETE FROM reviewfilters WHERE id=%s""", (old_filter_id,)) cursor.execute("""INSERT INTO reviewfilterchanges (transaction, uid, directory, file, type, created) VALUES (%s, %s, %s, %s, %s, false)""", (transaction_id, user.id, directory_id, file_id, old_filter_type)) cursor.execute("""INSERT INTO reviewfilters (review, uid, directory, file, type, creator) VALUES (%s, %s, %s, %s, %s, %s)""", (review.id, user.id, directory_id, file_id, filter_type, creator.id)) cursor.execute("""INSERT INTO reviewfilterchanges (transaction, uid, directory, file, type, created) VALUES (%s, %s, %s, %s, %s, true)""", (transaction_id, user.id, directory_id, file_id, filter_type)) add("reviewer", reviewer_directory_ids, repeat(0)) add("reviewer", repeat(0), reviewer_file_ids) add("watcher", watcher_directory_ids, repeat(0)) add("watcher", repeat(0), watcher_file_ids) filters = Filters() filters.load(db, review=review, user=user) if user not in review.reviewers and user not in review.watchers and user not in review.owners: cursor.execute("""INSERT INTO reviewusers (review, uid, type) VALUES (%s, %s, 'manual')""", (review.id, user.id,)) delete_files = set() insert_files = set() if watcher_directory_ids or watcher_file_ids: # Unassign changes currently assigned to the affected user. cursor.execute("""SELECT reviewfiles.id, reviewfiles.file FROM reviewfiles JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id) WHERE reviewfiles.review=%s AND reviewuserfiles.uid=%s""", (review.id, user.id)) for review_file_id, file_id in cursor: if not filters.isReviewer(db, user.id, file_id): delete_files.add(review_file_id) if reviewer_directory_ids or reviewer_file_ids: # Assign changes currently not assigned to the affected user. 
cursor.execute("""SELECT reviewfiles.id, reviewfiles.file FROM reviewfiles JOIN changesets ON (changesets.id=reviewfiles.changeset) JOIN commits ON (commits.id=changesets.child) JOIN gitusers ON (gitusers.id=commits.author_gituser) LEFT OUTER JOIN usergitemails USING (email) LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id AND reviewuserfiles.uid=%s) WHERE reviewfiles.review=%s AND (usergitemails.uid IS NULL OR usergitemails.uid!=%s) AND reviewuserfiles.uid IS NULL""", (user.id, review.id, user.id)) for review_file_id, file_id in cursor: if filters.isReviewer(db, user.id, file_id): insert_files.add(review_file_id) if delete_files: cursor.executemany("DELETE FROM reviewuserfiles WHERE file=%s AND uid=%s", izip(delete_files, repeat(user.id))) cursor.executemany("INSERT INTO reviewassignmentchanges (transaction, file, uid, assigned) VALUES (%s, %s, %s, false)", izip(repeat(transaction_id), delete_files, repeat(user.id))) if insert_files: cursor.executemany("INSERT INTO reviewuserfiles (file, uid) VALUES (%s, %s)", izip(insert_files, repeat(user.id))) cursor.executemany("INSERT INTO reviewassignmentchanges (transaction, file, uid, assigned) VALUES (%s, %s, %s, true)", izip(repeat(transaction_id), insert_files, repeat(user.id))) return generateMailsForAssignmentsTransaction(db, transaction_id)
def setUp(self): self.filters = Filters()
class Flight:
    def __init__(self):
        """ Return a new, empty flight object. """
        self.username = None
        self.flight_id = 0
        self.table_name = 'tb_' + str(self.flight_id)
        self.info_dict = dict()
        self.flight_type = None
        self.depart = None
        self.dest = None
        self.depart_date = None
        self.return_date = None
        self.filters = None
        self.notifications = None

    # store the object's information in table general_info
    def __new_flight__(self, flight_id: int, username: str, depart: str,
                       dest: str, depart_date: str, flight_type: str,
                       return_date: Optional[str] = None):
        """
        Creates a basic flight instance, its table, and its dict.
        :param flight_id:
        :param username:
        :param depart:
        :param dest:
        :param depart_date:
        :param flight_type:
        :param return_date:
        :return:
        COMMENT: need to increment flight_id by 1 in load_app
        """
        self.username = username
        self.flight_id = flight_id
        self.table_name = 'tb_' + str(flight_id)
        self.flight_type = flight_type
        self.depart = depart
        self.dest = dest
        self.depart_date = depart_date
        self.return_date = return_date
        # Create table and dicts
        mydb = mysql.connector.connect(host='localhost', user='******',
                                       passwd='flightplanner',
                                       database='FP_database')
        mycursor = mydb.cursor()
        set_up_command = 'CREATE TABLE {0} (Flight_id int, Date varchar(255), Min_eco int, Min_bus int, Avg_econ int,' \
                         ' Avg_bus int, Track_date varchar(255));'.format(self.table_name)
        mycursor.execute(set_up_command)
        mydb.commit()
        mycursor.close()
        self.info_dict = dict()
        self.filters = Filters()
        self.filters.require_filters()
        self.notifications = Notification()
        self.notifications.require_notifications()

    def __load_flight_dict__(self):
        """
        Loads the flight's dictionary from the flight table, with Track_date
        as the dict key.
        :return: a flight object with a loaded dict.
        """
        mydb = mysql.connector.connect(host='localhost', user='******',
                                       passwd='flightplanner',
                                       database='FP_database')
        mycursor = mydb.cursor()
        temp_dict = dict()
        mycursor.execute("SELECT * FROM {0};".format(self.table_name))
        data_list = mycursor.fetchall()
        for record in data_list:
            temp_dict[record[6]] = record[:6]
        self.info_dict = temp_dict
        mycursor.close()

    def commit_flight_db(self, min_eco: int, min_bus: int, avg_econ: int, avg_bus: int):
        """
        Updates both the flight table and the flight dict using newly
        extracted info from the web.
        :param min_eco:
        :param min_bus:
        :param avg_econ:
        :param avg_bus:
        :return:
        """
        mydb = mysql.connector.connect(host='localhost', user='******',
                                       passwd='flightplanner',
                                       database='FP_database')
        mycursor = mydb.cursor()
        # use a parameterized query so the date strings are quoted correctly
        record_command = ("INSERT INTO {0} (Flight_id, Date, Min_eco, Min_bus, Avg_econ,"
                          " Avg_bus, Track_date) VALUES (%s, %s, %s, %s, %s, %s, %s)"
                          ).format(self.table_name)
        mycursor.execute(record_command, (self.flight_id, self.depart_date,
                                          min_eco, min_bus, avg_econ, avg_bus,
                                          str(datetime.date.today())))
        mydb.commit()
        mycursor.close()
        # Now update the dict
        self.info_dict[str(datetime.date.today())] = (self.flight_id, self.depart_date,
                                                      min_eco, min_bus, avg_econ, avg_bus)
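# --- note on the parameterized INSERT above (not part of the original module) ---
# Formatting a date string straight into the SQL text produces an unquoted
# token that MySQL reads as arithmetic, e.g.
#   INSERT INTO tb_0 (...) VALUES (0, 2021-05-01, ...)   -- 2021-05-01 == 2015
# whereas cursor.execute(sql, params) lets the driver quote the values
# correctly and also avoids SQL injection through user-supplied strings.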
if __name__ == "__main__": im = Image.open("images/ens.jpg") width = im.size[0] height = im.size[1] # im = im.convert("L") pix = im.load() # Preparing the convolution convoluted_im = Image.new("RGB", (width, height)) # convoluted_im = Image.new("L", (width, height)) convoluted_pix = convoluted_im.load() # Importing the filters F = Filters() _ = [F.generic(), F.box_blur(), F.gauss(), F.sobel(), F.laplace(), F.horizontal_sobel()] names = ["generic", "box_blur", "gauss", "sobel_v", "laplace", "sobel_h"] for i, x in enumerate(_): f = _[i] # convolution(pix, convoluted_pix, width, height, f) rgb_convolution(pix, convoluted_pix, width, height, f) convoluted_im.save("images/ens_%sfilter.jpg" % names[i]) # im.show() # convoluted_im.show() # Generic Pillow filter methods: # im1 = im.filter(ImageFilter.MedianFilter(3)) # im1.show() # im2 = im.filter(ImageFilter.GaussianBlur(0))
def __init__(self, output_file, get_word_freq = None): self.get_word_freq = get_word_freq self.new_words = wordb.open(output_file) self.filters = Filters() self.n_killed = 0 self.n_added = 0