def trendCurves():
    """Run the trend-curves experiment on a mixed normal/spammer population."""
    model = MixedUsersModel()
    experiment_file = spamModelFolder + model.id
    # Wipe any results left over from a previous run of this model.
    GeneralMethods.runCommand('rm -rf %s' % experiment_file)
    run(
        model=model,
        addUsersMethod=User.addUsersUsingRatio,
        analysisMethods=[(Analysis.trendCurves, 1)],
        ratio={'normal': 0.985, 'spammer': 0.015},
        experimentFileName=experiment_file,
    )
    Analysis.trendCurves(experimentFileName=experiment_file)
def calculate_weightAvg_async(user):
    """Recompute the 10-sample rolling weight average and persist it.

    Builds a daily pandas time series from every Track (ordered by
    timestamp), computes a 10-point rolling mean, and saves a fresh
    Analysis document (authored by *user*) holding one DailyAnalysis
    entry per point of the series.

    NOTE(review): this queries ALL Track documents, not only *user*'s —
    confirm that is intended given the per-user Analysis it produces.
    """
    tracks = Track.objects.order_by('timestamp')
    times = []
    weights = []
    post_refs = []
    # One daily-frequency Period per track so pandas can index by day.
    for track in tracks:
        times.append(Period(track.timestamp, freq='D'))
        weights.append(track.weight)
        post_refs.append(track.to_dbref())
    ts = Series(weights, index=PeriodIndex(times))
    # NOTE(review): rolling_mean is the legacy pandas API; modern pandas
    # spells this ts.rolling(10).mean().
    mean = rolling_mean(ts, 10)
    # BUG FIX: removed leftover debug statement `print mean`.
    new_analysis = Analysis(author=user)
    for i, entry in enumerate(mean):
        new_analysis.dailyAnalysis.append(
            DailyAnalysis(
                weightAvg=entry,
                date=times[i].to_timestamp(),
                postRef=post_refs[i],
            )
        )
    new_analysis.save()
def get(self):
    """Delete entities older than a fortnight for the requested datatype.

    The 'datatype' query parameter selects the model to tidy: 'tweet',
    'analysis', or 'gdata'. Writes a deletion summary to the response.
    """
    datatype = self.request.get('datatype')
    fortnight_ago = datetime.now() + timedelta(days=-14)
    counter = 0
    self.response.out.write(
        "Looking to delete stuff from before " + str(fortnight_ago) + "<br>")
    # Each supported datatype maps to (model, per-request batch limit).
    # The original copy-pasted branches used 1000/1000/50 respectively;
    # the smaller gdata batch is preserved as-is.
    targets = {
        "tweet": (Tweet, 1000),
        "analysis": (Analysis, 1000),
        "gdata": (AnalysisGData, 50),
    }
    if datatype in targets:
        model, limit = targets[datatype]
        old_items = model.gql(
            'WHERE created_at < :before LIMIT %d' % limit,
            before=fortnight_ago)
        for item in old_items:
            item.delete()
            counter += 1
    else:
        self.response.out.write("Please say what datatype you'd like to tidy up. ")
    self.response.out.write(str(counter) + " items deleted")
def demo():
    """Render the demo page using the fixed demo account's data."""
    DEMO_USER_ID = '513a07ec9c7c840007acfc52'  # me!
    demo_posts = Track.objects(author=DEMO_USER_ID)
    demo_analysis = Analysis.objects(author=DEMO_USER_ID).first()
    return render_template(
        "demo.html",
        title='Demo',
        posts=demo_posts,
        analysis=demo_analysis,
    )
def ProcessJob(token):
    """ Processes a job into a json document, which is returned.
        If there are any errors, it will return False. """
    # BUG FIX: indexing with [0] raised IndexError for an unknown token,
    # breaking the documented "return False on errors" contract.
    # .first() returns None instead, which we translate to False.
    job = Analysis.objects(analysis_token=token).first()
    if job is None:
        return False
    CountAlleles(job)
    schema = GenerateSchema(job)
    return FixSchema(schema)
def user(name):
    """Show a user's profile page; users may only view their own profile."""
    user = User.objects(name=name).first()
    # 'is None' rather than '== None' (PEP 8; also safe against custom __eq__).
    if user is None:
        flash('User ' + name + ' not found.')
        return redirect(url_for('index'))
    if g.user == user:
        posts = Track.objects(author=user)
        analysis = Analysis.objects(author=g.user).first()
        return render_template('user.html', user=user, posts=posts,
                               analysis=analysis)
    flash('You can only look at your own profile.')
    return redirect(url_for('index'))
def index():
    """Home page: accept new Track submissions and show the user's feed."""
    form = TrackForm()
    if form.validate_on_submit():
        new_track = Track(
            weight=form.weight.data,
            happy=form.happy.data,
            diet=form.diet.data,
            exercise=form.exercise.data,
            floss=form.floss.data,
            meditation=form.meditation.data,
            note=form.note.data,
            timestamp=datetime.utcnow(),
            author=g.user.to_dbref(),
        )
        new_track.save()
        flash('Your post is now live!')
        # Refresh the rolling weight average now that a new post exists.
        calculate_weightAvg_async(g.user.to_dbref())
        return redirect(url_for('index'))
    user_posts = Track.objects(author=g.user)
    user_analysis = Analysis.objects(author=g.user).first()
    return render_template("index.html", title='Home', form=form,
                           posts=user_posts, analysis=user_analysis)
def sample_analyses(request):
    """Render the sample-analyses page with every available analysis type."""
    context = {'types': Analysis.get_types()}
    return render(request, 'analysis/sample_analyses.html', context)
def testCeilingAlarmAndStandardProcessing(self):
    """End-to-end test of a process spec (MAX/SUM/COUNT columns plus an
    alarm count) combined with a speeding alarm rule, run over two batches
    of records for one vehicle. Verifies the Analysis rollup accumulates
    across batches and that exactly two speeding alarms fire with a
    correctly rendered alert message."""
    self.process = ProcessTask.Create(self.e)
    # Four processers: max speed, running sum/count of bearing, and a
    # running count of alarms raised during processing.
    spec = json.dumps({
        'processers': [{
            'calculation': 'MAX({speed})',
            'column': 'max_speed',
            'analysis_key_pattern': ANALYSIS_KEY_PATTERN
        }, {
            'calculation': '. + SUM({bearing})',
            'column': 'total_bearing',
            'analysis_key_pattern': ANALYSIS_KEY_PATTERN
        }, {
            'calculation': '. + COUNT({bearing})',
            'column': 'count_bearing',
            'analysis_key_pattern': ANALYSIS_KEY_PATTERN
        }, {
            'calculation': '. + COUNT(ALARMS())',
            'column': 'count_alarms',
            'analysis_key_pattern': ANALYSIS_KEY_PATTERN
        }]
    })
    self.process.Update(spec=spec, rule_ids=[self.speeding_alarm.key().id()])
    self.process.put()
    # Apply our process to our sensor
    self.sp = SensorProcessTask.Create(self.e, self.process, self.vehicle_1)
    self.sp.put()
    BATCH_1 = {
        'speed': [
            0, 5, 15, 35, 60, 80, 83, 88, 85, 78, 75, 75, 76, 81, 89, 92,
            90, 83, 78
        ],  # We speed twice
        'bearing': [0, 0, 0, 0, 5, 3, 3, 3, 4, 5, 0, 0, 0, 0, 1, 1, 2, 3, 2]
    }
    self.__createNewRecords(BATCH_1,
                            first_dt=datetime.now() - timedelta(minutes=5))
    self.__runProcessing()

    # Confirm analyzed max speed
    a = Analysis.GetOrCreate(self.vehicle_1, ANALYSIS_KEY_PATTERN)
    self.assertIsNotNone(a)
    self.assertEqual(a.columnValue('max_speed'), max(BATCH_1['speed']))

    # Confirm we counted new alarms in analysis
    # self.assertEqual(a.columnValue('count_alarms'), 2) TODO: This fails!
    self.sp = SensorProcessTask.Get(self.process, self.vehicle_1)
    self.assertEqual(self.sp.status_last_run, PROCESS.OK)

    # Confirm speeding alarms (2)
    alarms = Alarm.Fetch(self.vehicle_1, self.speeding_alarm)
    self.assertEqual(len(alarms), 2)

    # Test alarm notifications
    # TODO: Test output of notification (e.g. log messages or contact records)
    a = alarms[0]  # second alarm
    message = a.render_alert_message(recipient=self.owner)
    SPEEDING_ALERT_MESSAGE_RENDERED = "Hello Dan Owner, %s was speeding at 81 at %s" % (
        TEST_SENSOR_ID,
        tools.sdatetime(a.dt_start, fmt="%H:%M", tz="Africa/Nairobi"))
    self.assertEqual(message, SPEEDING_ALERT_MESSAGE_RENDERED)

    # Second batch: the ". +" columns should accumulate on top of batch 1.
    BATCH_2 = {'speed': [76, 75, 78, 73, 60], 'bearing': [0, 0, 2, 0, 5]}
    self.__createNewRecords(BATCH_2)
    self.__runProcessing()
    a = Analysis.GetOrCreate(self.vehicle_1, ANALYSIS_KEY_PATTERN)
    self.assertEqual(a.columnValue('total_bearing'),
                     sum(BATCH_1['bearing']) + sum(BATCH_2['bearing']))
    self.assertEqual(a.columnValue('count_bearing'),
                     len(BATCH_1['bearing']) + len(BATCH_2['bearing']))
    self.assertEqual(a.columnValue('count_alarms'), 2)
    self.assertEqual(self.sp.status_last_run, PROCESS.OK)
def testAnalysisAPIs(self):
    """Exercise the analysis REST endpoints: single update, batch (multi)
    update, merge semantics on repeated batch updates (existing columns
    preserved), and single/multi detail fetches."""
    self.analysis = Analysis.Get(self.e, "ROLLUP", get_or_insert=True)
    self.analysis.put()

    # Test update
    params = self.__commonParams()
    params.update({
        'akn': 'ROLLUP',
        'cols': 'TOTAL,MINIMUM',
        'TOTAL': 10,
        'MINIMUM': 2.5
    })
    result = self.post_json("/api/analysis", params)
    self.assertTrue(result['success'])

    # Test batch update
    data = {
        'ROLLUP1': {
            'TOTAL': 4,
            'MINIMUM': 0
        },
        'ROLLUP2': {
            'TOTAL': -4,
            'MINIMUM': 10
        }
    }
    params = self.__commonParams()
    params.update({'data': json.dumps(data)})
    result = self.post_json("/api/analysis/multi", params)
    self.assertTrue(result['success'])

    # Test batch update (to confirm other values not overwritten)
    data = {'ROLLUP1': {'NEW': 'hello'}}
    params = self.__commonParams()
    params.update({'data': json.dumps(data)})
    result = self.post_json("/api/analysis/multi", params)
    self.assertTrue(result['success'])

    # Confirm update occurred preserving other values
    rollup1 = Analysis.get_by_key_name('ROLLUP1', parent=self.e)
    self.assertEqual(rollup1.columnValue('NEW'), 'hello')
    self.assertEqual(rollup1.columnValue('TOTAL'), 4)

    rollup1 = Analysis.get_by_key_name('ROLLUP1', parent=self.e)
    rollup2 = Analysis.get_by_key_name('ROLLUP2', parent=self.e)
    self.assertEqual(rollup1.columnValue('TOTAL'), 4)
    self.assertEqual(rollup2.columnValue('TOTAL'), -4)

    # Create second record
    params.update({
        'akn': 'TODAY',
        'cols': 'TOTAL,MINIMUM',
        'TOTAL': 1,
        'MINIMUM': 0
    })
    result = self.post_json("/api/analysis", params)
    self.assertTrue(result['success'])

    # Test detail
    params = self.__commonParams()
    params['with_props'] = 1
    result = self.get_json("/api/analysis/ROLLUP", params)
    self.assertTrue(result['success'])
    # Column values come back as strings in the detail payload.
    self.assertEqual(result['data']['analysis']['columns']['TOTAL'], '10')
    self.assertEqual(result['data']['analysis']['columns']['MINIMUM'], '2.5')

    # Test detail multi
    params = self.__commonParams()
    params['with_props'] = 1
    result = self.get_json("/api/analysis/multi/ROLLUP,TODAY", params)
    self.assertTrue(result['success'])
    self.assertEqual(type(result['data']['analyses']['ROLLUP']), dict)
    self.assertEqual(type(result['data']['analyses']['TODAY']), dict)
def get_game_analysis(self, request):
    """Return history of in-game choices.

    Raises:
        endpoints.NotFoundException: if no games exist.
    """
    # BUG FIX: an NDB Query object is always truthy, so the original
    # `if not games` check on the unfetched query could never fire.
    # Fetch the results first so the emptiness test actually works.
    games = Game.query().order(Game.user).fetch()
    if not games:
        raise endpoints.NotFoundException('Games not found.')
    return Analysis(analysis=[game.to_game_analysis() for game in games])
def create(self, request):
    """Create an Analysis from the request payload and return it as JSON."""
    new_analysis = Analysis(**request.data)
    new_analysis.save()
    return HttpResponse(new_analysis.to_json())