def test_set(self):
    """The -t command-line flag should set the clock's total run time."""
    # Run the simulation with two different durations and confirm the
    # reported clock total matches each requested value.
    cases = (
        (0.2, "python main.py -t 0.2 > test_clock.txt"),
        (0.5, "python main.py -t 0.5 > test_clock.txt"),
    )
    for expected_total, command in cases:
        os.system(command)
        parsed = parseData("test_clock.txt")
        self.assertEqual(expected_total, parsed['clock']['total'])
def test_set(self):
    """A road length of 500 given via -r should propagate through the output."""
    os.system("python main.py -t 0.2 -r 500 > test_road.txt")
    data = parseData("test_road.txt")
    # The command-line argument should be echoed back verbatim.
    self.assertEqual(500, data['road_length'])
    # The road's endpoints should span exactly that length.
    self.assertEqual(500, data['road']['east_end'] - data['road']['west_end'])
    # West-bound sources must sit at the far (east) end of the road.
    for src in data['sources']:
        if src['direction'] == "WestBound":
            self.assertEqual(500, src['location'])
    # Each user that left the road should have spent roughly
    # length / |speed| on it.
    for user in data['road_users']['list']:
        if user['left_on_road']:
            continue
        elapsed = user['removed'] - user['created']
        expected = 500 / abs(user['velocity'])
        label = "Failed: Road User ID: " + str(user['id'])
        if user['platoon']:
            # Platoon members may be slowed down, so only require a lower
            # bound; 0.1 absorbs rounding in the timestamps.
            self.assertTrue(elapsed + 0.1 >= expected, msg=label)
        else:
            self.assertAlmostEqual(elapsed, expected, delta=1, msg=label)
def test_constant(self):
    """With all speed deviations zeroed, every user moves at exactly the
    configured speed for its type, signed according to its direction."""
    os.system("python main.py -t 0.2 -cd 0 -pd 0 -vd 0 > test_speeds.txt")
    data = parseData("test_speeds.txt")
    clock_step = data['clock']['tick_length']
    # Map each user type to the speed configured for it.
    configured_speed = {
        "Pedestrian": data['ped_speed'],
        "Bicycle": data['bike_speed'],
        "Motor Vehicle": data['mv_speed'],
    }
    for user in data['road_users']['list']:
        # Distance covered per tick must equal velocity * tick length.
        self.assertAlmostEqual(user['velocity_tick'],
                               user['velocity'] * clock_step, delta=0.1)
        # Speed magnitude must match the configured constant for the type.
        expected = configured_speed.get(user['type'])
        if expected is not None:
            self.assertAlmostEqual(expected, abs(user['velocity']), delta=0.1)
        # The velocity's sign encodes the direction of travel.
        if user['direction'] == "EastBound":
            self.assertTrue(user['velocity'] > 0)
        else:
            self.assertTrue(user['velocity'] < 0)
def test_set(self):
    """User IDs must be consecutive, lengths must match the user type, and
    the reported total must equal the number of users listed."""
    os.system("python main.py -t 0.2 -r 500 > test_IDs.txt")
    data = parseData("test_IDs.txt")
    users = data['road_users']['list']
    type_lengths = {"Pedestrian": 1, "Bicycle": 2, "Motor Vehicle": 6}
    last = None
    count = 0
    for user in users:
        # Each ID must be exactly one greater than the previous user's.
        if last is not None:
            self.assertEqual(user['id'], last + 1)
        last = user['id']
        count += 1
        # Physical length is fixed per user type.
        expected_length = type_lengths.get(user['type'])
        if expected_length is not None:
            self.assertEqual(expected_length, user['length'])
    self.assertEqual(count, data['road_users']['totals']['total'])
    # IDs start at 0, so the last ID + 1 equals the user count.
    self.assertEqual(count, last + 1)
def test_types(self):
    """Only motor vehicles may be flagged as platoon members."""
    os.system("python main.py -t 1 > test_platoon.txt")
    data = parseData("test_platoon.txt")
    platoon_members = [u for u in data['road_users']['list'] if u['platoon']]
    for member in platoon_members:
        self.assertTrue(member['type'] == "Motor Vehicle")
def test_users(self):
    """Every critical user of an interaction must also be a participant."""
    os.system("python main.py -t 1 > test_critical.txt")
    data = parseData("test_critical.txt")
    for inter in data['interactions']:
        participant_ids = {u['id'] for u in inter['road_users']}
        for critical_id in inter['critical_users']:
            self.assertTrue(critical_id in participant_ids)
def test_set(self):
    """Every numeric command-line argument should reappear, in order, in
    the parsed output (the first output value is not argument-driven)."""
    args = "-r 2000 -t 0.5 -it 3.0 -p 10 -c 15 -v 25 -pp 25 -cp 75 -vp 30 -pa 1 -ca 1 -va 1 -ps 1.2 -cs 6.2 -vs 10.2 -pd 0 -cd 0 -vd 0 -d 1 -sd 1"
    os.system("python main.py " + args + " > test_vars.txt")
    data = parseData("test_vars.txt")
    vals = list(data.values())
    # Compare each argument value against the output, offset by one
    # because vals[0] is not covered by the argument string.
    for i, expected in enumerate(extract_nums(args)):
        self.assertEqual(expected, vals[i + 1])
def listAppliancesList(self, name=None, ltype=None):
    """List appliances, optionally filtered by type and/or name.

    Omitted filters are simply not sent as query parameters.
    """
    query = {}
    if ltype is not None:
        query['type'] = ltype
    if name is not None:
        query['name'] = name
    endpoint = self.reference.createAppendURL(string='appliances')
    response = self.auth.get(endpoint, params=query)
    return parseData(response.text)
def test_default(self):
    """Without -r the road defaults to a length of 1000."""
    os.system("python main.py -t 0.2 > test_road.txt")
    data = parseData("test_road.txt")
    # The default length should be reported directly...
    self.assertEqual(1000, data['road_length'])
    # ...and be consistent with the road's endpoints.
    self.assertEqual(1000, data['road']['east_end'] - data['road']['west_end'])
    # West-bound sources sit at the far (east) end of the road.
    for source in data['sources']:
        if source['direction'] != "WestBound":
            continue
        self.assertEqual(1000, source['location'])
def test_loc(self):
    """Each road user should be created at its matching source's location."""
    os.system("python main.py -t 0.2 > test_locations.txt")
    data = parseData("test_locations.txt")
    sources = data['sources']
    for user in data['road_users']['list']:
        # A source matches when its direction equals the user's and its
        # type string is contained in the user's type. The scan does not
        # break, so the last matching source wins.
        matched = None
        for candidate in sources:
            if candidate['direction'] == user['direction'] and candidate['type'] in user['type']:
                matched = candidate
        self.assertAlmostEqual(matched['location'], user['create_pos'],
                               delta=1,
                               msg="Failed: Road User ID: " + str(user['id']))
def test_label(self):
    """An interaction is critical exactly when its code holds two 'M's."""
    os.system("python main.py -t 10 > test_critical.txt")
    data = parseData("test_critical.txt")
    for inter in data['interactions']:
        # Counter returns 0 for absent characters, so a single lookup
        # covers both the "no M at all" and "wrong count" cases.
        m_count = Counter(inter['code'])['M']
        if m_count == 2:
            self.assertTrue(inter['critical'])
        else:
            self.assertFalse(inter['critical'])
def test_two(self):
    """The median number of interactions per user should be close to the
    analytic prediction (vehicles only, constant speed)."""
    os.system("python main.py -t 1 -c 0 -p 0 -vd 0 > test_phase1.txt")
    data = parseData("test_phase1.txt")
    pred_meetings = ((25/3600) / data['mv_speed'] * data['road_length']) + ((25/3600) * (data['road_length'] / data['mv_speed']))
    # Count, for every user, how many interactions list them as a participant.
    counts = []
    for user in data['road_users']['list']:
        uid = user['id']
        involvement = sum(
            1
            for event in data['interactions']
            for participant in event['road_users']
            if uid == participant['id']
        )
        counts.append(involvement)
    # The median is robust to a few outliers, so it should sit near the
    # prediction.
    self.assertAlmostEqual(pred_meetings, statistics.median(counts), delta=0.5)
def processData(): #takes data from database and process it to produce analysisInfo with features that can be used for machine learning
    # Pipeline: parse raw records, derive per-user analysis data, then run a
    # fixed sequence of feature/report passes that mutate `analysis` in place.
    # Returns (children, analysis, startT, endT).
    children=parse.parseData()
    analysis = extract(children)
    # Overall date range covered by the data set.
    startT=findStartDate(analysis)
    endT=findEndDate(analysis)
    print "date diff", dateDiff(startT, endT)
    extend(children, analysis, startT, endT)
    addSleepChart(children, analysis)
    computeRateNtTr(analysis)
    compBigImprov(analysis)
    computeCompleteDevRate(analysis)
    userReport(analysis)
    #searchMissingUsers(analysis)
    # Feature derivation for the downstream ML step, then correlations.
    addFeatureForML(children, analysis)
    computeCorrCoef(analysis)
    return children, analysis, startT, endT
def test_number(self):
    """A platoon never consists of exactly one road user, in either
    direction of travel."""
    os.system("python main.py -t 1 > test_platoon.txt")
    data = parseData("test_platoon.txt")
    # Length of the current run of consecutive platoon members, tracked
    # separately per direction (key: is the user east-bound?).
    run = {True: 0, False: 0}
    for user in data['road_users']['list']:
        eastbound = user['direction'] == "EastBound"
        if user['platoon']:
            run[eastbound] += 1
        else:
            # A run just ended; a one-member platoon is invalid.
            self.assertFalse(run[eastbound] == 1)
            run[eastbound] = 0
    # Runs still open at the end of the list must not be singletons either.
    self.assertFalse(run[True] == 1)
    self.assertFalse(run[False] == 1)
def distributions(self):
    """Generated speeds should be normally distributed around the
    configured mean speed for each user type.

    Fix: the normality-test failure messages for bicycle and motor-vehicle
    speeds were copy-pasted as "ped speeds"; each check now names the
    population it actually tests.
    """
    os.system("python main.py -c 50 -p 50 > test_speeds.txt")
    data = parseData("test_speeds.txt")
    # Bucket absolute speeds by user type; anything that is neither a
    # pedestrian nor a bicycle is treated as a motor vehicle.
    ped_speeds = []
    bike_speeds = []
    veh_speeds = []
    for user in data['road_users']['list']:
        if user['type'] == 'Pedestrian':
            ped_speeds.append(abs(user['velocity']))
        elif user['type'] == 'Bicycle':
            bike_speeds.append(abs(user['velocity']))
        else:
            veh_speeds.append(abs(user['velocity']))
    # Test pvalue for normal dist (p > 0.05: cannot reject normality).
    for speeds, label in ((ped_speeds, "ped"),
                          (bike_speeds, "bike"),
                          (veh_speeds, "vehicle")):
        pval = stats.normaltest(speeds).pvalue
        self.assertGreater(pval, 0.05,
                           msg="Testing normal dist for %s speeds" % label)
    # The sample median should sit near the configured mean speed.
    self.assertAlmostEqual(data['ped_speed'], statistics.median(ped_speeds), delta=2)
    self.assertAlmostEqual(data['bike_speed'], statistics.median(bike_speeds), delta=2)
    self.assertAlmostEqual(data['mv_speed'], statistics.median(veh_speeds), delta=2)
def checkVersion(self):
    """Query the service's version endpoint and return the parsed reply."""
    response = self.auth.get(self.versionURL())
    return parseData(response.text)
def listAppliances(self, pagesize=10, page=1):
    """Fetch a single page of appliances and return the parsed response."""
    query = {'pageSize': pagesize, 'page': page}
    endpoint = self.reference.createAppendURL(string='appliances')
    response = self.auth.get(endpoint, params=query)
    return parseData(response.text)
#Jackson Zou #SoftDev pd9 #K09 -- Yummy Mongo Py #2020-02-26 from pymongo import MongoClient from parse import parseData, getRestaurantsByBorough, getRestaurantsByZip, getRestaurantsByZipandGrade, getRestaurantsByZipandScore client = MongoClient() db = client["restaurants"] print(db) parseData(db, "primer-dataset.json") getRestaurantsByZipandScore(db, "10024", "3") getRestaurantsByZipandGrade(db, "10024", "B") getRestaurantsByZip(db, "10024") getRestaurantsByBorough(db, "Brooklyn")
# Result output csv file OUTPUT_CSV_FILENAME = "output/result.csv" # Nerual Network Parameters HIDDEN_LAYER = [128] # 1 hidden layer BPTT_ORDER = 4 LEARNING_RATE = 0.05 EPOCH_NUM = 10 # number of epochs to run before saving the model BATCH_SIZE = 1 currentEpoch = 0 print 'Parsing training data...' t0 = time.time() trainWordIndices = parse.parseData(TRAIN_FILENAME) t1 = time.time() print '...costs ', t1 - t0, ' seconds' NEURON_NUM_LIST = [ HIDDEN_LAYER + [ wordUtil.WORD_VECTOR_SIZE ] ] + HIDDEN_LAYER + [ [ wordUtil.TOTAL_WORDS, wordUtil.WORD_CLASS_NUM ] ] print 'Generating utils for class-based output layer...' t0 = time.time() wordUtil.genWordClassUtils(trainWordIndices) t1 = time.time() print '...costs ', t1 - t0, ' seconds' print 'Training...' aDNN = dnn.dnn( NEURON_NUM_LIST, BPTT_ORDER, LEARNING_RATE, EPOCH_NUM, BATCH_SIZE, LOAD_MODEL_FILENAME ) while True: t2 = time.time()
def commitData(self, pagesize=10, page=1):
    """POST to the commit endpoint and return the parsed response.

    NOTE(review): ``pagesize`` and ``page`` are accepted but never sent
    with the request -- confirm whether they should be passed as query
    parameters or dropped from the signature.
    """
    endpoint = self.reference.createAppendURL(string='commit')
    response = self.auth.post(endpoint)
    return parseData(response.text)