def filter_data(data, perturbation_type=None, enforce_task_success=True, time_window=None):
    data_block = copy.deepcopy(data)
    if perturbation_type is not None:
        data_block = U.filter_by(data_block, 'stim_site', [perturbation_type])
    if enforce_task_success:
        data_block = U.filter_by(data_block, 'behavior_report', [1])
    if time_window is not None:
        low, high = time_window
        data_block['train_rates'] = data_block['train_rates'][:, np.arange(low, high), :]
    data = BeneDict(data_block)
    return data
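# Hypothetical usage sketch (names and values are illustrative, not from the
# source): keep only successful trials of one perturbation condition and clip
# the rate traces to timesteps [10, 60).
#
#   trimmed = filter_data(session_data,
#                         perturbation_type='photostim',  # assumed label
#                         enforce_task_success=True,
#                         time_window=(10, 60))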
def test_ping_h1_h5_and_h2_h5_balance_of_charges():
    utils.get_output("wireshark/s2-eth1-h1-ping-h5.pcap", "test/s2-ping-h1-h5.txt")
    utils.get_output("wireshark/s2-eth1-h2-ping-h5.pcap", "test/s2-ping-h2-h5.txt")
    utils.get_output("wireshark/s3-eth1-h1-ping-h5.pcap", "test/s3-ping-h1-h5.txt")
    utils.get_output("wireshark/s3-eth1-h2-ping-h5.pcap", "test/s3-ping-h2-h5.txt")

    def callback(x):
        return x['dst'] == '10.0.0.5' and x['protocol'] == 'ICMP'

    df_1 = utils.get_df("test/s2-ping-h1-h5.txt")
    df_1.columns = ['src', 'dst', 'protocol']
    df_1 = utils.filter_by(df_1, ['dst', 'protocol'], callback)
    size_1 = len(df_1.index)

    df_2 = utils.get_df("test/s2-ping-h2-h5.txt")
    df_2.columns = ['src', 'dst', 'protocol']
    df_2 = utils.filter_by(df_2, ['dst', 'protocol'], callback)
    size_2 = len(df_2.index)

    df_3 = utils.get_df("test/s3-ping-h1-h5.txt")
    df_3.columns = ['src', 'dst', 'protocol']
    df_3 = utils.filter_by(df_3, ['dst', 'protocol'], callback)
    size_3 = len(df_3.index)

    df_4 = utils.get_df("test/s3-ping-h2-h5.txt")
    df_4.columns = ['src', 'dst', 'protocol']
    df_4 = utils.filter_by(df_4, ['dst', 'protocol'], callback)
    size_4 = len(df_4.index)

    os.system("rm test/s2-ping-h1-h5.txt")
    os.system("rm test/s2-ping-h2-h5.txt")
    os.system("rm test/s3-ping-h1-h5.txt")
    os.system("rm test/s3-ping-h2-h5.txt")

    ecmp_h1_h5 = (size_1 == 0 and size_3 != 0) or (size_1 != 0 and size_3 == 0)
    ecmp_h2_h5 = (size_2 == 0 and size_4 != 0) or (size_2 != 0 and size_4 == 0)
    assert ecmp_h1_h5 and ecmp_h2_h5
def test_tcp_balance_of_charges():
    utils.get_output("wireshark/s2-eth1-iperf-h1-h4.pcap", "test/s2-h1-h4.txt")
    utils.get_output("wireshark/s3-eth1-iperf-h1-h4.pcap", "test/s3-h1-h4.txt")

    def callback(x):
        return x['dst'] == '10.0.0.4' and x['protocol'] == 'TCP'

    df_1 = utils.get_df("test/s2-h1-h4.txt")
    df_1.columns = ['src', 'dst', 'protocol']
    df_1 = utils.filter_by(df_1, ['dst', 'protocol'], callback)
    size_1 = len(df_1.index)

    df_2 = utils.get_df("test/s3-h1-h4.txt")
    df_2.columns = ['src', 'dst', 'protocol']
    df_2 = utils.filter_by(df_2, ['dst', 'protocol'], callback)
    size_2 = len(df_2.index)

    os.system("rm test/s2-h1-h4.txt")
    os.system("rm test/s3-h1-h4.txt")

    assert (size_1 == 0 and size_2 != 0) or (size_1 != 0 and size_2 == 0)
def test_udp_denial_of_service():
    utils.get_output("wireshark/s5-eth1-iperf-udp-h1-h5.pcap", "test/s5-udp-h1-h5.txt")

    def callback(x):
        return x['dst'] == '10.0.0.5' and x['protocol'] == 'UDP'

    df_1 = utils.get_df("test/s5-udp-h1-h5.txt")
    df_1.columns = ['src', 'dst', 'protocol']
    df_1 = utils.filter_by(df_1, ['dst', 'protocol'], callback)
    size_1 = len(df_1.index)

    # clean up before asserting so the temp file is removed even if the test fails
    os.system("rm test/s5-udp-h1-h5.txt")
    assert size_1 < 893
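# Hedged sketch of the helper these tests appear to rely on: a pandas-based
# utils.filter_by(df, cols, callback) that keeps the rows whose selected
# columns satisfy `callback`. This is a guess at the interface implied by the
# calls above, not the project's actual implementation.
import pandas as pd

def filter_by(df, cols, callback):
    # Evaluate `callback` on the selected columns of each row; keep matches.
    mask = df[cols].apply(callback, axis=1)
    return df[mask]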
def get(self, user_id):
    """
    Return the number of events of a user.

    :Arguments:
        user_id : int
            User id
        time : str
            Time unit for datetime.timedelta(): ``minutes``, ``hours``,
            ``days`` or ``weeks``, e.g. datetime.timedelta(days=7)
        delta : int
            Delta is any int value for datetime.timedelta(),
            e.g. datetime.timedelta(days=7)

    If no valid time query argument is provided, it will show the user
    events count.
    """
    start = timer.time()
    user = models.User.all().filter("id =", int(user_id)).get()
    if not user:
        raise tornado.web.HTTPError(404)

    time = self.get_argument("time", None)
    delta = self.get_argument("delta", 0)

    # get last x minutes, hours, days, weeks
    last_x_time = datetime.now() - utils.timedelta_wrapper(time, int(delta))
    # get events from the last x time
    events_from_last_x_time = filter(lambda x: x.created >= last_x_time,
                                     [event for event in user.user_events])

    data = {}
    if not time:
        # show all events for user
        data["description"] = "Number of events for User %s" % str(user_id)
        data["load_time"] = timer.time() - start
        data["events"] = user.user_events.count()
    else:
        data["description"] = "Number of events for User %s for the last %s %s" % (
            str(user_id), str(delta), str(time))
        data["load_time"] = timer.time() - start
        data["grouping"] = utils.filter_by(time, events_from_last_x_time, last_x_time)
    self.write(utils.json_encode(data))
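# Hedged sketch of the assumed utils.timedelta_wrapper(time, delta): it likely
# maps the `time` unit string onto a datetime.timedelta keyword argument.
# Illustrative only; the real helper may validate its inputs differently.
from datetime import timedelta

def timedelta_wrapper(time, delta):
    # "minutes", "hours", "days" and "weeks" are all valid timedelta kwargs.
    if time in ("minutes", "hours", "days", "weeks"):
        return timedelta(**{time: delta})
    return timedelta(0)  # no/invalid unit -> empty window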
#!/usr/bin/python

import matplotlib.pyplot as plt
import numpy as np

import utils
# TODO(ipince): make this less nasty
from import_data import data, tables, centers, parishes, munis, states, jt, jc, jp

print

cutoff = 95
cha_dom_tables = utils.filter_by(tables[0], lambda v: v['gov_pct'] >= cutoff)
mad_dom_tables = utils.filter_by(tables[1], lambda v: v['gov_pct'] >= cutoff)
cap_dom_tables = utils.filter_by(tables[1], lambda v: v['cap_pct'] >= cutoff)
mad_dom_cha = mad_dom_tables.difference(cha_dom_tables)

print 'Using a cutoff of %d%%' % cutoff
print 'Capriles dominated in %d tables.' % len(cap_dom_tables)
print 'Maduro dominated in %d tables. Chavez in %d' % (len(mad_dom_tables), len(cha_dom_tables))
print 'Maduro dominated in %d tables that Chavez did not' % len(mad_dom_cha)
for code in mad_dom_cha:
    print utils.short(tables[0][code])
    print utils.short(tables[1][code])

# Do any of those have significant support for Capriles that was gone in 2013?
# Compare Maduro vs Chavez at table level.
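# Hedged sketch of the filter_by variant used in this script: given a dict of
# table records keyed by code and a predicate on the record, it appears to
# return the set of codes whose record satisfies the predicate (the result
# supports set.difference() and the codes are used to index back into
# `tables`). A guess at the interface, not the project's actual code.
def filter_by(d, pred):
    return set(code for code, v in d.items() if pred(v))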
def isolate_requests(har_file):
    har = {}
    with open(har_file) as harfile:
        har = json.load(harfile)

    global_vars.harfile_pagetime = har['log']['pages'][0]['startedDateTime']  # needed later on
    global_vars.req_resp = har['log']['entries']
    if global_vars.debug:
        print "[+] Read %d entries" % len(global_vars.req_resp)

    if conf.domains == []:
        fdomain = raw_input(
            termcolor.colored("Filter by domain? (ENTER for no): ", color='yellow'))
        if fdomain:
            utils.filter_by(fdomain)
    else:
        if global_vars.debug:
            print termcolor.colored("[c] Read %d domains to filter on" % len(conf.domains),
                                    color='magenta')
        for fdomain in conf.domains:
            utils.filter_by(fdomain)

    no_data = raw_input(
        termcolor.colored("Ignore media/fonts/css/... junk? (Y/n): ", color='yellow'))
    if no_data in ["", "y", "Y"]:
        junk_ext = [
            ".ttf", ".woff", ".otf", ".eot",  # fonts
            ".css", ".sass",  # styles
            ".img", ".jpg", ".jpeg", ".png", ".svg", ".webp", ".gif",
            ".bmp", ".ico", ".pdf",  # img / doc
        ]  # media
        junk_ext += [j.upper() for j in junk_ext]  # ...and .JPG has been seen
        for e in list(global_vars.req_resp):
            if 'data:' in e['request']['url']:
                global_vars.req_resp.remove(e)
                if global_vars.debug:
                    print '[+] Ignoring entry with url: ' + e['request']['url']
                continue
            for j in junk_ext:
                if j in e['request']['url']:
                    global_vars.req_resp.remove(e)
                    if global_vars.debug:
                        print '[+] Ignoring entry with url: ' + e['request']['url']
                    break  # entry already removed; avoid removing it twice
    return
print "Comparing centers" compare_places(data[0]['center'], data[1]['center']) print "Comparing tables" compare_places(data[0]['table'], data[1]['table']) print print "Filtering 7O" filter_uncounted(data[0]['table']) filter_uncounted(data[0]['center']) print "Filtering 14A" filter_uncounted(data[1]['table']) filter_uncounted(data[1]['center']) # CNE data weirdness in 7O: print odd_7o_table_codes = utils.filter_by(data[0]['table'], lambda v: v['voting_voters'] != v['scrut_votes']) print "Tables in 7O where voting voters does not match scrutinized votes: %d" % len(odd_7o_table_codes) # variables for easy access tables = [data[0]['table'], data[1]['table']] centers = [data[0]['center'], data[1]['center']] parishes = [data[0]['parish'], data[1]['parish']] munis = [data[0]['muni'], data[1]['muni']] states = [data[0]['state'], data[1]['state']] jt = {} # for joined_tables good_codes = set(tables[0]).intersection(set(tables[1])).difference(odd_7o_table_codes) for code in good_codes: jt[code] = [tables[0][code], tables[1][code]] good_codes = set(centers[0]).intersection(set(centers[1]))
### Participation-related analysis

print "Histogram of participation diff"
diff = list()
for code in jt:
    diff.append(tables[1][code]['particip'] - tables[0][code]['particip'])
m = utils.mean(diff)
st = utils.stdev(diff)
print "Mean= %f , and stdev= %f " % (m, st)
#plt.hist(diff, bins=range(-30, 30))
#plt.show()

pbound = 98
geq = lambda v: v['particip'] >= pbound
print "Tables in 7O with >=%d pct particip: %d" % (pbound, len(utils.filter_by(tables[0], geq)))
print "Tables in 14A with >=%d pct particip: %d" % (pbound, len(utils.filter_by(tables[1], geq)))

#delta = round(m + 3*st, 2)
delta = 10
print "Tables in which there is more than %.2f pct participation difference:" % delta
codes = utils.compare_by(
    tables, lambda v1, v2: v2['particip'] - v1['particip'] > delta,
    output=False, exclude=odd_7o_table_codes)
print "There are %d such tables" % len(codes)
#s = sorted([jt[c] for c in codes], key=lambda vs: vs[1]['gov_pct'], reverse=True)
s = sorted([jt[c] for c in codes],
           key=lambda vs: vs[1]['particip'] - vs[0]['particip'], reverse=True)
#for elm in s:
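# Hedged sketch of the assumed utils.compare_by(tables, pred, output, exclude):
# it seems to return the codes present in both elections whose pair of records
# satisfies the two-argument predicate, skipping excluded codes. The interface
# is inferred from the call above and is illustrative only; the real helper
# presumably prints each match when output=True, which is omitted here.
def compare_by(tables, pred, output=False, exclude=()):
    matches = set()
    for code in set(tables[0]).intersection(tables[1]):
        if code in exclude:
            continue
        if pred(tables[0][code], tables[1][code]):
            matches.add(code)
    return matches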