def analyse_assignment_expression(self, assignment_node):
    '''
    type: 'AssignmentExpression';
    operator: '=' | '*=' | '**=' | '/=' | '%=' | '+=' | '-=' |
              '<<=' | '>>=' | '>>>=' | '&=' | '^=' | '|=';
    left: Identifier;
    right: Identifier;
    '''
    left = assignment_node['left']
    right = assignment_node['right']
    operator = assignment_node['operator']

    self.dispatcher(left)
    self.dispatcher(right)

    # The assignment node gets its flow from the right-hand side
    right_flow = right['flow']
    left_flow = left['flow']

    # We don't want to account for left sources: they will be overwritten
    left_flow.remove_sources()
    left_flow.remove_sanitizers()
    right_flow.remove_sinks()

    resulting_flow = Flow([right_flow, left_flow])
    assignment_node['flow'] = Flow([right_flow])

    # The variable on the left gets the flow from the right
    # NOTE: the left node itself doesn't need the flow from the right
    # NOTE: we want to keep track of left sinks
    self.variable_flows[left['full_name']] = Flow([resulting_flow])

    # Check whether the left side is a sink
    self.vulnerabilities += resulting_flow.check_vulns()
def handlePacket(self, pkt):
    self.pkts.append(pkt)
    elts = []
    edges = []

    new_elts, new_edges = self.checkIP(pkt)
    if new_elts:
        elts += new_elts
    if new_edges:
        edges += new_edges

    new_elts, new_edges = self.checkDNS(pkt)
    if new_elts:
        elts += new_elts
    if new_edges:
        edges += new_edges

    # do flow analysis here, if necessary
    if TCP in pkt or UDP in pkt:
        Flow.pkt_handler(pkt, self.flows)
        self.send_flow_statistics(self.flows[Flow.flowid(pkt)])
    # end flow analysis

    self.send_nodes(elts, edges)
def train():
    epochs = 1
    ckpt = os.path.join(envs.EXPT_DIR, envs.CKPT_FILE)

    # load the labeled and unlabeled datasets
    lpth = os.path.join(envs.EXPT_DIR, 'labeled.pkl')
    with open(lpth, 'rb') as f:
        labeled = pickle.load(f)
    labeled = list(labeled.keys())
    unlabeled = list(set(range(len(entire_data))) - set(labeled))

    flow = Flow(model=model, labeled=labeled, unlabeled=unlabeled,
                batch_size=batch_size, cap=None, resume_from=envs.RESUME_FROM)
    for epoch in range(epochs):
        loss, accuracy = flow.train()
        # save ckpt
        ckpt = {
            'model': model.state_dict(),
            'optimizer': flow.optimizer.state_dict(),
        }
        torch.save(ckpt, os.path.join(envs.EXPT_DIR, envs.CKPT_FILE))
    return
class Agent():

    def __init__(self, env=None, n_episodes=0, gamma=0,
                 lambda_epsilon=lambda x: x, alpha=0.001, epsilon=1):
        self.n_episodes = n_episodes
        self.epsilon = epsilon
        self.lambda_epsilon = lambda_epsilon
        self.env = env
        self.network = DQClassifier(alpha, (1,), env.action_space.n)
        self.flow = Flow(self.network, gamma)

    def select_action(self, state):
        """Apply an epsilon-greedy policy."""
        if random.uniform(0, 1) < self.epsilon:
            # explore - random action
            action = self.env.action_space.sample()
        else:
            # exploit - a_max in Q at state
            action = self.flow.predict(state)
        self.epsilon = self.lambda_epsilon(self.epsilon)
        return action

    def update(self, state, next_state, action, reward):
        """Train the model on the current step."""
        self.flow.learn(state, next_state, action, reward)
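# A hypothetical training loop driving the Agent above; the original
# collection does not show how Agent is used. A classic gym-style environment
# (4-tuple step return) with a small observation is assumed, and the
# environment name and hyperparameters are illustrative only.
import gym

env = gym.make("FrozenLake-v1")  # assumption: any discrete-action env
agent = Agent(env=env, n_episodes=100, gamma=0.99,
              lambda_epsilon=lambda e: max(0.05, e * 0.995))

for _ in range(agent.n_episodes):
    state = env.reset()
    done = False
    while not done:
        action = agent.select_action(state)
        next_state, reward, done, info = env.step(action)
        agent.update(state, next_state, action, reward)
        state = next_state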
def __init__(self, gb):
    self.gb = gb

    # -- flow -- #
    self.discr_flow = Flow(gb)
    shape = self.discr_flow.shape()
    self.flux_pressure = np.zeros(shape)

    # -- temperature -- #
    self.discr_temperature = Heat(gb)

    # -- solute and precipitate -- #
    self.discr_solute_advection_diffusion = Transport(gb)
    self.discr_solute_precipitate_reaction = Reaction(gb)

    # -- porosity -- #
    self.discr_porosity = Porosity(gb)

    # -- fracture aperture -- #
    self.discr_fracture_aperture = FractureAperture(gb, "fracture_aperture")

    # -- layer porosity and aperture -- #
    self.discr_layer_porosity = Porosity(gb, "layer_porosity")
    self.discr_layer_aperture = LayerAperture(gb, "layer_aperture")

    # the actual time of the simulation
    self.time = 0
def fromJson(dict, debug=False, inputFile="N/A"):
    g = Graph.fromJson(dict['graph'])

    bad = []
    if "bad" in dict:
        for k in dict['bad']:
            bad.append(Flow.fromJson(k))

    good = []
    for k in dict['good']:
        good.append(Flow.fromJson(k))

    cc = []
    for k in dict['cc']:
        cc.append(EdgesData.fromJson(k))

    compCost = {}
    if 'compCost' in dict:
        for host, costs in dict['compCost'].items():
            for lvl in range(len(costs)):
                compCost[(int(host), lvl)] = costs[lvl]

    gateway = []
    if 'gateways' in dict:
        gateway = dict['gateways']

    switchCap = {}
    if 'switchCap' in dict:
        switchCap = dict['switchCap']

    return Network(g, dict['routers'], dict['hosts'], gateway,
                   dict['traffics'], bad, good, cc, dict['affected'],
                   dict['memory'], switchCap, compCost, dict['topology'],
                   debug, inputFile)
def __init__(self, flow_id, source, destination, amount, env, time, bw):
    """ Constructor for Flow class """
    Flow.__init__(self, flow_id, source, destination, amount, env, time, bw)
    self._ssthresh = 9999
    self._resend_time = 100
def handlePacket(self, pkt):
    self.pkts.append(pkt)
    elts = []
    edges = []

    new_elts, new_edges = self.checkIP(pkt)
    if new_elts:
        elts += new_elts
    if new_edges:
        edges += new_edges

    new_elts, new_edges = self.checkDNS(pkt)
    if new_elts:
        elts += new_elts
    if new_edges:
        edges += new_edges

    # do flow analysis here, if necessary
    if TCP in pkt or UDP in pkt:
        Flow.pkt_handler(pkt, self.flows)
        flow = self.flows[Flow.flowid(pkt)]
        self.send_flow_statistics(flow)

        new_elts, new_edges = self.checkHTTP(flow)
        if new_elts:
            elts += new_elts
        if new_edges:
            edges += new_edges
    # end flow analysis

    self.send_nodes(elts, edges)
def run_task(user_tweets, nodes, nodes_params, detect_interval, directed):
    """Parallelize the flow detection algorithm for a given set of parameters."""
    # Detect flows from the tweets on each cluster machine
    flows = user_tweets.flatMap(
        lambda x: Flow.infer_flows(x[0], x[1], nodes.value,
                                   detect_interval, directed))

    # Aggregate the results for each Flow
    agg_flows = flows.reduceByKey(Flow.reduce_flows_helper)

    # Build the final Flow objects from their attributes
    final_flows = agg_flows.map(lambda x: Flow.build_final_flows(x[0], x[1]))

    # Generate node weights
    weighted_nodes = final_flows.flatMap(
        lambda x: [(x.src, x.weight), (x.dst, x.weight)])
    weighted_nodes = weighted_nodes.reduceByKey(lambda a, b: a + b)
    weighted_nodes = weighted_nodes.map(
        lambda x: {'node': x[0], 'weight': x[1]})

    # Save the results
    filename = '_{}_{}_{}_{}_{}.json'.format(
        nodes_params[0], nodes_params[1], nodes_params[2],
        detect_interval, directed)
    json_mapper = lambda x: json.dumps(x, default=lambda y: y.json)
    final_flows.map(json_mapper).saveAsTextFile(
        os.path.join(PATH_BASE, 'results', 'flows' + filename))
    weighted_nodes.map(json_mapper).saveAsTextFile(
        os.path.join(PATH_BASE, 'results', 'nodes' + filename))
def add_flow(self, flow_id, src, dest, data_amount, start_time, flowtype):
    """Adds a new flow to the network.

    Args:
        flow_id (str): id of the flow
        src (Host): source host
        dest (Host): destination host
        data_amount (float): amount of data to be sent, in MB
        start_time (float): when the flow starts sending packets, in seconds
        flowtype (str): the type of congestion control algorithm the flow
            will use. Valid parameters are "fast", "reno", or None, which
            defaults to TCP Tahoe.
    """
    # Convert data_amount from megabytes to bits
    num_bits = data_amount * BYTE_TO_BIT * MEGABIT_TO_BIT

    if flowtype is not None:
        if "fast" == flowtype.lower():
            flow = FAST_TCP(self, flow_id, src, dest, num_bits, start_time)
        elif "reno" == flowtype.lower():
            flow = FlowReno(self, flow_id, src, dest, num_bits, start_time)
        else:
            flow = Flow(self, flow_id, src, dest, num_bits, start_time)
    else:
        flow = Flow(self, flow_id, src, dest, num_bits, start_time)

    self.flows[flow_id] = flow
    self._num_active_flows += 1

    # Register the flow with its source and destination hosts
    src.add_flow(flow)
    dest.add_flow(flow)
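# Hypothetical usage of add_flow() above; the network object, the way hosts
# are looked up, and all values are illustrative assumptions rather than part
# of the original code.
flow_src = network.devices["H1"]
flow_dest = network.devices["H2"]
network.add_flow("F1", flow_src, flow_dest,
                 data_amount=20.0,   # 20 MB of data
                 start_time=1.0,     # start one second into the simulation
                 flowtype="reno")    # use TCP Reno congestion control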
class TestInterface(unittest.TestCase):

    def setUp(self):
        self.n1 = INode()
        self.n2 = INode()
        self.f1 = Flow()
        self.f1.addNode(self.n1)
        self.f1.addNode(self.n2)
        self.i1 = self.n2.i1
        self.i2 = self.n1.i2
        self.i3 = self.n2.i3
        self.i4 = self.n1.i4

    def test_repr(self):
        n = INode(id='foo')
        self.assertEqual("INode(foo)::Interface(i1)", unicode(n.i1))

    def test_isInput(self):
        self.assertTrue(self.i1.isInput())
        self.assertTrue(self.i3.isInput())
        self.assertFalse(self.i2.isInput())
        self.assertFalse(self.i4.isInput())

    def test_isCompatible(self):
        self.assertFalse(self.i1.isCompatible(self.i1))  # Same interface
        self.assertFalse(self.i1.isCompatible(self.i3))  # Same node
        self.assertEqual(self.i1.type, Interface.INPUT)
        self.assertEqual(self.i2.type, Interface.OUTPUT)
        self.assertEqual(self.i3.type, Interface.PARAMETER)
        self.assertEqual(self.i4.type, Interface.RESULT)
        self.assertTrue(self.i1.isCompatible(self.i2))
        self.assertTrue(self.i1.isCompatible(self.i4))
        self.assertTrue(self.i3.isCompatible(self.i2))
        self.assertTrue(self.i3.isCompatible(self.i4))
        self.assertFalse(self.i2.isCompatible(self.i1))
        self.assertFalse(self.i2.isCompatible(self.i3))
        self.assertFalse(self.i4.isCompatible(self.i1))
        self.assertFalse(self.i4.isCompatible(self.i3))

    def test_addSuccessor(self):
        self.assertFalse(self.i2 in self.i1.successors)
        self.assertFalse(self.i1 in self.i2.predecessors)
        self.i2.addSuccessor(self.i1)
        self.assertTrue(self.i1 in self.i2.successors)
        self.assertTrue(self.i2 in self.i1.predecessors)
        self.assertRaises(FlowError, self.i1.addSuccessor, self.i2)

    def test_removeSuccessor(self):
        self.assertFalse(self.i2 in self.i1.successors)
        self.assertFalse(self.i1 in self.i2.predecessors)
        self.assertRaises(FlowError, self.i1.removeSuccessor, self.i2)
        self.i2.addSuccessor(self.i1)
        self.assertRaises(FlowError, self.i1.removeSuccessor, self.i2)
        self.i2.removeSuccessor(self.i1)
        self.assertFalse(self.i2 in self.i1.successors)
        self.assertFalse(self.i1 in self.i2.predecessors)
def test_reduce(self, numeric_dataset):
    """Test reduce"""
    flow = Flow().from_source(Source(fin=numeric_dataset.open())).map(
        lambda x: [float(_) for _ in x]
    ).map(lambda x: sum(x))
    assert flow.reduce(lambda a, b: a + b).eval() == [145]
def __init__(self, ns, flow_id, src, dest, data_amount, start_time):
    Flow.__init__(self, ns, flow_id, src, dest, data_amount, start_time)
    self._gamma = FAST_GAMMA
    self._alpha = FAST_ALPHA
    self.last_rtt = float('inf')
    self.base_rtt = float('inf')
    self.schedule_next_update()
def demand_flow(self):
    """Generates lines for demand volume constraints"""
    for sn in range(1, self.n_source_nodes + 1):
        for tn in range(1, self.n_transit_nodes + 1):
            for dn in range(1, self.n_destination_nodes + 1):
                flow = Flow()
                flow.create_constraint(sn, tn, dn)
                self.add_line(flow)
def test_reload(self, numeric_dataset):
    """Test reloading"""
    flow = Flow().from_source(Source(fin=numeric_dataset.open())).map(
        lambda x: 1
    )
    assert flow.reduce(lambda a, b: a + b).eval() == [10]
    assert flow.reload().reduce(lambda a, b: a + b).eval() == [10]
def test_filter(self, string_dataset):
    """Test filter registering"""
    flow = Flow().from_source(Source(fin=string_dataset.open())).filter(
        lambda x: 'line' not in x
    )
    assert flow.chain[0].type == 'FLOW::FILTER'
    assert flow.batch(1)[0] == ['0', '1', '2', '3', '4']
def update_timeout_window_size(self):
    """Updates the window size and the recorded partial-acknowledgement
    packet ids after a timeout.
    """
    Flow.update_timeout_window_size(self)
    self.fast_recovery = False
    self.first_partial_ack = -1
    self.last_partial_ack = -1
class Reasoner(object):
    # The variables:
    def __init__(self, flow_filename, local_map, flavor_map=None,
                 global_map=None, mapping=None, detail="low", output="cli",
                 language="en"):
        self.globalities = json.load(open(global_map, 'r'))
        self.localities = json.load(open(local_map, 'r'))
        self.map = json.load(open(mapping, 'r'))
        self.language = json.load(open(language, 'r'))
        # Only read the flavor map when one was actually given; reading it
        # unconditionally would crash with the default flavor_map=None.
        if flavor_map is not None:
            self.flavor = json.load(open(flavor_map, 'r'))
            sys.path.append("/".join(flavor_map.split('/')[:-1]))
            sys.path.append("/".join(local_map.split('/')[:-1]))
            self.localities = update(self.localities, self.flavor)
        self.mapping = Mapping(self.globalities, self.localities, self.map,
                               self.language)
        self.flow = Flow(self.globalities, self.localities, self.map,
                         self.language)
        self.detail = detail_level[detail]
        self.output = output
        self.res_context = {}
        self.model = rdflib.Graph()
        if self.model is None:
            raise Exception("new rdflib.Graph failed")
        self.parse_flow(flow_filename)

    def parse_flow(self, filename):
        self.flow_filename = filename
        #print "setting the local flow", self.flow_filename
        self.flow.parse(self.flow_filename)

    # Let's store all the RDF triples into the internal model
    def parse_input(self, filename):
        self.model.parse(filename)
        try:
            a = __import__("prerun")
            a.pre_run(self.model, res_context=self.res_context)
        except ImportError, e:
            pass
        except Exception, e:
            print >> sys.stderr, e
            traceback.print_exc(file=sys.stderr)
def test():
    model = Darknet()
    print('Size of the test set:{}'.format(len(testset)))

    # load ckpt
    ckpt = torch.load(os.path.join(EXPT_DIR, CKPT_FILE))
    model.load_state_dict(ckpt['model'])

    fw = Flow(model, trainset, testset, hyp)
    # write prediction
    fw.validate(batch_size=16)
def testGraph(numOfTests):
    flow = 0
    flowGraph = Flow(fbGraph)
    for i in range(0, numOfTests):
        rand1 = randint(0, 4038)
        rand2 = randint(0, 4038)
        edgeFlow = flowGraph.maxFlow(rand1, rand2)
        flow += edgeFlow
        print('{}) source: {} sink: {} max flow: {}'.format(
            i + 1, rand1, rand2, edgeFlow))
        # print(rand1, rand2, g.maxFlow(rand1, rand2))
    print('Avg. Flow: {}'.format(flow / float(numOfTests)))
def test_map(self, numeric_dataset):
    """Test map registering"""
    flow = Flow().from_source(Source(fin=numeric_dataset.open())).map(
        lambda x: [float(_) for _ in x]
    ).map(
        lambda x: [2 * _ for _ in x]
    )
    assert flow.chain[0].type == 'FLOW::MAP'
    assert flow.batch(1)[0] == [0, 0, 2, 4, 6, 8]
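# The flow tests above take pytest fixtures (numeric_dataset, string_dataset)
# whose definitions are not part of this collection. A plausible conftest.py
# sketch follows; the dataset class and file contents are assumptions and
# would have to match the assertions in the tests (e.g. ten numeric lines
# whose values sum to 145, and string lines with and without 'line').
import io

import pytest


class _Dataset:
    """Minimal stand-in exposing the .open() method the tests rely on."""

    def __init__(self, text):
        self._text = text

    def open(self):
        return io.StringIO(self._text)


@pytest.fixture
def numeric_dataset():
    return _Dataset("0 0 1 2 3 4\n")  # placeholder line(s), contents assumed


@pytest.fixture
def string_dataset():
    return _Dataset("0\n1\nthis is a line\n")  # placeholder line(s)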
def test_flow(self):
    node1 = Node('Source', (45, 15), 1000, ':)')
    node2 = Node('Destination', (46, 16), 1000, ':)')
    undir_flow = Flow(node1, node2)
    dir_flow = Flow(node1, node2, directed=True)
    print(undir_flow)
    print(dir_flow)
    self.assertEqual(dir_flow.src, node1)
    self.assertEqual(dir_flow.dst, node2)
def main():
    if os.path.isdir(sys.argv[1]):
        base = sys.argv[1]
        fl = Flow(os.path.join(base, 'depthmaps.txt'),
                  os.path.join(base, 'images.txt'),
                  os.path.join(base, 'masks_1.txt'),
                  os.path.join(base, 'masks_2.txt'),
                  os.path.join(base, 'masks_3.txt'),
                  os.path.join(base, 'camera_pose.txt'),
                  os.path.join(base, 'obj1_pose.txt'),
                  os.path.join(base, 'obj2_pose.txt'),
                  os.path.join(base, 'obj2_pose.txt'),
                  os.path.join(base, 'calib.txt'),
                  os.path.join(base, 'events_imgs.txt'),
                  os.path.join(base, 'flow_gt_1.txt'),
                  os.path.join(base, 'flow_pred.txt'),
                  os.path.join(base, 'masks_full.txt'),
                  [346, 260], 4)
        fl.full_flow()
def __init__(self, flow_id, source, destination, amount, env, time, bw):
    """ Constructor for Flow class """
    Flow.__init__(self, flow_id, source, destination, amount, env, time, bw)
    self._alpha = 20.0
    self._gamma = 0.8
    self.env.add_event(
        Event("Start window calc", self._flow_id, self._update_window),
        self._flow_start - 1)
    self._total_num_pack = int(self._amount / (1024 * 8)) + 1
    self._cwnd = self._alpha
    self._resend_time = 1
def infer():
    model = Darknet()
    ckpt = torch.load(os.path.join(EXPT_DIR, CKPT_FILE))
    model.load_state_dict(ckpt['model'])
    fw = Flow(model, trainset, testset, hyp)

    # get the indices of unlabeled data
    with open(os.path.join(EXPT_DIR, 'unlabeled.pkl'), 'rb') as f:
        dt = pickle.load(f)
    unlabeled = [i for i in dt]

    fw.infer(unlabeled)
    return
def update_ack_window_size(self):
    """Updates window size when a packet is acknowledged.

    If the window size has reached the threshold, congestion avoidance
    will be switched on.
    """
    if self.fast_recovery:
        self.fast_recovery = False
        self.window_size = math.ceil(self.ssthreshold)
        self.duplicate_counter = 0
        self.record_window_size()
    else:
        Flow.update_ack_window_size(self)
def on_message(client, userdata, msg):
    # Note: current_pose and flow are locals here, so they do not persist
    # between callbacks; keeping class state across intents would require
    # module-level storage.
    current_pose = None
    f = [
        'tree_pose', 'warrior_1', 'crescent_lunge', 'warrior_2', 'triangle',
        'half_moon', 'plank', 'chaturanga_dandasana', 'cobra'
    ]
    if msg.topic == 'hermes/intent/smayorquin:BeginClass':
        # Sample flow
        flow = Flow(f)
        flow.run()
    elif msg.topic == 'hermes/intent/smayorquin:StopClass':
        print("StopClass Intent detected!")
        try:
            del flow
        except:
            pass
    elif msg.topic == 'hermes/intent/smayorquin:PauseClass':
        print("PauseClass Intent detected!")
        current_pose = flow.current_pose
        try:
            del flow
        except:
            pass
    elif msg.topic == 'hermes/intent/smayorquin:RestartClass':
        print("RestartClass Intent detected!")
        flow = Flow(f, current_pose=current_pose)
        flow.run()
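# A minimal sketch of how on_message above could be wired to an MQTT client.
# The use of paho-mqtt, the broker address, and the subscription pattern are
# assumptions; Snips/hermes setups typically run a broker on localhost:1883.
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("hermes/intent/#")  # receive all intent notifications
client.loop_forever()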
def test_compare(self):
    node1 = Node('Source', (45, 15), 1000, ':)')
    node2 = Node('Source', (45, 15), 1000, ':)')
    node3 = Node('Destination', (46, 16), 1000, ':)')
    self.assertTrue(node1 == node2)
    self.assertFalse(node1 == node3)

    flow1 = Flow(node1, node3)
    flow2 = Flow(node1, node3)
    flow3 = Flow(node1, node2)
    self.assertTrue(flow1 == flow2)
    self.assertFalse(flow1 == flow3)
def solveNetworkGP(N, edgeCosts, edgeMaxFlows, sources, sinks):
    m = Flow(N)
    m.substitutions.update({
        'edgeCost': edgeCosts,
        'edgeMaxFlow': edgeMaxFlows,
        'source': sources,
        'sink': sinks,
    })
    m.substitutions.update({'slackCost': 1000})  # ['sweep', np.linspace(100, 10000, 10)]
    m.cost = (np.sum(m['edgeCost'] * m['flow'])
              + m['slackCost'] * np.prod(m['slack']))
    m = relaxed_constants(m)
    sol = m.localsolve(verbosity=4, reltol=10**-5, iteration_limit=100)
    return sol
def routing_compute_util(self, state):
    self.add_previous_jobs()
    cloned_links = self.graph.copy_links()
    valid = True
    paths_used = []
    util = 0
    job_config = JobConfig()

    for p in state:
        bw, links = p[0], p[1]
        for l in range(len(links) - 1):
            link_id = Link.get_id(links[l], links[l + 1])
            link = cloned_links[link_id]
            link_bandwidth = link.get_bandwidth()
            if link_bandwidth < bw:
                valid = False
                break
            link.set_bandwidth(link_bandwidth - bw)
        if not valid:
            # logging.debug(str(state) + " cannot be built")
            break
        else:
            paths_used.append(p)

    if valid:
        self.graph.set_links(cloned_links)
        all_paths_used = deepcopy(paths_used)
        for job in self.jobs_config.values():
            all_paths_used.extend(job.get_used_paths())
        self.graph.set_flow(all_paths_used)

        util = 0
        total_util = 0
        for p in paths_used:
            flow = self.graph.get_flow(Flow.get_id(p[1][0], p[1][-1]))
            util += (flow.get_requested_bandwidth()
                     + flow.get_effective_bandwidth())
        for p in all_paths_used:
            flow = self.graph.get_flow(Flow.get_id(p[1][0], p[1][-1]))
            total_util += (flow.get_requested_bandwidth()
                           + flow.get_effective_bandwidth())
        job_config = JobConfig(util, total_util, copy_links(cloned_links),
                               paths_used)

    self.reset()
    return job_config
def handlePacket(self, pkt):
    IP_layer = IP if IP in pkt else IPv6
    # add IPv6 support another night...
    if IP_layer == IPv6:
        return
    self.pkts.append(pkt)
    elts = []
    edges = []

    new_elts, new_edges = self.checkIP(pkt)
    if new_elts:
        elts += new_elts
    if new_edges:
        edges += new_edges

    new_elts, new_edges = self.checkDNS(pkt)
    if new_elts:
        elts += new_elts
    if new_edges:
        edges += new_edges

    # do flow analysis here, if necessary
    if TCP in pkt or UDP in pkt:
        Flow.pkt_handler(pkt, self.flows)
        flow = self.flows[Flow.flowid(pkt)]
        self.send_flow_statistics(flow)

        new_elts, new_edges = self.checkHTTP(flow)
        if new_elts:
            elts += new_elts
        if new_edges:
            edges += new_edges
    # end flow analysis

    if elts != [] or edges != []:
        self.send_nodes(elts, edges)
    if self.pcap:
        time.sleep(0.1)
def _run_flow(flow_spec):
    flow_spec_obj = None
    if type(flow_spec) is not dict:
        try:
            flow_spec_obj = json.loads(flow_spec, strict=False)
        except Exception as e:
            # print "invalid flow specification format"
            raise e
    else:
        flow_spec_obj = flow_spec

    aflow = Flow(flow_spec_obj.get("id"), flow_spec_obj.get("name"))

    for node_def in flow_spec_obj.get("nodes"):
        anode = create_node(node_def.get("spec_id"), node_def.get("id"),
                            node_def.get("name"))
        aflow.add_node(anode)
        if "is_end" in node_def.keys() and node_def.get("is_end") == 1:
            end_node = anode
        for port_def in node_def.get("ports"):
            anode.set_inport_value(port_def.get("name"),
                                   port_def.get("value"))

    for link_def in flow_spec_obj.get("links"):
        source = link_def.get("source").split(":")
        target = link_def.get("target").split(":")
        aflow.link(source[0], source[1], target[0], target[1])

    stats = aflow.run(end_node)
    return stats
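# A hypothetical flow specification in the shape _run_flow() above parses.
# The spec_id values, node ids, and port names are made up for illustration;
# real values would depend on the node specs registered with create_node().
spec = {
    "id": "f1",
    "name": "demo",
    "nodes": [
        {"spec_id": "const", "id": "a", "name": "A",
         "ports": [{"name": "value", "value": 1}]},
        {"spec_id": "add", "id": "b", "name": "B", "is_end": 1,
         "ports": [{"name": "rhs", "value": 2}]},
    ],
    "links": [
        {"source": "a:out", "target": "b:lhs"},
    ],
}
stats = _run_flow(spec)  # also accepts the same structure as a JSON string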
class TestRunner(unittest.TestCase):

    def test_very_simple_flow(self):
        logging.basicConfig()
        self.flow = Flow()

        self.input = FileInputNode(id='input')
        self.input.filepath.value = __file__

        tmp = tempfile.NamedTemporaryFile()
        self.output = FileOutputNode(id='output')
        self.output.filepath.value = tmp.name

        self.flow.addNode(self.input)
        self.flow.addNode(self.output)
        self.flow.addConnector(self.input.output, self.output.input)

        r = Runner(self.flow)
        r.start()

        lines = tmp.readlines()
        self.assertTrue(lines)
        self.assertEqual("#!/usr/bin/python\n", lines[0])
        self.assertEqual(" unittest.main()\n", lines[-1])
        tmp.close()
def set_flow(self, chosen_paths):
    self.flows = {}
    # print "chosen_paths: ", chosen_paths

    # Create the Flow objects
    for cp in self.comm_pattern:
        fl = Flow(cp[0], cp[1], cp[2])
        self.flows[fl.get_end_points()] = fl

    # Attach each chosen path, as a list of links, to its flow
    for p in chosen_paths:
        path = p[1]
        link_list = []
        for i in range(len(path) - 1):
            l = self.links[Link.get_id(path[i], path[i + 1])]
            link_list.append(l)
        fl = self.flows[Flow.get_id(path[0], path[-1])]
        fl.set_path(link_list)

    for link in self.get_links().values():
        link.adjust_flow_bandwidths()
class TestNode(unittest.TestCase):

    def setUp(self):
        self.n1 = INode()
        self.f1 = Flow()
        self.f1.addNode(self.n1)

    def test_findInterface(self):
        self.assertEqual(self.n1.i1, self.n1.findInterface('i1'))
        self.assertRaises(FlowError, self.n1.findInterface, 'foo')

    def test_applyAttributes(self):
        self.assertEqual(self.n1.id, '')
        self.n1.applyAttributes({'id': ('foo', None)})
        self.assertEqual(self.n1.id, 'foo')

        self.assertEqual(self.n1.i1.value, None)
        self.assertEqual(self.n1.i1.slot, True)
        self.assertEqual(self.n1.i3.value, None)
        self.assertEqual(self.n1.i3.slot, True)
        self.n1.applyAttributes({'i1': ('bar', False), 'i3': ('foo', True)})
        self.assertEqual(self.n1.i1.value, 'bar')
        self.assertEqual(self.n1.i1.slot, False)
        self.assertEqual(self.n1.i3.value, 'foo')
        self.assertEqual(self.n1.i3.slot, True)
        self.assertEqual(self.n1.i2.value, None)
        self.assertEqual(self.n1.i4.value, None)

    def test_interfaces(self):
        self.assertEqual(4, len(self.n1.interfaces))
        self.assertTrue(all([i in self.n1.inputInterfaces
                             for i in [self.n1.i1, self.n1.i3]]))
        self.assertTrue(all([i in self.n1.outputInterfaces
                             for i in [self.n1.i2, self.n1.i4]]))
        self.assertTrue(self.n1.i1 in self.n1.inputSlotInterfaces)
        self.n1.i3.slot = False
        self.assertFalse(self.n1.i3 in self.n1.inputSlotInterfaces)
def update_jobs_utilization(self):
    used_paths = []
    self.add_previous_jobs()
    for job in self.jobs_config.values():
        used_paths.extend(job.get_used_paths())
    self.graph.set_flow(used_paths)

    for job in self.jobs_config.values():
        util = 0
        for p in job.get_used_paths():
            flow = self.graph.get_flow(Flow.get_id(p[1][0], p[1][-1]))
            util += (flow.get_requested_bandwidth()
                     + flow.get_effective_bandwidth())
        job.set_util(util)

    self.reset()
def initialize(self):
    """ Initializes the router. """
    events = []

    # Creates a flow for each router neighbor of this router
    for dest in self.neighbors():
        if not isinstance(dest, Router):
            continue
        congestion = AIMD()
        flow = Flow(congestion)
        flow.start(0)
        flow.dest(dest)
        congestion.initialize(flow)
        self._flows[dest] = flow
        self._changed[dest] = False

    # Initializes the routing algorithm
    self._algorithm.initialize(self)

    for (dest, flow) in self._flows.iteritems():
        packet = self._create_packet(self, dest)
        packet.set_create_time(flow.start())
        port = self._algorithm.next(dest)
        # Checks that the destination is reachable
        if port is None:
            continue
        # Creates an event for the starting time of the flow
        create_event = self._create_event(flow.start(), port,
                                          Event._CREATE, packet)
        events.append(create_event)

    self._next_update += Router._UPDATE_EVERY
    return events
def run(self):
    msisdn = input("Enter your MSISDN: \n"
                   " #123# for Banking \n"
                   " #234# for Calculator \n")
    if msisdn in self.flows:
        Flow.create_flow(self.flows[msisdn],
                         StringUtils.camelize(self.flows[msisdn])).menu()
    else:
        self.run()
def _get_flows(user=None, status=None, name=None, n=5):
    workflows = take(api.get_workflows({'user': user, 'status': status,
                                        'name': name}), n)
    return [Flow.from_workflow_id(w['id']) for w in workflows]
def _initialize(self, config):
    curr_type = -1
    devices = {}
    links = {}
    flows = {}
    measure_flows = {}
    measure_links = {}

    for line in config:
        line = line.strip()
        if line in self._type_format:
            curr_type = self._type_format[line]
            continue

        # Hosts
        if curr_type == 0:
            [name] = line.split(', ')
            host = Host(name)
            devices[name] = host

        # Routers
        elif curr_type == 1:
            [name, algorithm] = line.split(', ')
            if algorithm == BellmanFord._TYPE:
                algorithm = BellmanFord()
            router = Router(algorithm, name)
            devices[name] = router

        # Connections
        elif curr_type == 2:
            [name, source, dest, rate, delay, size] = line.split(', ')
            source_device = devices[source]
            dest_device = devices[dest]
            rate = int(rate)
            delay = float(delay)
            size = int(size)

            source_link = Link()
            dest_link = Link()

            source_port = Port()
            source_port.source(source_device)
            source_port.conn(dest_link)
            source_port.incoming(Buffer(size))
            source_port.outgoing(Buffer(size))

            dest_port = Port()
            dest_port.source(dest_device)
            dest_port.conn(source_link)
            dest_port.incoming(Buffer(size))
            dest_port.outgoing(Buffer(size))

            source_link.dest(source_port)
            source_link.rate(rate)
            source_link.delay(delay)
            source_link.getTracker().set_delay(delay)

            dest_link.dest(dest_port)
            dest_link.rate(rate)
            dest_link.delay(delay)
            dest_link.getTracker().set_delay(delay)

            source_device.enable(source_port)
            dest_device.enable(dest_port)

            links[name] = dest_link

        # Flows
        elif curr_type == 3:
            [name, source, dest, size, time, algorithm] = line.split(', ')
            source_device = devices[source]
            dest_device = devices[dest]
            size = int(size)
            time = float(time)

            if algorithm == AIMD._TYPE:
                algorithm = AIMD()
            elif algorithm == FAST._TYPE:
                algorithm = FAST()

            flow = Flow(algorithm)
            flow.bits(size)
            flow.start(time)
            flow.dest(dest_device)
            algorithm.initialize(flow)
            source_device.connect(flow)

            flows[name] = flow

        # Measurables
        elif curr_type == 4:
            # TODO: track measurables
            [name, type] = line.split(', ')
            if type == "flow":
                measure_flows[name] = flows[name]
            elif type == "link":
                measure_links[name] = links[name]

    return (devices.values(), measure_flows, measure_links)
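# A hypothetical configuration for _initialize() above. The section header
# strings must match the keys of self._type_format, and the algorithm names
# must match BellmanFord._TYPE / AIMD._TYPE / FAST._TYPE; none of those are
# shown in this collection, so the values below are assumptions. Field order
# follows the parser: hosts (name), routers (name, algorithm), connections
# (name, source, dest, rate, delay, size), flows (name, source, dest, size,
# time, algorithm), measurables (name, type).
sample_config = """\
HOSTS
H1
H2
ROUTERS
R1, bellman-ford
CONNECTIONS
L1, H1, R1, 10000000, 0.01, 64000
L2, R1, H2, 10000000, 0.01, 64000
FLOWS
F1, H1, H2, 160000000, 1.0, aimd
MEASURABLES
F1, flow
L1, link
""".splitlines()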
#! /usr/bin/env python
"""
echo_message.py
Bot that echoes messages back.
usage: ./echo_message.py
"""

from flow import Flow

flow = Flow()

# Start local account (by default the first one)
flow.start_up()


# Define a function to deal with 'message' notifications
@flow.message
def echo_message(notif_type, data):
    # There are two types of messages,
    # 'regularMessages' and 'channelMessages'.
    # We only care about 'regularMessages'
    # ('channelMessages' are used for other purposes)
    regular_messages = data["regularMessages"]
    for message in regular_messages:
        sender_id = message["senderAccountId"]
        # Just echo messages coming from other account
        if sender_id != flow.account_id():
            cid = message["channelId"]
            # Get the channel properties
class Reasoner:
    # The variables:
    def __init__(self):
        self.mapping = Mapping()
        self.flow = Flow()
        self.model = RDF.Model()
        const.base_uri = RDF.Uri("baku")
        if self.model is None:
            raise Exception("new RDF.model failed")

    def parse_map(self, filename):
        self.mapping.parse(filename)

    def parse_flow(self, filename):
        self.flow.parse(filename)

    # Let's store all the RDF triples into the internal model
    def parse_input(self, filename):
        # parse the file
        parser = RDF.Parser('raptor')
        if parser is None:
            raise Exception("Failed to create RDF.Parser raptor")
        uri = RDF.Uri(string="file:" + filename)
        # add all the triples to the model
        for s in parser.parse_as_stream(uri, const.base_uri):
            self.model.add_statement(s)

    def parse_json_input(self, filename):
        import json2rdf, json
        data = json.loads(open(filename).read())
        if type(data) is list:
            data = data[0]
        if not type(data) is dict:
            raise Exception('The JSON data is not a dict')
        rdf_string = json2rdf.convert(data)
        # parse the string
        parser = RDF.Parser('raptor')
        if parser is None:
            raise Exception("Failed to create RDF.Parser raptor")
        uri = RDF.Uri(string="file:" + filename)
        # add all the triples to the model
        for s in parser.parse_string_as_stream(rdf_string, const.base_uri):
            self.model.add_statement(s)

    # Debug info
    def info(self):
        self.mapping.info()
        self.flow.info()

    # the main operation of the reasoner
    def run(self):
        # Let's start from the root node
        n = self.flow.root_node()
        # Until we have a node...
        while isinstance(n, Node):
            # maybe this is already the answer:
            if n.is_question() == False:
                print 'The solution is:', n.text.encode('utf8')
                break
            # Let's think about this question:
            print 'Question:', n.text.encode('utf8')
            option = self.mapping.choose(self.model, n)
            # The option chosen is:
            print 'Answer:', option.text.encode('utf8'), "\n"
            n = self.flow.node(option.node)
def test_flow(self):
    flow1 = Flow('host1', 'host2', 1000)
    self.assert_(flow1.get_requested_bandwidth() == 1000)
    self.assert_(flow1.get_end_points() == ('host1', 'host2'))
    self.assert_(not flow1.get_path())
import os  # needed for os.EX_USAGE below
import sys
import time
import string
import random

from flow import Flow

if len(sys.argv) < 3:
    print("usage: %s <username> <recoveryKey>" % sys.argv[0])
    sys.exit(os.EX_USAGE)

username = sys.argv[1]
password = sys.argv[2]

flow = Flow()

print("* Checking if account is already installed...")
try:
    flow.start_up(
        username=username,
    )
    print("* Account already installed.")
    sys.exit(0)
except Flow.FlowError as flow_err:
    pass

print("* Account not installed, trying local device creation...")
try:
./auto_join.py <teamId>
"""

import os
import sys

from flow import Flow

if len(sys.argv) < 2:
    print("usage: %s <teamId>" % sys.argv[0])
    sys.exit(os.EX_USAGE)

team_id = sys.argv[1]

flow = Flow()

# Log in with the first local device found in the system
flow.start_up()


# Callback to automatically add users to the team and all its channels
def accept(notif_type, notif_data):
    for ojr in notif_data:
        if ojr["orgId"] == team_id:
            user_id = ojr["accountId"]
            username = flow.get_peer_from_id(user_id)["username"]
            # Add user to Team
            flow.org_add_member(team_id, user_id, "m")
            print("* user '%s' added to team." % username)
            # Add user to all Channels within Team
def convert_model(master_spec, sess):
    # Create flow.
    flow = Flow()
    builder = FlowBuilder(sess, flow)

    # Get components.
    components = []
    connectors = {}
    for c in master_spec.component:
        component = Component(c, builder, connectors)
        components.append(component)

    # Extract components.
    for c in components:
        c.extract()

    # Sanitize names.
    for c in components:
        flow.rename_prefix(c.path() + "/", c.name + "/")
    flow.rename_suffix("/ExponentialMovingAverage:0", "")
    flow.rename_suffix(LSTM_H_IN + ":0", "h_in")
    flow.rename_suffix(LSTM_H_OUT + ":0", "h_out")
    flow.rename_suffix(LSTM_C_IN + ":0", "c_in")
    flow.rename_suffix(LSTM_C_OUT + ":0", "c_out")
    flow.rename_suffix(FF_HIDDEN + ":0", "hidden")
    flow.rename_suffix(FF_OUTPUT + ":0", "output")

    # Get external resources.
    lexicon_file = None
    prefix_file = None
    suffix_file = None
    commons_file = None
    actions_file = None
    for c in master_spec.component:
        for r in c.resource:
            if r.name == "word-vocab":
                lexicon_file = r.part[0].file_pattern
            elif r.name == "prefix-table":
                prefix_file = r.part[0].file_pattern
            elif r.name == "suffix-table":
                suffix_file = r.part[0].file_pattern
            elif r.name == "commons":
                commons_file = r.part[0].file_pattern
            elif r.name == "action-table":
                actions_file = r.part[0].file_pattern

    # Add lexicon to flow.
    if lexicon_file is not None:
        lexicon = flow.blob("lexicon")
        lexicon.type = "dict"
        lexicon.add_attr("delimiter", 10)
        lexicon.add_attr("oov", 0)
        lexicon.add_attr("normalize_digits", 1)
        lexicon.data = read_file(lexicon_file)

    # Add prefix table to flow.
    if prefix_file is not None:
        prefixes = flow.blob("prefixes")
        prefixes.type = "affix"
        prefixes.data = read_file(prefix_file)

    # Add suffix table to flow.
    if suffix_file is not None:
        suffixes = flow.blob("suffixes")
        suffixes.type = "affix"
        suffixes.data = read_file(suffix_file)

    # Add commons to flow.
    if commons_file is not None:
        commons = flow.blob("commons")
        commons.type = "frames"
        commons.data = read_file(commons_file)

    # Add action table to flow.
    if actions_file is not None:
        actions = flow.blob("actions")
        actions.type = "frames"
        actions.data = read_file(actions_file)

    return flow
Inputs: A schedule of events based on entered times.
Outputs: Sequence of events to a screen as they happen;
         daily flow rate data.
'''

from toiletschedule import Schedule
from flow import Flow
import datetime
import time

workingSchedule = Schedule()
midnight = datetime.time()
nextStartTime = datetime.time()
nextEndTime = datetime.time()
meter = Flow()
counter = 1

meter.disableStepper()  # prevent the motor from burning up

while True:
    print "\n1: Run Schedule"
    print "2: Manage Schedules"
    print "3: Exit"

    option = 0
    while option == 0:
        input = raw_input("\nPlease select an option.\n")
        if input.isdigit():
            option = int(input)
def do_flow(self):
    f = Flow(self.cuts)
    hjy = f.estimate_minpoint(self.rephi_corr, None, self.pephi_corr)
def menu(self):
    self.input = input("1. Balance Inquiry \n2. Mini Statement \n"
                       "3. Funds Transfer \n9. Exit \n")
    flow = Flow()
    flow.do(self.operations, self.input, "bank_flow")
class TestFlow(unittest.TestCase):

    def setUp(self):
        self.flow = Flow()
        self.n1 = INode()
        self.n2 = INode()
        self.f1 = Flow()
        self.f1.addNode(self.n1)
        self.f1.addNode(self.n2)

    def test_addNode(self):
        self.assertEqual(self.flow.nodes, [])
        n = Node()
        self.flow.addNode(n)
        self.assertEqual(self.flow.nodes, [n])
        self.assertEqual(n.flow, self.flow)
        self.assertTrue(n in self.flow.nodes)
        self.assertTrue(n in self.flow.startNodes)
        # Add it twice
        self.flow.addNode(Node(id='foo'))
        self.assertRaises(FlowError, self.flow.addNode, Node(id='foo'))
        # Add it twice without name
        self.flow.addNode(Node())
        self.flow.addNode(Node())  # Not raising

    def test_removeNode(self):
        self.assertEqual(self.flow.nodes, [])
        self.assertRaises(FlowError, self.flow.removeNode, Node())
        n = Node()
        self.flow.addNode(n)
        self.flow.removeNode(n)
        self.assertFalse(n in self.flow.nodes)
        self.assertEqual(n.flow, None)

    def test_findNode(self):
        self.assertRaises(NodeNotFoundError, self.flow.findNode, 'foo')
        n = Node()
        self.flow.addNode(n)
        self.assertEqual(n, self.flow.findNode(''))
        n = Node(id='bar')
        self.flow.addNode(n)
        self.assertNotEqual(n, self.flow.findNode(''))
        self.assertEqual(n, self.flow.findNode('bar'))

    def test_randomId(self):
        class FooNode(Node):
            label = 'foo'
        self.flow.addNode(FooNode())
        self.assertEqual('foo', self.flow.randomId(FooNode()))
        self.flow.addNode(FooNode())
        self.assertEqual('foo-2', self.flow.randomId(FooNode()))
        self.flow.addNode(FooNode())
        self.assertEqual('foo-3', self.flow.randomId(FooNode()))
        self.flow.addNode(FooNode())
        self.assertEqual('foo-4', self.flow.randomId(FooNode()))

    def test_addConnector(self):
        self.f1.addConnector(self.n1.i2, self.n2.i1)
        self.assertEqual(1, len(self.n1.successors))
        self.assertEqual(0, len(self.n2.successors))
        self.assertEqual(0, len(self.n1.predecessors))
        self.assertEqual(1, len(self.n2.predecessors))
        self.assertTrue(self.n1 in self.f1.startNodes)
        self.assertFalse(self.n2 in self.f1.startNodes)
        self.assertRaises(FlowError, self.f1.addConnector,
                          self.n2.i1, self.n1.i2)

    def test_removeConnector(self):
        self.f1.addConnector(self.n2.i2, self.n1.i1)
        self.assertRaises(FlowError, self.f1.removeConnector,
                          self.n1.i1, self.n2.i2)
        self.flow.removeConnector(self.n2.i2, self.n1.i1)
        self.assertTrue(self.n1 in self.f1.startNodes)
        self.assertTrue(self.n2 in self.f1.startNodes)
        self.assertEqual(0, len(self.n1.successors))
        self.assertEqual(0, len(self.n2.successors))
        self.assertEqual(0, len(self.n1.predecessors))
        self.assertEqual(0, len(self.n2.predecessors))
        self.assertRaises(FlowError, self.f1.removeConnector,
                          self.n2.i2, self.n1.i1)