def __init__(self, *,
             torrent: state.Torrent,
             complete_pieces_to_write: trio.MemorySendChannel,
             write_confirmations: trio.MemoryReceiveChannel,
             blocks_to_read: trio.MemorySendChannel,
             blocks_for_peers: trio.MemoryReceiveChannel,
             auto_shutdown=False) -> None:
    self._auto_shutdown = auto_shutdown
    self._state = torrent
    # interact with self
    self._peers_without_connection = trio.open_memory_channel(
        config.INTERNAL_QUEUE_SIZE)
    # interact with FileManager
    self._complete_pieces_to_write = complete_pieces_to_write
    self._write_confirmations = write_confirmations
    self._blocks_to_read = blocks_to_read
    self._blocks_for_peers = blocks_for_peers
    # interact with peer connections
    self._msg_from_peer = trio.open_memory_channel(
        config.INTERNAL_QUEUE_SIZE)
    # queues for sending TO peers are initialized on a per-peer basis
    self._peers: Dict[bytes, peer_state.PeerState] = dict()
    # data received but not written to disk
    self._received_blocks: Dict[int, Tuple[bitarray, bytearray]] = dict()
    self.requests = requests.RequestManager()
    if config.MAX_OUTGOING_BYTES_PER_SECOND is None:
        self.token_bucket: Union[NullBucket, TokenBucket] = NullBucket()
    else:
        self.token_bucket = TokenBucket(
            config.MAX_OUTGOING_BYTES_PER_SECOND)
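Config-driven rate limiting like the branch above reads best when the unlimited case is a null object rather than scattered `if` checks. A minimal sketch of the NullBucket this constructor assumes (the class name comes from the snippet; the consume interface is an assumption):

class NullBucket:
    """Stand-in for TokenBucket when MAX_OUTGOING_BYTES_PER_SECOND is None:
    same consume() interface, but it never throttles (assumed interface)."""
    def consume(self, nbytes: int) -> bool:
        return True  # always grant; no upload limit configured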
def __init__(self, key='', secret=''):
    self.url = 'https://api.cryptsy.com/api'
    self.key = key
    self.secret = secret
    self.markets = None
    self.rate_limiter = TokenBucket(RATE)
    random.seed(time.time())
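A hedged sketch of how such a client might gate each API call on the bucket; both method names (public_query, _query) and the blocking consume()/sleep loop are assumptions for illustration, not the client's actual API:

import time

def public_query(self, method, params=None):
    # Wait until the bucket grants one request token (assumed interface).
    while not self.rate_limiter.consume(1):
        time.sleep(0.05)
    return self._query(method, params or {})  # hypothetical request helper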
def generateRouters(graphSizeX, graphSizeY, areaSize, routerChancePerArea,
                    rate, capacity, calendarQueue):
    routerList = {}
    routerId = 0
    # Generate routers homogeneously in a grid while avoiding placing
    # routers too close to each other.
    for i in range(graphSizeX // areaSize):
        for j in range(graphSizeY // areaSize):
            if random.random() < routerChancePerArea:
                storage = MemoryStorage()
                bucket = TokenBucket(rate, capacity, storage)
                x = int((random.random() * 0.5 + i + 0.25) * areaSize)
                y = int((random.random() * 0.5 + j + 0.25) * areaSize)
                router = Router(routerId, x, y, state=True, tokenBucket=bucket,
                                neighbours={}, LSDB={}, bufferSize=10,
                                calendar=calendarQueue, linkStates=None)
                routerList[routerId] = router
                routerId += 1
    return routerList
def worker(rank, size, input_file_specs, batch_size=256, warmup_sec=10.0,
           run_sec=60 * 60 * 4, num_threads=0, sync=False, warn_latency_sec=4.0,
           report_period_sec=2.0, round_robin_files=True, throttle_sleep_sec=0.01,
           throttle_total_rate_bytes_per_sec=0):
    if rank == 0:
        print('storage_benchmark_tensorflow: BEGIN')
        print(datetime.datetime.utcnow())
    metrics_file_name = '/imagenet-scratch/logs/storage_benchmark_tensorflow_metrics-%d.log' % rank
    with open(metrics_file_name, 'a') as metrics_file:
        hostname = socket.gethostname()
        # Set random seed to have deterministic behavior.
        tf.set_random_seed(rank + 1)
        # Round robin the input file spec. This allows multiple mount points to be used.
        input_file_spec = input_file_specs[hvd.local_rank() % len(input_file_specs)]
        print('rank=%3d: %s: input_file_spec=%s' % (rank, hostname, input_file_spec))
        if round_robin_files:
            # Distribute sets of file names evenly over all processes and without overlap.
            all_input_filenames = sorted(glob.glob(input_file_spec))
            num_files = len(all_input_filenames)
            i = rank
            input_filenames = []
            while i < num_files:
                input_filenames.append(all_input_filenames[i])
                i += size
            print('rank=%3d: Found %d total files. %d files assigned to this process.'
                  % (rank, len(all_input_filenames), len(input_filenames)))
            if len(input_filenames) == 0:
                raise ValueError('Not enough matching files.')
            input_file_spec = None
        else:
            # This will use tf.data.TFRecordDataset.list_files to randomly distribute files.
            input_filenames = None

        #
        # Build execution graph.
        #
        ds_iterator = create_iterator(batch_size, num_threads,
                                      input_file_spec=input_file_spec,
                                      input_filenames=input_filenames)
        # num_bytes_tensor is an int64 tensor of shape (batch_size).
        num_bytes_tensor = ds_iterator.get_next()
        # When num_bytes_for_step_tensor is evaluated, it reads the TFRecord files.
        num_bytes_for_step_tensor = tf.reduce_sum(num_bytes_tensor)
        # The following operations are used to synchronize the processes when running in sync mode.
        if sync:
            stop_flag_placeholder = tf.placeholder(tf.bool, shape=())
            stop_flag_broadcast_tensor = hvd.broadcast(stop_flag_placeholder, 0, 'stop_flag_broadcast')
            num_bytes_for_step_placeholder = tf.placeholder(tf.int64, shape=())
            total_bytes_for_step_tensor = hvd.allreduce(
                num_bytes_for_step_placeholder, average=False)

        #
        # Start the TensorFlow session and execute the graph.
        #
        config = tf.ConfigProto()
        config.device_count['GPU'] = 0
        config.intra_op_parallelism_threads = 1
        config.inter_op_parallelism_threads = 1
        print('rank=%3d: Creating session' % rank)
        with tf.Session(config=config) as session:
            print('rank=%3d: Session created' % rank)
            session.run([tf.initializers.global_variables(), tf.tables_initializer()])
            print('rank=%3d: Initialized variables' % rank)
            # Run first step. This can take 30 seconds for 100,000 files.
            print('rank=%3d: Running first step' % rank)
            _ = session.run(num_bytes_for_step_tensor)
            print('rank=%3d: First step complete' % rank)
            # Wait for barrier so we know when all processes have finished the first step.
            print('rank=%3d: Waiting for barrier' % rank)
            session.run(hvd.allreduce(tf.constant(0)))
            if rank == 0:
                print('rank=%3d: Completed waiting for barrier' % rank)
            # To ensure that all processes finish warmup and stop at exactly the same time,
            # the rank 0 node broadcasts its time to all other ranks.
            # This also serves as a synchronization barrier.
            local_t0 = time.time()
            t0_tensor = tf.constant(local_t0, tf.float64)
            t0_tensor = hvd.broadcast(t0_tensor, 0, 't0')
            t0 = session.run(t0_tensor)
            start_time = t0 + warmup_sec
            stop_time = start_time + run_sec
            step = 0
            warmed_up = False
            num_records = 0
            num_bytes = 0
            total_bytes = 0
            next_report_time = time.time() + report_period_sec
            if throttle_total_rate_bytes_per_sec:
                throttle_rate_bytes_per_sec = throttle_total_rate_bytes_per_sec / size
                burst_sec = 1.0
                throttle = TokenBucket(tokens=throttle_rate_bytes_per_sec * burst_sec,
                                       fill_rate=throttle_rate_bytes_per_sec)
            else:
                throttle = None
            while True:
                # Reset all counters when warmup completes.
                t = time.time()
                if not warmed_up and t >= start_time:
                    print('rank=%3d: warmup complete at step %d' % (rank, step))
                    warmed_up = True
                    t0 = start_time
                    step = 0
                    num_records = 0
                    num_bytes = 0
                    total_bytes = 0
                # Run a single step of batch_size records per process.
                run_options = tf.RunOptions()
                # run_options.timeout_in_ms = 10000
                num_bytes_for_step = np.int64(0)
                try:
                    num_bytes_for_step = session.run(num_bytes_for_step_tensor, options=run_options)
                except Exception as e:
                    print('rank=%3d: %s: ERROR: %s' % (rank, hostname, e))
                step_dt = time.time() - t
                if (warmed_up or step >= 1) and step_dt > warn_latency_sec:
                    print('rank=%3d: %s: WARNING: step %d took %0.3f seconds'
                          % (rank, hostname, step, step_dt))
                    next_report_time = 0.0
                # Calculate local stop flag. In sync mode, this is broadcast from rank 0.
                stop_flag = time.time() >= stop_time
                # Use Horovod to aggregate the byte counter across all processes.
                # This also acts as a synchronization barrier, much like gradient descent when
                # it shares gradients.
                # Also coordinate the stop flag so all processes stop at the same step.
                sync_dt = 0.0
                if sync:
                    t = time.time()
                    total_bytes_for_step, stop_flag = session.run(
                        [total_bytes_for_step_tensor, stop_flag_broadcast_tensor],
                        feed_dict={
                            num_bytes_for_step_placeholder: num_bytes_for_step,
                            stop_flag_placeholder: stop_flag,
                        },
                    )
                    total_bytes += total_bytes_for_step
                    sync_dt = time.time() - t
                    if warmed_up and sync_dt > 30.0:
                        print('rank=%3d: %s: WARNING: sync after step %d took %0.3f seconds'
                              % (rank, hostname, step, sync_dt))
                        next_report_time = 0.0
                num_records += batch_size
                num_bytes += num_bytes_for_step
                t = time.time()
                metrics = {
                    '@timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
                    'batch_size': batch_size,
                    'rank': rank,
                    'hostname': hostname,
                    'step': step,
                    'num_bytes': int(num_bytes_for_step),
                    'latency_sec': step_dt,
                    'sync_latency_sec': sync_dt,
                }
                json.dump(metrics, metrics_file)
                metrics_file.write("\n")
                metrics_file.flush()
                if t >= next_report_time:
                    dt = t - t0
                    if not sync:
                        records_per_sec = num_records / dt
                        bytes_per_sec = num_bytes / dt
                        MB_per_sec = bytes_per_sec / 1e6
                        print('rank=%3d: warmed_up=%d, step=%6d, records/sec=%8.0f, '
                              'MB/sec=%11.3f, records=%10d, bytes=%15d, dt=%9.3f'
                              % (rank, warmed_up, step, records_per_sec, MB_per_sec,
                                 num_records, num_bytes, dt))
                    if sync:
                        if rank == 0:
                            total_records = num_records * size
                            records_per_sec = total_records / dt
                            bytes_per_sec = total_bytes / dt
                            MB_per_sec = bytes_per_sec / 1e6
                            print('TOTAL: warmed up=%d, step=%6d, records/sec=%8.0f, '
                                  'MB/sec=%11.3f, records=%10d, bytes=%15d, dt=%9.3f'
                                  % (warmed_up, step, records_per_sec, MB_per_sec,
                                     total_records, total_bytes, dt))
                    next_report_time = t + report_period_sec
                # Throttle byte rate.
                if throttle:
                    while not throttle.consume(num_bytes_for_step):
                        # print('sleeping')
                        time.sleep(throttle_sleep_sec)
                if stop_flag:
                    print('rank=%3d: %s: complete at step %d' % (rank, hostname, step))
                    break
                step += 1
            # Use Horovod to aggregate the final counters across all processes.
            num_steps_tensor = tf.constant(step)
            num_bytes_tensor = tf.constant(num_bytes)
            total_steps_tensor = hvd.allreduce(num_steps_tensor, average=False)
            total_bytes_tensor = hvd.allreduce(num_bytes_tensor, average=False)
            total_steps, total_bytes = session.run(
                [total_steps_tensor, total_bytes_tensor])
            if rank == 0:
                dt = stop_time - start_time
                num_records = total_steps * batch_size
                records_per_sec = num_records / dt
                total_GB = total_bytes / 1e9
                bytes_per_sec = total_bytes / dt
                MB_per_sec = bytes_per_sec / 1e6
                print('FINAL: number of processes: %12d' % size)
                print('FINAL: batch size: %12d' % batch_size)
                print('FINAL: sync: %12s' % sync)
                print('FINAL: round robin files: %12s' % round_robin_files)
                print('FINAL: number of records: %12d' % num_records)
                print('FINAL: GB: %12.3f' % total_GB)
                print('FINAL: elapsed sec: %12.3f' % dt)
                print('FINAL: records/sec: %12.0f' % records_per_sec)
                print('FINAL: MB/sec: %12.3f' % MB_per_sec)
    if rank == 0:
        print('storage_benchmark_tensorflow: END')
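The throttle loop above pairs a bucket built with tokens (burst size in bytes) and fill_rate (bytes per second) keywords with a consume()/sleep spin. A minimal TokenBucket consistent with that call, as a sketch (only the constructor keywords and consume() come from the snippet; the internals are assumptions):

import time

class TokenBucket:
    """Byte-rate throttle: starts with `tokens` bytes of burst and refills
    at `fill_rate` bytes per second (sketch of the assumed interface)."""
    def __init__(self, tokens, fill_rate):
        self.capacity = float(tokens)
        self._tokens = float(tokens)
        self.fill_rate = float(fill_rate)
        self._last = time.time()

    def consume(self, n):
        # Refill proportionally to elapsed wall time, capped at capacity.
        now = time.time()
        self._tokens = min(self.capacity,
                           self._tokens + (now - self._last) * self.fill_rate)
        self._last = now
        if n <= self._tokens:
            self._tokens -= n
            return True
        return False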
def btn_aniadir():
    n = int(en_n.get())
    t = int(en_t.get())
    my_pattern.append((n, t))
    scrolled_text.configure(state='normal')
    scrolled_text.insert(
        'insert', '[*] ' + str(n) + ' packets of ' + str(t) + ' bytes' + '\n')
    scrolled_text.configure(state='disabled')


if __name__ == '__main__':
    tb = TokenBucket()
    # tb.set_pattern([(800, 8), (600, 28), (500, 6), (500, 25), (2000, 8), (400, 25)])
    my_pattern = []
    window = Tk()
    window.title("Token Bucket")
    window.geometry('1000x400')
    numero_de_paquetes = 0
    radio_var = IntVar()
    btn = Button(window, text="Start", command=btn_empezar)
    btn.grid(column=90, row=90)
    lbl_aue = Label(window, text="Add an element:",
        self.scheduler.enter(process_time, 1, router.process_packet)


if __name__ == "__main__":
    listStorage = []
    listToken = []
    listRouter = []
    rate = 10
    capacity = 100
    simu = SimulatedTime(0)
    calendarQueue = CalendarQueue(simu)
    for i in range(10):
        listStorage.append(MemoryStorage())
        listToken.append(TokenBucket(rate, capacity, listStorage[i]))
        listRouter.append(
            Router(id=i, x=0, y=0, state=True, tokenBucket=listToken[i],
                   neighbours={}, LSDB={}, bufferSize=10,
                   calendar=calendarQueue, linkStates=None))
        if i > 2:
            listRouter[i].add_neighbour(2, i)
            listRouter[2].add_neighbour(i, i)
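The (rate, capacity, storage) constructor used here and in generateRouters suggests bucket state held in a separate MemoryStorage object, which fits a simulation whose clock (SimulatedTime) advances explicitly rather than by wall time. A sketch under that assumption; only the class names and the constructor signature come from the snippets, everything else is a guess:

class MemoryStorage:
    """Holds mutable bucket state so the policy object stays stateless (assumed design)."""
    def __init__(self):
        self.tokens = 0.0

class TokenBucket:
    def __init__(self, rate, capacity, storage):
        self.rate = rate          # tokens added per simulated time unit
        self.capacity = capacity
        self.storage = storage

    def refill(self, elapsed):
        # Driven by the simulation calendar, not wall time (assumption).
        self.storage.tokens = min(self.capacity,
                                  self.storage.tokens + self.rate * elapsed)

    def consume(self, n=1):
        if self.storage.tokens >= n:
            self.storage.tokens -= n
            return True
        return False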
def __init__(self, *args, **kwargs):
    super(Detection, self).__init__(*args, **kwargs)
    random.seed()
    # Make stdout/stdin unbuffered (Python 2 style).
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
    print >> sys.stderr, 'Loading topology'
    self.read_config()
    self.rules = load_rules(Detection.FILENAME + '.rules.pkl')
    self.topology_graph = load_topology(Detection.FILENAME + '.graphml')
    self.testpackets = load_testpackets(Detection.FILENAME + '.testpackets.pkl')
    self.datapaths = {}
    self.packetouts = {}
    self.packetouts_store = {}
    self.reputation = defaultdict(int)
    # Record test packets.
    # Key is (switch_id, header), value is tp_index.
    self.heaer_tpindex = {}
    # Key is test packet index.
    self.send_tpindex = set()
    self.recv_tpindex = set()
    self.send_tpindex_store = []
    self.ori_tetspackets_size = len(self.testpackets)
    self.inc_testpackets_size = 0
    # For incrementally adding rules.
    self.adding_ruleid_set = set()
    self.adding_ruleid = list()
    if Detection.INCREMENTAL_ADDING:
        self.adding_ruleid_set, self.adding_ruleid, self.testpackets = initialize_adding_rules(
            self.testpackets, adding_path_number=800, is_random=True, is_shuffle=True)
        self.adding_ruleid = self.adding_ruleid[:3000]
        #print len(self.adding_ruleid), 'adding rules'
        if Detection.DEBUG_MSG:
            print 'Adding-rule id:', self.adding_ruleid
    # For incrementally deleting rules.
    self.deleting_ruleid = list()
    if Detection.INCREMENTAL_DELETING:
        self.deleting_ruleid = initialize_deleting_rules(
            self.testpackets, deleting_path_number=700, is_random=True, is_shuffle=True)
        self.deleting_ruleid = self.deleting_ruleid[:3000]
        #print len(self.deleting_ruleid), 'deleting rules'
        if Detection.DEBUG_MSG:
            print 'Deleting-rule id:', self.deleting_ruleid
    # Simulate the attacker.
    # Key is the rule id compromised by the switch.
    self.persistent_fault = initialize_persistent_fault(
        Detection.PERSISTENT_NUM, self.rules, self.testpackets)
    self.nonpersistent_fault = initialize_nonpersistent_fault(
        Detection.NONPERSISTENT_NUM, self.rules, self.testpackets,
        self.persistent_fault)
    self.catch_fault = set()
    if Detection.PERSISTENT_NUM > 0:
        print >> sys.stderr, 'Persistent fault:', self.persistent_fault
        #print 'Non-persistent fault:', self.nonpersistent_fault
    # For recording traffic.
    self.packet_counter = 0
    self.traffic = []
    self.traffic_start_time = 0.0
    self.bucket = TokenBucket(Detection.TRAFFIC_RATE, Detection.TRAFFIC_RATE)
    # For experiment script.
    #write_controller_status(1)
    print >> sys.stderr, 'Waiting for switches to connect...'