def __init__(self, id):
    SimpleModule.__init__(self, id)

    config_parser = ConfigurationParser.get_instance()
    self.buffering_until = int(config_parser.get_parameter('buffering_until'))
    self.max_buffer_size = int(config_parser.get_parameter('max_buffer_size'))
    self.playback_step = int(config_parser.get_parameter('playbak_step'))
    self.url_mpd = config_parser.get_parameter('url_mpd')

    # time at which the last pause started
    self.pause_started_at = None
    self.pauses_number = 0

    # flag to verify whether the buffer has a minimal amount of data
    self.buffer_initialization = True

    # has the player already started to download a segment?
    self.already_downloading = False

    # the buffer itself
    self.buffer = []
    # the buffer played position
    self.buffer_played = 0
    # history of what was played from the buffer
    self.playback_history = []

    # initialize with the first segment sequence number to download
    self.segment_id = 1
    self.parsed_mpd = ''
    self.qi = []

    self.timer = Timer.get_instance()

    # playback thread
    self.playback_thread = threading.Thread(target=self.handle_video_playback)
    self.player_thread_events = threading.Event()
    self.lock = threading.Lock()
    self.kill_playback_thread = False

    self.request_time = 0

    self.playback_qi = OutVector()
    self.playback_quality_qi = OutVector()
    self.playback_pauses = OutVector()
    self.playback = OutVector()
    self.playback_buffer_size = OutVector()
    self.throughput = OutVector()

    self.whiteboard = Whiteboard.get_instance()
    self.whiteboard.add_playback_history(self.playback.get_items())
    self.whiteboard.add_playback_qi(self.playback_qi.get_items())
    self.whiteboard.add_playback_pauses(self.playback_pauses.get_items())
    self.whiteboard.add_playback_buffer_size(self.playback_buffer_size.get_items())
    self.whiteboard.add_buffer(self.buffer)
    self.whiteboard.add_max_buffer_size(self.max_buffer_size)
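# A minimal, standalone sketch (hypothetical, not the framework's actual playback
# loop) of how a `buffering_until` threshold could gate the start of playback:
# while the buffer is still initializing, playback only starts once the buffer
# holds at least `buffering_until` segments.
def should_start_playback(buffer_len, buffering_until, buffer_initialization):
    if buffer_initialization:
        # still filling the buffer: wait for the minimal amount of data
        return buffer_len >= buffering_until
    # after initialization, playback continues whenever there is buffered data
    return buffer_len > 0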
def __init__(self, id):
    SimpleModule.__init__(self, id)

    self.initial_time = 0
    self.qi = []

    # for traffic shaping
    config_parser = ConfigurationParser.get_instance()
    self.traffic_shaping_interval = int(
        config_parser.get_parameter('traffic_shaping_profile_interval'))
    self.traffic_shaping_seed = int(
        config_parser.get_parameter('traffic_shaping_seed'))
    self.traffic_shaping_values = []

    # mark the current traffic shaping interval
    self.current_traffic_shaping_interval = 0

    self.traffic_shaping_sequence = []
    # traffic shaping sequence position
    self.tss_position = 0
    # traffic shaping values position
    self.tsv_position = 0

    # map the profile sequence string (e.g. 'LMH') to the indices 0, 1, 2
    token = config_parser.get_parameter('traffic_shaping_profile_sequence')
    for i in range(len(token)):
        if token[i] == 'L':
            self.traffic_shaping_sequence.append(0)
        elif token[i] == 'M':
            self.traffic_shaping_sequence.append(1)
        elif token[i] == 'H':
            self.traffic_shaping_sequence.append(2)

    self.timer = Timer.get_instance()
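# A small usage sketch (a hypothetical helper, not part of the framework) showing
# how a traffic-shaping profile sequence string such as 'LMHH' maps to the 0/1/2
# indices built by the loop above; a dict lookup is an equivalent formulation.
def parse_profile_sequence(token):
    mapping = {'L': 0, 'M': 1, 'H': 2}
    # unknown characters are ignored, mirroring the if/elif chain above
    return [mapping[c] for c in token if c in mapping]

# parse_profile_sequence('LMHH') -> [0, 1, 2, 2]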
def __init__(self, id):
    IR2A.__init__(self, id)
    self.qi = []
    self.throughput = 0
    self.request_time = 0
    self.vM = 0
    self.timer = Timer.get_instance()
    self.pause_started_at = None
def __init__(self, id):
    IR2A.__init__(self, id)
    self.parsed_mpd = ''
    self.qi = []

    # Time is used for the throughput calculation, following the method described
    # in the specification document.
    self.timer = Timer.get_instance()
    self.request_time = self.timer.get_started_time()

    # Keep a list of the qualities chosen during the program's execution.
    # It is used by the decision algorithm.
    self.chosen_qi = [0]

    # Also keep a list of the throughput values measured at each decision point,
    # for use in the algorithm.
    self.throughput = [0]
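# A minimal sketch of the throughput measurement idea mentioned above
# (a hypothetical helper; times in seconds and segment sizes in bits are assumed):
# throughput is the segment size divided by the time elapsed since the request.
def measure_throughput(segment_size_bits, request_time, now):
    elapsed = now - request_time
    if elapsed <= 0:
        return 0.0
    return segment_size_bits / elapsed  # bits per second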
def image_upload(track_id, tpid, tpe, seq, image_type, content, task_seq='!!!', batch='!!!'):
    if not task_seq:
        task_seq = '!!!'
    if not batch:
        batch = '!!!'

    _timer = Timer()

    _timer.stage_begin('image write')
    if not image_write(track_id, tpid, tpe, seq, image_type, content, task_seq, batch):
        return False

    _timer.stage_begin('set batch')
    if not batch_manager.set_batch(track_id, tpid, tpe, seq, image_type, batch):
        return False

    _timer.finish()
    logger().debug(_timer.dump())
    return True
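# A compatible stage-timer sketch (an assumption about the Timer interface used
# above, not its real implementation): stage_begin() closes the previous stage
# and opens a new one, finish() closes the last stage, dump() reports durations.
import time

class StageTimer:
    def __init__(self):
        self._stages = []       # list of (name, duration) tuples
        self._current = None    # (name, start_time) of the currently open stage

    def _close_current(self, now):
        if self._current is not None:
            name, start = self._current
            self._stages.append((name, now - start))
            self._current = None

    def stage_begin(self, name):
        now = time.monotonic()
        self._close_current(now)
        self._current = (name, now)

    def finish(self):
        self._close_current(time.monotonic())

    def dump(self):
        return ', '.join('%s: %.3fs' % (name, dur) for name, dur in self._stages)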
def __init__(self, id):
    IR2A.__init__(self, id)
    self.parsed_mpd = ''
    self.qi = []
    self.current_quality = 0
    self.segmentos_baixados = 0        # number of downloaded segments
    self.timer = Timer.get_instance()
    self.momento_requisicao = 0        # time of the last request
    self.taxa_bits = 0                 # measured bitrate
    self.menor_taxa = math.inf         # lowest measured bitrate
    self.maior_taxa = 0                # highest measured bitrate
    self.historico_t = []              # throughput history
    self.current_buffer = 0
    self.quedas_consecutivas = 0       # consecutive quality drops
    self.logger = None
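# A short sketch (a hypothetical helper, inferred from the attribute names above)
# of how the minimum/maximum measured bitrates could be maintained after each
# measurement; starting the minimum at math.inf makes the first update trivial.
def update_rate_bounds(menor_taxa, maior_taxa, taxa_bits):
    return min(menor_taxa, taxa_bits), max(maior_taxa, taxa_bits)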
def test_step(epoch, data, cfg, model, device):
    # get raw output; this can be for several layers
    outputs = pytorch_model_outputs(model, data, device)

    # cls metrics
    confmat = get_confmat_from_raw(outputs, data)
    print(confmat)
    acc = get_acc_from_confmat(confmat)
    recall = get_recall_from_confmat(confmat)
    precision = get_precision_from_confmat(confmat)

    if cfg.smoothing:
        sm_confmat = get_smoothed_confmat_from_raw(cfg, outputs, data)
        print(sm_confmat)
        sm_acc = get_acc_from_confmat(sm_confmat)
        sm_recall = get_recall_from_confmat(sm_confmat)
        sm_precision = get_precision_from_confmat(sm_confmat)

    # ap = get_ap_from_raw(outputs, data)
    # mAP = get_map_from_ap(ap, data)

    # similarity metric
    features = get_features_from_raw(outputs)
    print("simmat")
    t = Timer()
    t.tic()
    sim_mat = get_simmat_from_features(features)
    print(sim_mat.shape)
    t.toc()
    print(t)
    print("end [simmat]")

    # store results
    results = edict()
    results.acc = acc
    results.recall = recall
    results.precision = precision
    if cfg.smoothing:
        results.sm_acc = sm_acc
        results.sm_recall = sm_recall
        results.sm_precision = sm_precision
    # results.ap = ap
    # results.mAP = mAP
    print(results)
    results.features = features
    results.sim_mat = sim_mat

    return results
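# A self-contained sketch (an assumption about what get_simmat_from_features
# might compute, not its actual code) of a cosine-similarity matrix over a batch
# of feature vectors, a common choice for this kind of similarity metric.
import torch
import torch.nn.functional as F

def cosine_simmat(features):
    # features: (N, D) tensor; result: (N, N) matrix of pairwise cosine similarities
    normed = F.normalize(features, dim=1)
    return normed @ normed.t()

# cosine_simmat(torch.randn(8, 128)).shape -> torch.Size([8, 8])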