def initial_preparation(self, box_list_batch):
    category_all, offset_all, index_all = [], [], []

    for box_list in box_list_batch:
        assert box_list.mode == 'x1y1x2y2'
        iou_matrix = boxlist_iou(box_list, self.anchor_cat)  # shape: (num_gt, num_anchor)
        # shape: (num_anchor,), record the matched gt index for each dt, -1 for background, -2 for ignored.
        matched_index = match(iou_matrix, self.cfg.match_iou_thre, self.cfg.match_iou_thre)

        box_list = copy.deepcopy(box_list)
        # '.clamp()' here to make index operation available, this won't affect 'matched_index'
        box_list_matched = box_list[matched_index.clamp(min=0)]

        offset = encode(box_list_matched.box, self.anchor_cat.box)
        category = box_list_matched.label
        category[matched_index == -1] = 0   # Background (negative examples)
        category[matched_index == -2] = -1  # ignore indices that are between thresholds

        offset_all.append(offset)
        category_all.append(category)
        index_all.append(matched_index)

    category_all = torch.cat(category_all, dim=0).int()
    offset_all = torch.cat(offset_all, dim=0)
    index_all = torch.cat(index_all, dim=0)
    return category_all, offset_all, index_all
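# The 'encode' call above turns the matched ground-truth boxes into regression targets
# relative to the anchors. Its definition is not part of this snippet; the sketch below
# is only an assumption, using the common center/size delta parameterization, and the
# name 'encode_sketch' and the 'variances' values are hypothetical.
import torch

def encode_sketch(gt_box, anchor_box, variances=(0.1, 0.2)):
    """Hypothetical box encoding: both inputs are (N, 4) tensors in x1y1x2y2 mode."""
    # anchor centers and sizes
    a_wh = anchor_box[:, 2:] - anchor_box[:, :2]
    a_cxy = anchor_box[:, :2] + 0.5 * a_wh
    # matched ground-truth centers and sizes
    g_wh = gt_box[:, 2:] - gt_box[:, :2]
    g_cxy = gt_box[:, :2] + 0.5 * g_wh
    # offsets: normalized center shift and log size ratio
    d_cxy = (g_cxy - a_cxy) / (a_wh * variances[0])
    d_wh = torch.log(g_wh / a_wh) / variances[1]
    return torch.cat([d_cxy, d_wh], dim=1)  # shape: (N, 4)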
def get_stats(host, metric):
    only_latest = False
    cf = request.args.get('cf', 'AVERAGE')
    step = request.args.get('step', 15)
    start = request.args.get('start')
    if start is None:
        only_latest = True
        start = '-%s' % step
        end = -1
    else:
        end = request.args.get('end', -1)

    ret = cache.read(str(host), str(metric), str(cf).upper(),
                     int(step), int(start), int(end))
    if only_latest:
        return encode([ret[0]] + [[i for i in ret[1] if i[1] is not None][-1]])
    else:
        return encode(ret)
def main():
    platforminfo = get_platform_info()
    metric_list = load_metric_list()
    platforminfo.update(metric_list)

    mserver_host, mserver_port = global_config["monitor_server"].split(":")
    mserver_port = int(mserver_port)
    rpc_client = xmlrpclib.ServerProxy("http://%s:%d" % (mserver_host, mserver_port))
    try:
        retcode = rpc_client.signIn(encode(platforminfo))
        assert retcode == 1
        logger.info("signed in on server %s:%d" % (mserver_host, mserver_port))
    except Exception, e:
        logger.exception("")
        raise AgentException, "sign in on %s:%d failed" % (mserver_host, mserver_port)
def getStats(self, host, metric, stat, resolution, start, end):
    res = ''
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(self.server_addr)
        req = encode((host, metric, stat, resolution, start, end))
        sock.send('%s\r\n' % req)
        while True:
            chunk = sock.recv(self.BUFSIZE)
            if not chunk:
                break
            res += chunk
        sock.close()
    except socket.error, e:
        print e
        logger.exception('')
        sock.close()
        raise e
def make_dataset(self, pth, extensions=IMG_EXTENSIONS_):
    """
    Create a dataset from a folder of images. The filename of each image file is
    used as the label for that sample. Each character in the label is encoded as
    its index in the total character list 'self.chars' (indices start from 1, so
    index 0 is reserved for the blank character).

    :param pth: Path to the dataset
    :param extensions: List of allowed extensions
    :return: List of tuples of path to image file and its encoded label
    """
    assert pth is not None and self.chars is not None
    images = []
    dir = os.path.expanduser(pth)
    for target in sorted(os.listdir(dir)):
        d = os.path.join(dir, target)
        if has_file_allowed_extension(target, extensions):
            fname = os.path.splitext(target)[0]
            # item = (d, text_to_labels(fname, chars))
            item = (d, utils.encode(fname, self.chars))
            images.append(item)
    return images
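# 'utils.encode' is not shown here. Per the docstring above, it maps each character of
# the label to its 1-based index in the character list (0 stays reserved for the blank).
# A minimal sketch under that assumption, with a hypothetical name:
def encode_label_sketch(text, chars):
    """Map 'text' to a list of 1-based indices into 'chars'; 0 is kept for blank."""
    return [chars.index(ch) + 1 for ch in text]

# e.g. with chars = ['a', 'b', 'c'], the filename 'cab' would encode to [3, 1, 2]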
    optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR, last_epoch - 1)

if config.ATTENTION.ENABLE:
    criterion = torch.nn.NLLLoss()
else:
    criterion = torch.nn.CTCLoss()

# training
best_acc = 0.0
for epoch in range(last_epoch, config.TRAIN.END_EPOCH):
    model.train()
    for i, (inp, idx) in enumerate(train_loader):
        # forward pass, compute the loss
        inp = inp.to(device)
        labels = get_batch_label(train_dataset, idx)
        batch_size = inp.size(0)
        text, length = encode(config.DICT, labels)
        preds = model(inp, length).cpu()
        preds_size = torch.IntTensor([preds.size(0)] * batch_size)
        if config.ATTENTION.ENABLE:
            loss = criterion(preds, text)
        else:
            loss = criterion(preds, text, preds_size, length)

        # backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if i % config.PRINT_FREQ == 0:
            print("epoch:{} step:{} loss:{} lr:{}".format(
                epoch, i, loss.item(), lr_scheduler.get_lr()))

    # update the learning rate after each epoch
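# In the CTC branch above, torch.nn.CTCLoss expects log-probabilities of shape (T, N, C)
# plus per-sample input and target lengths; presumably 'encode(config.DICT, labels)'
# returns the flattened target indices and their lengths in that layout. A minimal,
# self-contained shape check under that assumption (all sizes here are made up):
import torch

ctc = torch.nn.CTCLoss()
log_probs = torch.randn(50, 4, 37).log_softmax(2)        # (T, N, C) log-probabilities
targets = torch.randint(1, 37, (4 * 10,))                # concatenated label indices, 0 = blank
input_lengths = torch.full((4,), 50, dtype=torch.long)   # one sequence length per sample
target_lengths = torch.full((4,), 10, dtype=torch.long)  # one label length per sample
loss = ctc(log_probs, targets, input_lengths, target_lengths)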
def collectAndSend(self):
    self._send(self._channel, encode(self.collector.doCollect()))
def get_invite_code(self):
    """Get the shareable invite code."""
    return utils.encode(str(self.id), utils.INVITE_CODE_KEY)
def compute_paa(self, box_list_batch, c_init_batch, score_batch, index_init_batch):
    bs = len(box_list_batch)
    c_init_batch = c_init_batch.reshape(bs, -1)
    score_batch = score_batch.reshape(bs, -1)
    index_init_batch = index_init_batch.reshape(bs, -1)
    num_anchor_per_fpn = [len(anchor_per_fpn.box) for anchor_per_fpn in self.anchors]
    device = score_batch.device
    final_c_batch, final_offset_batch = [], []

    for i in range(len(box_list_batch)):
        box_list = box_list_batch[i]
        assert box_list.mode == "x1y1x2y2", 'box_list mode is incorrect'
        c_gt, box_gt = box_list.label, box_list.box
        c_init = c_init_batch[i]
        score = score_batch[i]
        index_init = index_init_batch[i]
        assert c_init.shape == index_init.shape

        final_c = torch.zeros(self.anchor_cat.box.shape[0], dtype=torch.long).to(device)  # '0' represents background
        final_box_gt = torch.zeros_like(self.anchor_cat.box)

        for gt_i in range(box_gt.shape[0]):
            candi_i_per_gt = []
            start_i = 0

            for j in range(len(num_anchor_per_fpn)):
                end_i = start_i + num_anchor_per_fpn[j]
                score_per_fpn = score[start_i:end_i]
                index_init_per_fpn = index_init[start_i:end_i]
                # get the matched anchor index for a certain gt in a certain fpn
                matched_i = (index_init_per_fpn == gt_i).nonzero()[:, 0]
                matched_num = matched_i.numel()
                if matched_num > 0:
                    _, topk_i = score_per_fpn[matched_i].topk(min(matched_num, self.cfg.fpn_topk), largest=False)
                    topk_i_per_fpn = matched_i[topk_i]
                    candi_i_per_gt.append(topk_i_per_fpn + start_i)
                start_i = end_i

            if candi_i_per_gt:
                candi_i_per_gt = torch.cat(candi_i_per_gt)

                # only if there are more than 1 candidate, gmm would be done
                if candi_i_per_gt.numel() > 1:
                    candi_score = score[candi_i_per_gt]
                    candi_score, candi_index = candi_score.sort()
                    candi_score = candi_score.reshape(-1, 1).cpu().numpy()
                    gmm = skm.GaussianMixture(
                        n_components=2,
                        weights_init=[0.5, 0.5],
                        means_init=[[candi_score.min()], [candi_score.max()]],
                        precisions_init=[[[1.0]], [[1.0]]])
                    gmm.fit(candi_score)
                    gmm_component = gmm.predict(candi_score)
                    gmm_score = gmm.score_samples(candi_score)
                    gmm_component = torch.from_numpy(gmm_component).to(device)
                    gmm_score = torch.from_numpy(gmm_score).to(device)

                    fg = gmm_component == 0
                    if fg.nonzero().numel() > 0:
                        _, fg_max_i = gmm_score[fg].max(dim=0)  # Fig 3. (c)
                        is_pos = candi_index[:fg_max_i + 1]
                    else:
                        is_pos = candi_index  # just treat all samples as positive for high recall.
                else:
                    is_pos = 0  # if there is only one candidate, treat it as positive

                pos_i = candi_i_per_gt[is_pos]
                final_c[pos_i] = c_gt[gt_i].reshape(-1, 1)
                final_box_gt[pos_i] = box_gt[gt_i].reshape(-1, 4)

        # 'neg_i' and 'pos_i' derives from 'candi_i_per_gt' derives from 'matched_i' derives from 'index_init'
        final_offset = encode(final_box_gt, self.anchor_cat.box)
        final_c_batch.append(final_c)
        final_offset_batch.append(final_offset)

    final_c_batch = torch.cat(final_c_batch, dim=0).int()
    final_offset_batch = torch.cat(final_offset_batch, dim=0)
    return final_c_batch, final_offset_batch
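# The block above separates candidate anchors into positives and negatives by fitting a
# two-component 1-D GaussianMixture to their sorted scores (the PAA assignment scheme).
# A standalone toy run of the same fitting step on dummy scores, for illustration only:
import numpy as np
import sklearn.mixture as skm

scores = np.sort(np.array([0.05, 0.07, 0.10, 0.55, 0.60, 0.62])).reshape(-1, 1)
gmm = skm.GaussianMixture(
    n_components=2,
    weights_init=[0.5, 0.5],
    means_init=[[scores.min()], [scores.max()]],
    precisions_init=[[[1.0]], [[1.0]]])
gmm.fit(scores)
print(gmm.predict(scores))        # component assignment per candidate
print(gmm.score_samples(scores))  # log-likelihoods used to pick the positive/negative split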
#ofile = curdir + os.path.sep + 'mon_agent.out'
#pfile = curdir + os.path.sep + 'mon_agent.pid'
ofile = '/tmp/mon_agent.out'
pfile = '/tmp/mon_agent.pid'
logfile = '/tmp/mon_agent.log'

###################
# merge everything
###################
all = {
    'ofile': ofile,
    'pfile': pfile,
    'logfile': logfile,
    'basic_info': basic_info,
    'metric_groups': metric_groups,
    'remote_servers': remote_servers,
    'listen_channel': listen_channel,
    'collect_interval': collect_interval,
}

if __name__ == '__main__':
    config_path = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + \
        'agent_default_config'
    fp = file(config_path, 'w+')
    s = encode(all)
    fp.write(s)
    fp.close()