def run(api_path, cloud_name, tags, output):
    if not os.path.isdir(output):
        os.makedirs(output)
    output = normal_dir(output)

    if api_path[-1] != "/":
        api_path += "/"

    product = read_yaml(api_path + "product.yaml")
    if not product:
        raise Exception("Read (%s) failed" % (api_path + "product.yaml"))

    api_yaml = read_yaml(api_path + "api.yaml")
    all_models = read_yaml(api_path + "models.yaml")

    all_tags = {i["name"]: i for i in product["tags"]}
    for tag in tags.split(","):
        # str has no decode() in Python 3; strip alone is enough here.
        tag = tag.strip()
        if tag not in all_tags:
            raise Exception("Unknown tag(%s)" % tag)

        _, properties = generate_resource_properties(
            api_yaml, all_models, tag, read_yaml(api_path + tag + ".yaml"))

        write_file(output + tag + ".yaml", _generate_yaml(properties))

def run(api_path, cloud_name, tags, output):
    if not os.path.isdir(output):
        os.makedirs(output)
    output = normal_dir(output)

    api_path = normal_dir(api_path)

    cloud = _get_cloud_info(cloud_name)

    product = read_yaml(api_path + "product.yaml")
    if not product:
        raise Exception("Read (%s) failed" % (api_path + "product.yaml"))

    product_info = {"service_type": product["service_type"]}
    product_info.update(cloud)

    all_tags = {i["name"]: i for i in product["tags"]}
    tag_info = {}
    for tag in tags.split(","):
        # str has no decode() in Python 3; strip alone is enough here.
        tag = tag.strip()
        if tag not in all_tags:
            raise Exception("Unknown tag(%s)" % tag)
        tag_info[tag] = all_tags[tag]

    _generate_yaml(api_path, product_info, tag_info, output)

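# A hedged sketch of how this entry point might be invoked; the script name,
# paths, cloud name, and tag list below are illustrative, not taken from the
# original project:
#
# if __name__ == "__main__":
#     import sys
#     # e.g. python generate.py ./api/ecs/ examplecloud "servers,volumes" ./out
#     run(*sys.argv[1:5])
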
def _generate_platform_yaml(api_path, product_info, tag_info, output):
    prefix = "%s_%s" % (product_info["cloud_full_name"],
                        product_info["service_type"])

    config = {"ansible": {}, "terraform": {}}
    for tag, info in tag_info.items():
        custom_configs = read_yaml(api_path + tag + ".yaml")
        rn = get_resource_name(info, custom_configs)

        v = custom_configs.get("ansible")
        if v:
            config["ansible"][rn] = {
                "config": v,
            }

        v = custom_configs.get("terraform")
        if v:
            config["terraform"][rn] = {
                "config": v,
                "config_dir": api_path,
                "terraform_resource_name": "%s_%s" % (prefix, rn.lower())
            }

    m = {
        "ansible": build_ansible_yaml,
        "terraform": build_terraform_yaml
    }
    for k, v in config.items():
        m[k](v, output)

def _get_cloud_info(cloud_name):
    cloud = None
    m = read_yaml("clouds.yaml")
    for i in m["clouds"]:
        if cloud_name == i["cloud_half_full_name"]:
            cloud = i
            break
    else:
        # for/else: runs only if the loop finished without a break.
        raise Exception("Unknown cloud(%s)" % cloud_name)

    return cloud

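# Hedged sketch of the clouds.yaml shape _get_cloud_info expects. The keys are
# inferred from lookups in this module (cloud_half_full_name here,
# cloud_full_name / cloud_short_name in _generate_yaml); the values are
# illustrative only:
#
# clouds:
#   - cloud_half_full_name: examplecloud
#     cloud_full_name: ExampleCloud
#     cloud_short_name: ec
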
def _generate_api_yaml(api_path, product_info, tag_info, output):
    r = [_render_product(product_info)]

    api_yaml = read_yaml(api_path + "api.yaml")
    all_models = read_yaml(api_path + "models.yaml")

    for tag, v in tag_info.items():
        custom_configs = read_yaml(api_path + tag + ".yaml")

        api_info, properties = generate_resource_properties(
            api_yaml, all_models, tag, custom_configs)

        r.extend(
            build_resource_config(
                api_info, properties, v, custom_configs,
                product_info["service_type"]))

    write_file(output + "api.yaml", r)

def run(config_file, cloud_name, tag, output):
    if not os.path.isdir(output):
        os.makedirs(output)
    output = normal_dir(output)

    api_path = os.path.dirname(config_file) + "/"

    product = read_yaml(api_path + "product.yaml")
    if not product:
        raise Exception("Read (%s) failed" % (api_path + "product.yaml"))

    api_yaml = read_yaml(api_path + "api.yaml")
    all_models = read_yaml(api_path + "models.yaml")

    all_tags = {i["name"]: i for i in product["tags"]}
    # str has no decode() in Python 3; strip alone is enough here.
    tag = tag.strip()
    if tag not in all_tags:
        raise Exception("Unknown tag(%s)" % tag)

    # Pass tag through, matching the generate_resource_properties signature
    # used at the other call sites in this module.
    _, properties = generate_resource_properties(
        api_yaml, all_models, tag, read_yaml(config_file))

    write_file(output + tag + "_desc.yaml", _generate_yaml(properties))

def _generate_yaml(api_path, product_info, tag_info, output):
    r = [_render_product(product_info)]
    platform_config = []

    api_yaml = read_yaml(api_path + "api.yaml")
    all_models = read_yaml(api_path + "models.yaml")

    for tag, v in tag_info.items():
        custom_configs = read_yaml(api_path + tag + ".yaml")

        api_info, properties = generate_resource_properties(
            api_yaml, all_models, tag, custom_configs)

        argv = {
            "config_dir": api_path,
            "api_info": api_info,
            "all_models": all_models,
            "properties": properties,
            "service_type": product_info["service_type"],
            "resource_name": _get_resource_name(v, custom_configs),
            "version": _get_version(api_info),
            "resource_desc": v.get("description", ""),
            "custom_configs": custom_configs,
            "cloud_full_name": product_info["cloud_full_name"],
            "cloud_short_name": product_info["cloud_short_name"],
        }

        r.extend(build_resource_config(**argv))
        r.extend(build_resource_api_config(**argv))

        platform_config.append(argv)

    write_file(output + "api.yaml", r)

    _generate_platform_yaml(platform_config, all_models, output)

def _build_example_render_info(f, module_name, cloud_short_name):
    tasks = None
    r = read_yaml(f)
    if len(r) == 1 and isinstance(r[0], dict) and "tasks" in r[0]:
        tasks = r[0].get("tasks")
    else:
        raise Exception("the format of example is not correct")

    if not tasks:
        raise Exception("no tasks in the example file")

    task = None
    for i in tasks:
        if module_name in i:
            task = i
            tasks.remove(i)
            break
    else:
        raise Exception("can't find the task(%s)" % module_name)

    v = {
        "example_description": r[0].get("name"),
        "task_name": module_name,
        "task_code": _build_module_params(task[module_name], 4),
        "task_description": task.get("name")
    }

    if tasks:
        d = []
        for t in tasks:
            module = ""
            for k in t:
                if k.startswith(cloud_short_name):
                    module = k
                    break
            else:
                # No module key matched this cloud; skip the task.
                continue

            d.append({
                "name": module,
                "register": t.get("register"),
                "description": t.get("name"),
                "code": _build_module_params(t[module], 6),
            })

        if d:
            v["depends"] = d
            v["has_depends"] = True

    return v

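# Hedged sketch of the example playbook _build_example_render_info parses:
# a single-element list whose dict carries "name" and "tasks"; the documented
# task is keyed by module_name, and dependent tasks are keyed by modules whose
# names start with cloud_short_name. All names below are illustrative:
#
# - name: create a server
#   tasks:
#     - name: create a network first
#       ec_network:
#         name: "my-net"
#       register: net
#     - name: create the server
#       ec_server:
#         name: "my-server"
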
def import_db_from_yaml():
    # Assumes module-level names: os, the rethinkdb driver as RDB (with
    # RqlRuntimeError / RqlDriverError), DATA_PATH, logger, rdb_connector,
    # and read_yaml.
    data_tree = []
    for index, tree in enumerate(os.walk(DATA_PATH)):
        if index > 0:
            data_tree.append(tree)

    for data in data_tree:
        database = data[0].split('/')[-1]

        connection = rdb_connector()
        try:
            RDB.db_create(database).run(connection)
            logger.info("Database `%s` setup completed", database)
        except RqlRuntimeError:
            logger.error("Database `%s` already exists", database)
        except RqlDriverError as e:
            logger.error(e)
            exit()
        finally:
            connection.close()

        tables = data[2]
        for table in tables:
            table_name = os.path.splitext(table)[0]
            table_data = read_yaml(DATA_PATH + '/' + database + '/' + table)

            connection = rdb_connector()
            try:
                RDB.db(database).table_create(table_name).run(connection)
                RDB.db(database).table(table_name).insert(table_data).run(
                    connection)
                logger.info("Table `%s` setup in database `%s` completed",
                            table_name, database)
            except RqlRuntimeError:
                logger.error("Table `%s` in database `%s` already exists",
                             table_name, database)
            except RqlDriverError as e:
                logger.error(e)
                exit()
            finally:
                connection.close()

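# The os.walk above implies an on-disk layout like the following, where each
# YAML file holds the documents to insert into its table (names hypothetical):
#
# DATA_PATH/
#   mydb/
#     users.yaml
#     sessions.yaml
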
def test(model, conf_thres=0.001, nms_thres=0.5):
    # Assumes module-level imports: os, copy, numpy as np, torch,
    # pathlib.Path, torch.utils.data.DataLoader, plus project helpers
    # (read_yaml, Yolov3Data, non_max_suppression, xywh2xyxy, voc_eval).
    device = "cuda:0"
    classes = ['person']
    if model is not None:
        device = model.device  # get model device
        data_parameters = read_yaml(model.data_yaml)
        classes = data_parameters['classes']
        batch_size = model.batch_size

    # Dataset (note: data_parameters and batch_size require a model above)
    data_parameters["data_path"] = (
        '/home/lingc1/data/sports-training-data/player_detection/'
        'validate_dataset_5k_half_size')
    data_set = Yolov3Data(data_parameters, None, index_file='val_test')
    dataloader = DataLoader(data_set,
                            batch_size,
                            shuffle=False,
                            num_workers=0,
                            collate_fn=data_set.collate_fn)

    print(('%20s' + '%10s' * 6) %
          ('Class', 'Images', 'Targets', 'P', 'R', 'mAP', 'F1'))
    loss, p_80, r_80, f1_80, mp_80, mr_80, map_80, mf1_80 = \
        0., 0., 0., 0., 0., 0., 0., 0.
    seen = 0
    images_num = 0
    output_results = ""
    class_recs = {}
    nc = len(classes)
    names = classes
    det_lines = []
    imagenames = []
    npos_cls = {}
    for i in range(nc):
        npos_cls[i] = 0
        class_recs[i] = {}

    for i, (imgs, targets) in enumerate(dataloader):
        imgs = imgs.to(device)
        targets = targets.to(device)
        target_number = len(targets)

        preds = model.inference(imgs, None)
        output = non_max_suppression(preds,
                                     conf_thres=conf_thres,
                                     nms_thres=nms_thres)

        # Statistics per image:
        # remove the targets that are filled with 0 for data distribution.
        true_targets = targets[torch.sum(targets[:, 1:6], 1) != 0]
        # npos += len(true_targets)
        for si, pred in enumerate(output):
            images_num += 1
            if pred is not None and len(pred) > 0:
                # Rescale boxes from 416 to true image size
                # pred[:, :4] = scale_coords(imgs.shape[2:], pred[:, :4],
                #                            im0_shape).round()
                for *xyxy, conf, cls_conf, cls in pred:
                    x = int(xyxy[0])
                    y = int(xyxy[1])
                    w = int((xyxy[2] - xyxy[0]).round())
                    h = int((xyxy[3] - xyxy[1]).round())
                    output_line = "{:s},{:d},{:d},{:d},{:d},{:f}\n".format(
                        Path(str(si)).name, x, y, w, h, conf)
                    det_lines.append(output_line)
                    output_results = output_results + output_line

            labels = true_targets[true_targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 4].tolist() if nl else []  # target class
            seen += 1

            imagename = os.path.splitext(Path(str(si)).name)[0]
            imagenames.append(imagename)
            if nl:
                unique_classes = np.unique(tcls).astype('int32')
                for cls in unique_classes:
                    # Compare against an array: a plain `list == int` is just
                    # False, so np.where would always return an empty index.
                    cls_idx = np.where(np.array(tcls) == cls)[0]

                    tcls_tensor = labels[:, 4]
                    tcls_tensor = tcls_tensor[cls_idx]
                    npos_cls[cls] += len(tcls_tensor)

                    # target boxes
                    tbox = xywh2xyxy(labels[:, 0:4])
                    tbox = tbox[cls_idx]
                    tbox[:, [0, 2]] *= imgs.shape[3]
                    tbox[:, [1, 3]] *= imgs.shape[2]
                    bbox = np.array(tbox.cpu().numpy().round(), dtype=int)

                    det = [False] * len(tcls_tensor)
                    difficult = np.array(det)
                    class_recs[cls][imagename] = {
                        'bbox': bbox,
                        'difficult': difficult,
                        'det': det
                    }

    p_80, r_80, ap_80, f1_80 = [], [], [], []
    p_50, r_50, ap_50, f1_50 = [], [], [], []
    ap_80_iou = 0.8
    ap_50_iou = 0.5
    class_recs_80 = copy.deepcopy(class_recs)
    class_recs_50 = copy.deepcopy(class_recs)
    for cls in range(nc):
        rec_cls, prec_cls, ap_cls = voc_eval(det_lines,
                                             npos_cls[cls],
                                             imagenames,
                                             class_recs_80[cls],
                                             ovthresh=ap_80_iou,
                                             use_07_metric=True)
        f1_cls = 2 * prec_cls[-1] * rec_cls[-1] / (
            prec_cls[-1] + rec_cls[-1] + 1e-16)
        p_80.append(prec_cls[-1])
        r_80.append(rec_cls[-1])
        ap_80.append(ap_cls)
        f1_80.append(f1_cls)
        print("AP 80")
        print("person ap is: %.6f" % (ap_cls * 100))
        print("recall is %.6f" % (rec_cls[-1] * 100))
        print("precision is %.6f" % (prec_cls[-1] * 100))

        rec_cls, prec_cls, ap_cls = voc_eval(det_lines,
                                             npos_cls[cls],
                                             imagenames,
                                             class_recs_50[cls],
                                             ovthresh=ap_50_iou,
                                             use_07_metric=True)
        f1_cls = 2 * prec_cls[-1] * rec_cls[-1] / (
            prec_cls[-1] + rec_cls[-1] + 1e-16)
        p_50.append(prec_cls[-1])
        r_50.append(rec_cls[-1])
        ap_50.append(ap_cls)
        f1_50.append(f1_cls)
        print("AP 50")
        print("person ap is: %.6f" % (ap_cls * 100))
        print("recall is %.6f" % (rec_cls[-1] * 100))
        print("precision is %.6f" % (prec_cls[-1] * 100))

    mp_80, mr_80, map_80, mf1_80 = np.mean(p_80) * 100, np.mean(
        r_80) * 100, np.mean(ap_80) * 100, np.mean(f1_80) * 100
    mp_50, mr_50, map_50, mf1_50 = np.mean(p_50) * 100, np.mean(
        r_50) * 100, np.mean(ap_50) * 100, np.mean(f1_50) * 100

    # Print results
    all_target_sum = 0
    for _, cls_npos in npos_cls.items():
        all_target_sum += cls_npos

    pf = '%20s' + '%10.6g' * 6  # print format
    print(pf % ('all', seen, all_target_sum, mp_80, mr_80, map_80, mf1_80),
          end='\n\n')
    print(pf % ('all', seen, all_target_sum, mp_50, mr_50, map_50, mf1_50),
          end='\n\n')

    # Print results per class
    if nc > 1:
        for i in range(nc):
            print(pf % (names[i], seen, npos_cls[i], p_80[i], r_80[i],
                        ap_80[i], f1_80[i]))
            print(pf % (names[i], seen, npos_cls[i], p_50[i], r_50[i],
                        ap_50[i], f1_50[i]))

    # Return results
    maps = np.zeros(nc)
    for i in range(nc):
        maps[i] = ap_80[i]
    return (mp_80, mr_80, map_80, mf1_80, loss / len(dataloader),
            mp_50, mr_50, map_50, mf1_50), maps

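# Minimal usage sketch. The model object is assumed (from the code above) to
# expose .device, .data_yaml, .batch_size and .inference(); the loader name
# is hypothetical, not part of the original module:
#
# model = load_model("weights.pt")
# (mp80, mr80, map80, mf180, val_loss,
#  mp50, mr50, map50, mf150), per_class_ap = test(model)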