def calculate_medium_box(boxes):
    """Fuse a list of scored boxes into one confidence-weighted average box.

    Each coordinate of the result is the score-weighted mean of that
    coordinate over `boxes`; the result's score is the plain mean score.
    """
    total_score = reduce(lambda acc, box: acc + box.score, boxes, 0)

    coords = {}
    for coord in ['x1', 'y1', 'x2', 'y2']:
        weighted_sum = reduce(
            lambda acc, box: acc + box.__dict__[coord] * box.score, boxes, 0)
        coords[coord] = weighted_sum / total_score

    merged = al.AnnoRect(**coords)
    # class is taken from the first box; caller is expected to pass one class
    merged.classID = boxes[0].classID
    merged.score = total_score / len(boxes)
    return merged
def annotation_labelbox_from_result(result, columns, url_prefix):
    """Build an Annotation from one MTurk labelbox result row."""
    anno = AnnotationLib.Annotation()
    image_url = get_path_from_s3(result[columns['annotation']], url_prefix)

    for box in parse_boxes(result[columns['Answer.results']]):
        anno.rects.append(AnnotationLib.AnnoRect(*box))

    # remove parameters from filename (everything from the first '&' on)
    anno.imageName = image_url[:image_url.index('&')]
    return anno
def do(image, anno=None):
    """Rotate the image (and optionally its annotations) 90 degrees
    counterclockwise.

    Args:
        image (Image): The target image to rotate.
        anno (Annotation): The annotations to be rotated with the image.

    Returns (tuple): Rotated image and annotations for it.
    """
    width = image.shape[1]
    rotated = imrotate(image, 90, reshape=True)

    if anno is not None:
        # a point (x, y) maps to (y, width - x) under the 90-degree CCW turn
        remapped = []
        for r in anno.rects:
            remapped.append(al.AnnoRect(r.y1, width - r.x2, r.y2, width - r.x1))
        anno.rects = remapped

    return rotated, anno
def get_cell_grid(cell_width, cell_height, region_size):
    """Return one AnnoRect per grid cell, tagged with its linear cell index.

    Cells are laid out row-major; each rect is a square of side `region_size`
    centered on its cell.
    """
    regions = []
    half = 0.5 * region_size
    for row in xrange(cell_height):
        for col in xrange(cell_width):
            center_x = (col + 0.5) * region_size
            center_y = (row + 0.5) * region_size
            rect = al.AnnoRect(center_x - half, center_y - half,
                               center_x + half, center_y + half)
            # linear cell index, used downstream to identify the cell
            rect.track_id = row * cell_width + col
            regions.append(rect)
    return regions
def annotation_labelact_from_result(result, columns, url_prefix):
    """Build an Annotation with one rect carrying gender/ptype/act attributes
    from a single MTurk activity-labeling result row."""
    annotation = AnnotationLib.Annotation()
    image_url = get_path_from_s3(result[columns['annotation']], url_prefix)

    # remove parameters from filename; the remainder is "name,x1,y1,x2,y2"
    fields = image_url[:image_url.index('&')].split(',')
    assert(len(fields) == 5)
    annotation.imageName = fields[0]

    rect = AnnotationLib.AnnoRect()
    rect.x1 = int(fields[1])
    rect.y1 = int(fields[2])
    rect.x2 = int(fields[3])
    rect.y2 = int(fields[4])

    answers = result[columns['Answer.results']].split(',')
    assert(len(answers) == 1 + 1 + 4 + 4)

    gender_idx_to_val = {0: ATTR_VAL_GENDER_MALE, 1: ATTR_VAL_GENDER_FEMALE}
    ptype_idx_to_val = {0: ATTR_VAL_PTYPE_SALES, 1: ATTR_VAL_PTYPE_CUST}
    sales_act_idx_to_val = {0: ATTR_VAL_ACT_SALES_INT,
                            1: ATTR_VAL_ACT_SALES_CLEAN,
                            2: ATTR_VAL_ACT_SALES_OTHER}
    cust_act_idx_to_val = {0: ATTR_VAL_ACT_CUST_QUEUE,
                           1: ATTR_VAL_ACT_CUST_INT,
                           2: ATTR_VAL_ACT_CUST_BROWSE,
                           3: ATTR_VAL_ACT_CUST_OTHER}

    # male/female
    rect.at["gender"] = gender_idx_to_val[int(answers[-4])]

    # sales/cust
    ptype_idx = int(answers[-3])
    rect.at["ptype"] = ptype_idx_to_val[ptype_idx]

    if ptype_idx == 0:
        # interact/clean/other
        rect.at["act"] = sales_act_idx_to_val[int(answers[-2])]
    else:
        # queue/interact/browse/other
        rect.at["act"] = cust_act_idx_to_val[int(answers[-1])]

    annotation.rects.append(rect)
    return annotation
def pal2al(_annolist):
    """Convert a protobuf annotation list into a plain list of
    AnnotationLib.Annotation objects, copying coordinates and any optional
    per-rect fields that are set."""
    annotations = []
    for _a in _annolist.annotation:
        anno = AnnotationLib.Annotation()
        anno.imageName = _a.imageName
        anno.rects = []
        for _r in _a.rect:
            rect = AnnotationLib.AnnoRect()
            rect.x1 = _r.x1
            rect.x2 = _r.x2
            rect.y1 = _r.y1
            rect.y2 = _r.y2
            # copy each optional protobuf field only when it is present
            for field in ("id", "track_id", "score", "distance3d",
                          "width3d", "height3d", "length3d"):
                if _r.HasField(field):
                    setattr(rect, field, getattr(_r, field))
            anno.rects.append(rect)
        annotations.append(anno)
    return annotations
def pal2al(_annolist): #annotations = []; annotations = AnnotationLib.AnnoList() for adesc in _annolist.attribute_desc: annotations.attribute_desc[adesc.name] = adesc print "attribute: ", adesc.name, adesc.id for valdesc in adesc.val_to_str: annotations.add_attribute_val(adesc.name, valdesc.s, valdesc.id) attribute_name_from_id = { adesc.id: aname for aname, adesc in annotations.attribute_desc.iteritems() } attribute_dtype_from_id = { adesc.id: adesc.dtype for aname, adesc in annotations.attribute_desc.iteritems() } for _a in _annolist.annotation: anno = AnnotationLib.Annotation() anno.imageName = _a.imageName anno.rects = [] for _r in _a.rect: rect = AnnotationLib.AnnoRect() rect.x1 = _r.x1 rect.x2 = _r.x2 rect.y1 = _r.y1 rect.y2 = _r.y2 if _r.HasField("id"): rect.id = _r.id if _r.HasField("track_id"): rect.track_id = _r.track_id if _r.HasField("score"): rect.score = _r.score for _at in _r.attribute: try: cur_aname = attribute_name_from_id[_at.id] cur_dtype = attribute_dtype_from_id[_at.id] except KeyError as e: print "attribute: ", _at.id print e assert (False) if cur_dtype == AnnotationLib.AnnoList.TYPE_INT32: rect.at[cur_aname] = _at.val elif cur_dtype == AnnotationLib.AnnoList.TYPE_FLOAT: rect.at[cur_aname] = _at.fval elif cur_dtype == AnnotationLib.AnnoList.TYPE_STRING: rect.at[cur_aname] = _at.strval else: assert (False) anno.rects.append(rect) annotations.append(anno) return annotations
def add_rectangles(H, orig_image, confidences, boxes, use_stitching=False,
                   rnn_len=1, min_conf=0.1, show_removed=True, tau=0.25,
                   show_suppressed=True):
    """Draw predicted boxes onto a copy of the first image in the batch and
    return the image together with the accepted boxes as AnnoRects.

    Suppressed candidates are drawn in red (when show_suppressed), accepted
    ones in green; only boxes above min_conf are drawn.
    """
    image = np.copy(orig_image[0])
    grid_h = H["grid_height"]
    grid_w = H["grid_width"]
    boxes_r = np.reshape(boxes, (-1, grid_h, grid_w, rnn_len, 4))
    confidences_r = np.reshape(
        confidences, (-1, grid_h, grid_w, rnn_len, H['num_classes']))
    cell_pix_size = H['region_size']

    # one candidate list per grid cell
    all_rects = [[[] for _ in range(grid_w)] for _ in range(grid_h)]
    for n in range(rnn_len):
        for y in range(grid_h):
            for x in range(grid_w):
                # best non-background class (index 0 is background)
                classID = np.argmax(confidences_r[0, y, x, n, 1:]) + 1
                bbox = boxes_r[0, y, x, n, :]
                # box center in absolute pixel coordinates
                abs_cx = int(bbox[0]) + cell_pix_size / 2 + cell_pix_size * x
                abs_cy = int(bbox[1]) + cell_pix_size / 2 + cell_pix_size * y
                conf = confidences_r[0, y, x, n, classID]
                all_rects[y][x].append(
                    Rect(abs_cx, abs_cy, bbox[2], bbox[3], conf, classID))

    all_rects_r = [r for row in all_rects for cell in row for r in cell]
    if use_stitching:
        from stitch_wrapper import stitch_rects
        acc_rects = stitch_rects(all_rects, tau)
    else:
        acc_rects = all_rects_r

    pairs = [(all_rects_r, (255, 0, 0))] if show_suppressed else []
    pairs.append((acc_rects, (0, 255, 0)))
    for rect_set, color in pairs:
        for rect in rect_set:
            if rect.confidence > min_conf:
                half_w = int(rect.width / 2)
                half_h = int(rect.height / 2)
                cv2.rectangle(image,
                              (rect.cx - half_w, rect.cy - half_h),
                              (rect.cx + half_w, rect.cy + half_h),
                              color, 2)

    rects = []
    for rect in acc_rects:
        r = al.AnnoRect()
        r.x1 = rect.cx - rect.width / 2.
        r.x2 = rect.cx + rect.width / 2.
        r.y1 = rect.cy - rect.height / 2.
        r.y2 = rect.cy + rect.height / 2.
        r.score = rect.true_confidence
        rects.append(r)

    return image, rects
annolist.append(anno) # load vatic tracks vatic_dump = pickle.load(open(opts.vatic_filename, "rb")) num_tracks = len(vatic_dump) print "number of tracks: ", num_tracks for tidx in xrange(0, num_tracks): vatic_boxes = vatic_dump[tidx]["boxes"] track_len = len(vatic_boxes) print "track ", tidx, ", track_len: ", track_len for bidx in xrange(0, track_len): if vatic_boxes[bidx].lost == 0: rect = AnnotationLib.AnnoRect() rect.id = tidx rect.x1 = vatic_boxes[bidx].xtl rect.y1 = vatic_boxes[bidx].ytl rect.x2 = vatic_boxes[bidx].xbr rect.y2 = vatic_boxes[bidx].ybr cur_frame = vatic_boxes[bidx].frame annolist[cur_frame].rects.append(rect) # save annolist fname_ext = os.path.basename(opts.vatic_filename) fname = os.path.splitext(fname_ext)[0] pal_filename = opts.output_dir + "/" + fname + ".pal" print "saving ", pal_filename
def inv(r):
    """Map a rect from the rotated frame back to the original frame,
    preserving its score.

    NOTE(review): relies on a free variable `width` from the enclosing
    scope -- presumably the pre-rotation image width; confirm there.
    """
    restored = al.AnnoRect(width - r.y2, r.x1, width - r.y1, r.x2)
    restored.score = r.score
    return restored