def mapDatasets(sources):
    """Build the image / class / image-class maps for the given dataset sources.

    Dispatches each recognised source (PASCAL, COCO) to its mapper, prefixes
    the image map entries with a running index, and persists all three maps
    to their configured paths. Returns the list of three maps.
    """
    # maps[0]: image map, maps[1]: class map (index 0 reserved for
    # "background"), maps[2]: image-to-class map.
    maps = [[], ["background"], []]
    for src in sorted(sources):
        if src == PASCAL:
            mapPASCAL(maps, src)
        elif src == COCO:
            mapCOCO(maps, src)
    # Prepend a global image index to every image-map record.
    maps[0] = [(img_idx, ) + record for img_idx, record in enumerate(maps[0])]
    destinations = [PATH.DATA.IMG_MAP, PATH.DATA.CLS_MAP, PATH.DATA.IMG_CLS_MAP]
    for built_map, dest in zip(maps, destinations):
        saveObject(built_map, dest)
    return maps
def getFieldmaps(self, file_path=None):
    """Return the model's field maps, loading or generating them on demand.

    The result is memoised on ``self.field_maps``. When *file_path* is None
    the configured default location is used; an existing file is loaded,
    otherwise the maps are generated from ``self.model`` and saved.
    """
    # Fast path: already computed during this session.
    if self.field_maps is not None:
        return self.field_maps

    if file_path is None:
        file_path = PATH.MODEL.FIELDMAPS

    if os.path.isfile(file_path):
        print("Fieldmaps: loading from the stored object file ...")
        fmaps = loadObject(file_path)
    else:
        print("Fieldmaps: generating ...")
        fmaps = stackedFieldmaps(self.model)
        # Persist so subsequent runs can take the load branch above.
        saveObject(fmaps, file_path)
        print("Fieldmaps: saved at {}".format(file_path))

    self.field_maps = fmaps
    return self.field_maps
def _finish(self):
    """Persist class/image maps and optional data statistics at shutdown.

    NOTE(review): ``class_map``, ``img_cls_map``, ``des`` and
    ``DESCRIBE_DATA`` are free names resolved at module level — presumably
    populated elsewhere in this file; verify they exist before this runs.
    """
    # Only write each map when no stored copy exists yet.
    if not exists(PATH.DATA.CLS_MAP):
        print("Class map: saved.")
        saveObject(class_map, PATH.DATA.CLS_MAP)
    if not exists(PATH.DATA.IMG_CLS_MAP):
        print("Image class map: saved")
        saveObject(img_cls_map, PATH.DATA.IMG_CLS_MAP)
    # Statistics are opt-in via the DESCRIBE_DATA flag.
    if DESCRIBE_DATA:
        print("Data statistics: saved")
        saveObject(des, PATH.DATA.STATISTICS.DATA)
        saveObject(sortAsClass(des), PATH.DATA.STATISTICS.REPORT)
def mapImageNet(maps=None):
    """Append ImageNet images to the image map and image-class map.

    When *maps* is provided it is unpacked as (img_ids, _, img_cls_map);
    otherwise both maps are loaded from their stored files. New entries are
    appended with indices continuing from the current map length, then both
    maps are saved back to disk.
    """
    if maps:
        img_ids, _, img_cls_map = maps
    else:
        img_ids = loadObject(PATH.DATA.IMG_MAP)
        img_cls_map = loadObject(PATH.DATA.IMG_CLS_MAP)

    # File name (sans directory and ".jpg" extension) is the image id.
    files = getFilesInDirectory(PATH.DATA.IMAGENET.IMGS, "jpg")
    records = [(f[f.rfind('/') + 1:-4], IMAGENET) for f in files]

    # Continue numbering from the existing map length.
    for idx, record in enumerate(records, start=len(img_ids)):
        img_ids.append((idx, ) + record)
        img_id = record[0]
        # Class is encoded in the synset prefix of the image id (before '_').
        cls = getClassID(img_id.split('_')[0])
        img_cls_map.append([img_id, cls])

    saveObject(img_ids, PATH.DATA.IMG_MAP)
    saveObject(img_cls_map, PATH.DATA.IMG_CLS_MAP)
def reportMatchResults(matches):
    """Save, filter and report unit/concept match results.

    Raw matches and the concept-rearranged matches are persisted first; both
    views are then filtered by the configured top-k and IoU threshold, and
    reported as text and/or figures depending on CONFIG.DIS flags.
    """
    print("Report Matches: begin...")
    iou_thres = CONFIG.DIS.IOU_THRESHOLD
    top = CONFIG.DIS.TOP

    # Persist the raw unit matches before filtering.
    saveObject(matches, PATH.OUT.UNIT_MATCHES)
    unit_matches = filterMatches(matches, top, iou_thres)

    # Concept view is rearranged from the *unfiltered* matches, saved,
    # then filtered with the same criteria.
    concept_matches = rearrangeMatches(matches)
    saveObject(concept_matches, PATH.OUT.CONCEPT_MATCHES)
    concept_matches = filterMatches(concept_matches, top, iou_thres)
    print("Report Matches: filtering finished.")

    if CONFIG.DIS.REPORT_TEXT:
        reportMatchesInText(unit_matches, PATH.OUT.UNIT_MATCH_REPORT, "unit")
        reportMatchesInText(concept_matches, PATH.OUT.CONCEPT_MATCH_REPORT, "concept")
        print("Report Matches: saved")
    if CONFIG.DIS.REPORT_FIGURE:
        reportMatchesInFigure(unit_matches)
cls_layers = model.getLayers()[-2:] probe_layers += cls_layers data = {} while bl: batch = bl.nextBatch() imgs = batch[1] activ_maps = model.getActivMaps(imgs, probe_layers) conv_activs, cls_activs = splitDic(activ_maps, cls_layers) conv_attrs = activAttrs(conv_activs) integrate(data, {**conv_attrs, **cls_activs}) bl.reportProgress() data = {k : np.asarray(v) for k, v in data.items()} saveObject(data, data_path) print ("Correlation: analysis begin") #split activation attributes series # conv_attrs, cls_attrs = splitDic(data, ["prob", "fc8"]) # conv_attrs = splitAttr(conv_attrs) # for attr_idx, attr_name in zip(conv_attrs, ATTRS): # _attrs = {**conv_attrs[attr_idx], **cls_attrs} # corrs = {} # for unit_1, unit_2 in paired(_attrs): # name = "{}-{}".format(unit_1, unit_2) # x = _attrs[unit_1] # y = _attrs[unit_2] # coef, _ = correlation(x, y) # if unit_1 not in corrs:
anno_ids = patch_data[1][:input_num] activ_maps_p = model.getActivMaps(imgs, probe_layers) activ_attrs_p = activAttrs(activ_maps_p) patch_data = [ _patch_data[input_num:] for _patch_data in patch_data ] updateActivAttrDiffs(attr_diffs, activ_attrs_p, anno_ids, patched=True) bl.reportProgress() attr_change_aves, attr_changes = computeAttrChange(attr_diffs) saveObject(attr_changes, data_path) else: print("Find existing verification data, beginning analysis.") attr_changes = loadObject(data_path) # analysis for assessing if identification results correct concept_matches = loadObject(PATH.OUT.IDE.DATA.CONCEPT) data_x = {} data_y = {} for ccp, unit, match in nested(concept_matches, depth=2): try: mean_change = attr_changes[unit][ccp][0] if not np.isfinite(mean_change): continue if ccp not in data_x:
batch = bl.nextBatch() imgs = batch[1] annos = batch[2] activ_maps = model.getActivMaps(imgs, probe_layers) activ_maps = splitDict(activ_maps, num) params = [(amap, field_maps, annos, quans) for amap in activ_maps] with Pool() as pool: batch_matches = pool.starmap(process, params) print ("Combine matches...") for batch_match in batch_matches: for idx, bm in enumerate(batch_match): matches[idx] = combineMatches(matches[idx], bm) bl.reportProgress() saveObject(matches, file_path) else: matches = loadObject(file_path) print("Find existing match results, thus skipping to analyse results.") # match results analysis # overall comparison # with Pool() as pool: # means = pool.starmap(dictMean, [(m, 0) for m in matches]) # labels = {'x' : 'quantile', 'y' : 'mean IoU'} # plt = plotFigure(quans, means, title="means v.s. quantiles", show=True) # saveFigure(plt, os.path.join(plot_path, "overall.png")) plot_path = os.path.join(PATH.OUT.ROOT, "activ_thres") # sort and comparison