def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']
    it = copy.copy(iterator)
    all_gt_label = []
    all_score = []
    for idx, batch in enumerate(it):
        print("processing idx: {}".format(idx))
        batch = self.converter(batch, device=self.device)
        imgs, bbox, labels = batch
        if imgs is None:
            continue
        xp = chainer.cuda.get_array_module(imgs)
        imgs = chainer.Variable(imgs)
        bbox = chainer.Variable(bbox)
        if bbox.shape[1] != config.BOX_NUM[self.database]:
            print("error box num {0} != {1}".format(
                bbox.shape[1], config.BOX_NUM[self.database]))
            continue
        scores = target.predict(imgs, bbox)  # R', class_num
        scores = scores.reshape(labels.shape[0], labels.shape[1],
                                labels.shape[2])  # shape = B, F, Y
        labels = chainer.cuda.to_cpu(labels)  # B, F, Y
        scores = chainer.cuda.to_cpu(scores)  # B, F, Y
        labels = np.maximum.reduce(labels, axis=1)  # B, Y
        scores = np.maximum.reduce(scores, axis=1)  # B, Y
        all_gt_label.extend(labels)
        all_score.extend(scores)
    all_gt_label = np.asarray(all_gt_label)  # shape = (N, 5)
    all_score = np.asarray(all_score)  # shape = (N, 5)
    all_gt_label = np.transpose(all_gt_label)  # 5, N
    all_score = np.transpose(all_score)  # 5, N
    report = defaultdict(dict)
    reporter = Reporter()
    reporter.add_observer("main", target)
    summary = DictSummary()
    for idx, score in enumerate(all_score):
        AU = config.AU_INTENSITY_DICT[idx]
        gt_label = all_gt_label[idx]
        error = mean_squared_error(gt_label, score)
        pearson_correlation, _ = pearsonr(gt_label, score)
        report["mean_squared_error"][AU] = error
        report["pearson_correlation"][AU] = pearson_correlation
        summary.add({"pearson_correlation_avg": pearson_correlation})
        summary.add({"mean_squared_error_avg": error})
    observation = {}
    with reporter.scope(observation):
        reporter.report(report, target)
        reporter.report(summary.compute_mean(), target)
    return observation
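This evaluator scores per-AU intensity with mean squared error and Pearson's r. A minimal, self-contained sketch of those two metrics on toy arrays (the values are illustrative, not from any dataset):

import numpy as np
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error

gt = np.array([0.0, 1.0, 2.0, 3.0])    # one AU's intensity labels (toy values)
pred = np.array([0.1, 0.9, 2.2, 2.8])
print(mean_squared_error(gt, pred))     # mean of squared residuals
r, p_value = pearsonr(gt, pred)         # linear correlation coefficient and its p-value
print(r)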
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']
    tot_trail_time = []
    for trail_time in range(self.trail_times):
        print("trail in {}".format(trail_time))
        it = copy.copy(iterator)
        each_trail_time = []
        for idx, batch in enumerate(it):
            if idx >= self.each_trail_iteration:
                break
            batch = self.converter(batch, device=self.device)
            imgs, _, _ = batch
            imgs = chainer.Variable(imgs)
            before_time = time.time()
            preds = target.predict(imgs)  # R', class_num
            each_trail_time.append(time.time() - before_time)
        tot_trail_time.append(np.mean(np.array(each_trail_time)))
    mean_time_elapse = np.mean(tot_trail_time)
    standard_var_time_elapse = np.var(tot_trail_time)
    reporter = Reporter()
    reporter.add_observer("main", target)
    observation = {
        "mean": mean_time_elapse,
        "var": standard_var_time_elapse
    }
    print(observation)
    with reporter.scope(observation):
        reporter.report(observation, target)
    return observation
def __call__(self, x, t):
    y = self.predictor(x)
    if self.lastlayer == 1:  # the last layer has 1 unit
        loss = F.sigmoid_cross_entropy(y, t.reshape(len(t), 1))
        accuracy = F.binary_accuracy(y, t.reshape(len(t), 1))
    else:  # the last layer has 2 units
        loss = F.softmax_cross_entropy(y, t)
        accuracy = F.accuracy(y, t)
    summary = F.classification_summary(y, t, beta=1.0)
    precision = summary[0]
    recall = summary[1]
    f_value = summary[2]
    reporter = Reporter()
    observer = object()
    reporter.add_observer('f_value:', observer)
    observation = {}
    with reporter.scope(observation):
        reporter.report({'x': f_value}, observer)
    report(
        {
            'loss': loss,
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f_value': f_value
        }, self)
    report(dict(('f_value_%d' % i, val) for i, val in enumerate(f_value)),
           self)
    return loss
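F.classification_summary above returns per-class statistics as a tuple of Variables: precision, recall, F-beta score, and per-class support. A small sketch of its output on toy logits (shapes and values are assumptions for illustration only):

import numpy as np
import chainer.functions as F

y = np.array([[2.0, 0.1],
              [0.3, 1.5],
              [1.2, 0.4]], dtype=np.float32)  # 3 samples, 2 classes (toy logits)
t = np.array([0, 1, 1], dtype=np.int32)
precision, recall, fbeta, support = F.classification_summary(y, t, beta=1.0)
print(precision.data)  # per-class precision, shape (2,)
print(fbeta.data)      # per-class F1 when beta=1.0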
def evaluate(self):
    iterator = self._iterators['main']
    _target = self._targets["main"]
    if hasattr(iterator, 'reset'):
        iterator.reset_for_train_mode()
        it = iterator
    else:
        it = copy.copy(iterator)
    reporter = Reporter()
    reporter.add_observer("main", _target)
    summary = DictSummary()
    model = _target
    pred_labels_array = []
    gt_labels_array = []
    for idx, batch in enumerate(it):
        print("processing :{}".format(idx))
        batch = self.converter(batch, self.device)
        feature, boxes, labels = batch  # each is B, T, F, D
        if not isinstance(feature, chainer.Variable):
            feature = chainer.Variable(feature)
        labels = np.bitwise_or.reduce(
            chainer.cuda.to_cpu(labels)[:, -1, :, :],
            axis=1)  # B, class_number
        pred_labels = model.predict(feature)  # B, T, F, D -> B, class_number
        pred_labels_array.extend(pred_labels)
        gt_labels_array.extend(labels)
    gt_labels_array = np.stack(gt_labels_array)
    pred_labels_array = np.stack(pred_labels_array)  # shape = all_N, out_size
    gt_labels = np.transpose(gt_labels_array)  # shape = Y x frame
    pred_labels = np.transpose(pred_labels_array)  # shape = Y x frame
    report_dict = dict()
    AU_id_convert_dict = self.AU_convert if self.AU_convert else config.AU_SQUEEZE
    for new_AU_idx, frame_pred in enumerate(pred_labels):
        if AU_id_convert_dict[new_AU_idx] in self.paper_use_AU:
            AU = AU_id_convert_dict[new_AU_idx]
            frame_gt = gt_labels[new_AU_idx]
            F1 = f1_score(y_true=frame_gt, y_pred=frame_pred)
            report_dict[AU] = F1
            summary.add({"f1_frame_avg": F1})
    observation = {}
    with reporter.scope(observation):
        reporter.report(report_dict, model)
        reporter.report(summary.compute_mean(), model)
    return observation
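DictSummary keeps running statistics per key; each add() folds in new scalars and compute_mean() returns the per-key average, which is how the "f1_frame_avg" entries above become a mean over AUs. A tiny sketch:

from chainer.reporter import DictSummary

summary = DictSummary()
summary.add({'f1_frame_avg': 0.4})
summary.add({'f1_frame_avg': 0.6})
print(summary.compute_mean())  # {'f1_frame_avg': 0.5}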
def evaluate(self):
    iterator = self._iterators['main']
    _target = self._targets["main"]
    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    reporter = Reporter()
    reporter.add_observer("main", _target)
    summary = DictSummary()
    model = _target
    pred_labels_array = []
    gt_labels_array = []
    for idx, batch in enumerate(it):
        batch = self.converter(batch, self.device)
        # images shape = B*T, C, H, W; bboxes shape = B*T, F, 4;
        # labels shape = B*T, F, 12
        images, bboxes, labels = batch
        if not isinstance(images, chainer.Variable):
            images = chainer.Variable(images.astype('f'))
            bboxes = chainer.Variable(bboxes.astype('f'))
        # the original had a stray comma and a missing closing parenthesis here
        roi_feature, labels = model.get_roi_feature(images, bboxes, labels)
        print("evaluate: Idx:{}".format(idx))
        pred_labels = model.loss_head_module.predict(roi_feature)  # B, T, F, 12
        pred_labels = pred_labels[:, self.T - 1, :, :]  # B, F, D
        pred_labels = np.bitwise_or.reduce(pred_labels, axis=1)  # B, class_number
        labels = labels[:, self.T - 1, :, :]  # B, F, D
        labels = np.bitwise_or.reduce(labels, axis=1)  # B, class_number
        assert labels.shape == pred_labels.shape
        pred_labels_array.extend(pred_labels)
        gt_labels_array.extend(labels)
        if idx > 100:
            break
    gt_labels_array = np.stack(gt_labels_array)
    pred_labels_array = np.stack(pred_labels_array)  # shape = all_N, out_size
    gt_labels = np.transpose(gt_labels_array)  # shape = Y x frame
    pred_labels = np.transpose(pred_labels_array)  # shape = Y x frame
    AU_id_convert_dict = self.AU_convert if self.AU_convert else config.AU_SQUEEZE
    for new_AU_idx, frame_pred in enumerate(pred_labels):
        if AU_id_convert_dict[new_AU_idx] in self.paper_use_AU:
            frame_gt = gt_labels[new_AU_idx]
            F1 = f1_score(y_true=frame_gt, y_pred=frame_pred)
            summary.add({"f1_frame_avg": F1})
    observation = {}
    with reporter.scope(observation):
        reporter.report(summary.compute_mean(), model)
    return observation
def train_support(
        iteration: int,
        reporter: chainer.Reporter,
        updater: chainer.training.Updater,
        first_hook: Callable[[Dict], None] = None,
        last_hook: Callable[[Dict], None] = None,
):
    observation: Dict = {}
    for i in range(iteration):
        with reporter.scope(observation):
            updater.update()
        if i % 100 == 0:
            print(observation)
        if i == 0 and first_hook is not None:
            first_hook(observation)
    print(observation)
    if last_hook is not None:
        last_hook(observation)
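All of these snippets rely on the same Reporter mechanics: values reported for a registered observer inside reporter.scope(observation) land in the scoped dict under '<observer name>/<key>'. A minimal sketch (the observer here is a bare chainer.Link, chosen only for illustration):

import chainer

model = chainer.Link()                 # any registered object can serve as an observer
reporter = chainer.Reporter()
reporter.add_observer('main', model)

observation = {}
with reporter.scope(observation):
    reporter.report({'loss': 0.5}, model)
print(observation)                     # {'main/loss': 0.5}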
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']
    tot_trail_time = []
    for trail_time in range(self.trail_times):
        print("trail in {}".format(trail_time))
        it = copy.copy(iterator)
        each_trail_time = []
        for idx, batch in enumerate(it):
            if idx >= self.each_trail_iteration:
                break
            batch = self.converter(batch, device=self.device)
            imgs, bbox, labels = batch
            imgs = chainer.Variable(imgs)
            bbox = chainer.Variable(bbox)
            if bbox.shape[1] != config.BOX_NUM[self.database]:
                print("error box num {0} != {1}".format(
                    bbox.shape[1], config.BOX_NUM[self.database]))
                continue
            before_time = time.time()
            preds, scores = target.predict(imgs, bbox)  # R', class_num
            each_trail_time.append(time.time() - before_time)
        tot_trail_time.append(np.mean(np.array(each_trail_time)))
    mean_time_elapse = np.mean(tot_trail_time)
    standard_var_time_elapse = np.var(tot_trail_time)
    reporter = Reporter()
    reporter.add_observer("main", target)
    observation = {
        "mean": mean_time_elapse,
        "var": standard_var_time_elapse
    }
    print(observation)
    with reporter.scope(observation):
        reporter.report(observation, target)
    return observation
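The two timing evaluators above use time.time(), whose resolution can be coarse on some platforms; time.perf_counter() is the usual choice for benchmarking. A generic sketch of the same mean/variance-over-trials pattern (fn, n_trials, and iters_per_trial are hypothetical names, not from the original):

import time
import numpy as np

def time_call(fn, n_trials=3, iters_per_trial=10):
    """Return (mean, variance) of the per-trial mean wall time of fn()."""
    trial_means = []
    for _ in range(n_trials):
        per_iter = []
        for _ in range(iters_per_trial):
            start = time.perf_counter()
            fn()
            per_iter.append(time.perf_counter() - start)
        trial_means.append(np.mean(per_iter))
    return np.mean(trial_means), np.var(trial_means)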
parser.add_argument('--gpu', '-g', type=int, default=-1,
                    help='GPU ID (negative value indicates CPU)')
parser.add_argument('--arch', '-a', choices=archs.keys(),
                    default='alex_mini2', help='Convnet architecture')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
                    help='Validation minibatch size')
# parser.add_argument('--loaderjob', '-j', type=int,
#                     help='Number of parallel data loading processes')
args = parser.parse_args()

model = archs[args.arch]()
serializers.load_npz(args.model, model)
# cropwidth = 256 - model.insize
# model.to_cpu()
if args.gpu >= 0:
    chainer.cuda.get_device_from_id(args.gpu).use()  # make the GPU current
    model.to_gpu()
mean = np.load(args.mean)
test = PreprocessedDataset(args.test, args.root, mean, model.insize, False)
test_iter = chainer.iterators.SerialIterator(
    test, args.val_batchsize, repeat=False, shuffle=False)

reporter = Reporter()
reporter.add_observer('test:', model)
observation = {}
with reporter.scope(observation):
    with chainer.using_config('train', False):
        with chainer.no_backprop_mode():
            ev = extensions.Evaluator(test_iter, model, device=args.gpu)
            res = ev.evaluate()
            print(res)
def evaluate(self):
    iterator = self._iterators['main']
    _target = self._targets["main"]
    if hasattr(iterator, 'reset'):
        iterator.reset_for_train_mode()
        it = iterator
    else:
        it = copy.copy(iterator)
    reporter = Reporter()
    reporter.add_observer("main", _target)
    summary = DictSummary()
    model = _target
    pred_labels_array = []
    gt_labels_array = []
    for batch in it:
        batch = convert(batch, self.device)
        for x, g, crf_pact_structure in zip(*batch):
            sample = crf_pact_structure.sample
            file_path = sample.file_path
            print("evaluate file:{0}".format(file_path))
            pred_labels = model.predict(x, g, crf_pact_structure)  # pred_labels is N x Y
            gt_labels = model.get_gt_label_one_graph(
                np, crf_pact_structure, is_bin=True)  # return N x Y
            pred_labels_array.extend(pred_labels)
            gt_labels_array.extend(gt_labels)
    gt_labels_array = np.stack(gt_labels_array)
    pred_labels_array = np.stack(pred_labels_array)  # shape = all_N, out_size
    if self.AU_convert:
        gt_labels_array = gt_labels_array.reshape(
            -1, config.BOX_NUM[self.database],
            len(self.paper_report_label))  # T, F, out_size
        pred_labels_array = pred_labels_array.reshape(
            -1, config.BOX_NUM[self.database],
            len(self.paper_report_label))  # T, F, out_size
    else:
        gt_labels_array = gt_labels_array.reshape(
            -1, config.BOX_NUM[self.database],
            len(config.AU_SQUEEZE))  # T, F, out_size
        pred_labels_array = pred_labels_array.reshape(
            -1, config.BOX_NUM[self.database], len(config.AU_SQUEEZE))
    gt_labels = np.bitwise_or.reduce(gt_labels_array, axis=1)  # shape = all_frame x Y
    pred_labels = np.bitwise_or.reduce(pred_labels_array, axis=1)  # shape = all_frame x Y
    gt_labels = np.transpose(gt_labels)  # shape = Y x frame
    pred_labels = np.transpose(pred_labels)  # shape = Y x frame
    report_dict = dict()
    AU_id_convert_dict = self.AU_convert if self.AU_convert else config.AU_SQUEEZE
    for new_AU_idx, frame_pred in enumerate(pred_labels):
        if AU_id_convert_dict[new_AU_idx] in self.paper_use_AU:
            AU = AU_id_convert_dict[new_AU_idx]
            frame_gt = gt_labels[new_AU_idx]
            F1 = f1_score(y_true=frame_gt, y_pred=frame_pred)
            report_dict[AU] = F1
            summary.add({"f1_frame_avg": F1})
    observation = {}
    with reporter.scope(observation):
        reporter.report(report_dict, model)
        reporter.report(summary.compute_mean(), model)
    return observation
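Several evaluators here collapse per-box labels to per-frame labels with np.bitwise_or.reduce: a frame is positive for an AU if any box in it is. A toy demonstration of that reduction over the box axis:

import numpy as np

# 2 frames, 3 boxes per frame, 4 AU classes (toy numbers)
per_box = np.array([[[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 0, 0]],
                    [[0, 0, 1, 0],
                     [0, 0, 1, 0],
                     [0, 0, 0, 1]]], dtype=np.int32)
per_frame = np.bitwise_or.reduce(per_box, axis=1)  # a frame is positive if any box is
print(per_frame)  # [[1 1 0 0]
                  #  [0 0 1 1]]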
def evaluate(self):
    iterator = self._iterators['main']
    _target = self._targets["main"]
    if hasattr(iterator, 'reset'):
        iterator.reset_for_train_mode()
        it = iterator
    else:
        it = copy.copy(iterator)
    reporter = Reporter()
    reporter.add_observer("main", _target)  # will fail to run?
    summary = DictSummary()
    video_gt_bin_dict = defaultdict(list)  # key = video_id, value = gt bin list
    video_pred_bin_dict = defaultdict(list)  # key = video_id, value = predict bin list
    video_pred_prob_dict = defaultdict(list)
    for batch in it:
        batch = convert(batch, self.device)
        for x, crf_pact_structure in zip(*batch):
            sample = crf_pact_structure.sample
            file_path = sample.file_path
            train_keyword = os.path.basename(os.path.dirname(file_path))  # train_keyword comes from file_path
            video_id = os.path.basename(file_path)
            if train_keyword not in self.target_dict:
                print("error {} not pre-trained".format(train_keyword))
                continue
            target = self.target_dict[train_keyword]  # choose the right predictor
            # pred_probs is N x (Y+1); open-crf predict produces shape = N
            pred_labels, pred_probs = target.predict(x, crf_pact_structure, is_bin=False)
            gt_labels = target.get_gt_label_one_graph(np, crf_pact_structure, is_bin=False)  # return N x 1
            assert pred_labels.ndim == 1
            pred_bins = []  # predicted labels in one video sequence
            gt_bins = []  # ground-truth labels in one video sequence
            prod_prob_bins = []
            for idx, pred_label in enumerate(pred_labels):  # N iterations, N = number of nodes
                pred_prob = pred_probs[idx]  # probability vector of length Y+1
                pred_prob_bin = np.zeros(shape=(len(config.AU_SQUEEZE) + 1),
                                         dtype=np.float32)  # Y+1 because pred=0 also has a probability
                for pred_idx in range(pred_prob.shape[0]):
                    if pred_idx == 0:
                        pred_prob_bin[0] = pred_prob[0]  # index 0 represents the all-zero (no AU) case
                    else:
                        AU = train_keyword.split("_")[pred_idx - 1]
                        AU_idx = config.AU_SQUEEZE.inv[AU]
                        pred_prob_bin[AU_idx + 1] = pred_prob[pred_idx]
                prod_prob_bins.append(pred_prob_bin)  # list of Y+1
                pred_bin = np.zeros(shape=len(config.AU_SQUEEZE), dtype=np.int32)  # shape = Y
                if pred_label > 0:
                    AU = train_keyword.split("_")[pred_label - 1]
                    AU_idx = config.AU_SQUEEZE.inv[AU]
                    pred_bin[AU_idx] = 1  # CRF predicts a single label; translate it to AU_SQUEEZE length
                pred_bins.append(pred_bin)
            for gt_label in gt_labels:  # N iterations, N = number of nodes
                gt_bin = np.zeros(shape=len(config.AU_SQUEEZE), dtype=np.int32)  # shape = Y
                if gt_label > 0:
                    AU = train_keyword.split("_")[gt_label - 1]
                    AU_idx = config.AU_SQUEEZE.inv[AU]
                    gt_bin[AU_idx] = 1
                gt_bins.append(gt_bin)
            pred_bins = np.asarray(pred_bins)  # shape = N x Y (Y is AU_SQUEEZE length)
            gt_bins = np.asarray(gt_bins)
            prod_prob_bins = np.asarray(prod_prob_bins)
            assert len(pred_bins) == len(sample.node_list)
            assert len(gt_bins) == len(sample.node_list)
            video_pred_bin_dict[video_id].append(pred_bins)  # each pred_bins is N x Y, but N differs per graph
            video_gt_bin_dict[video_id].append(gt_bins)  # each gt_bins is N x Y, but N differs per graph
            video_pred_prob_dict[video_id].append(prod_prob_bins)  # each pred_probs = N x (Y+1)
    assert len(video_gt_bin_dict) == len(video_pred_bin_dict)
    # the final prediction is determined by vote
    video_pred_final = []  # list of N x Y; N differs per video
    video_gt_final = []  # list of N x Y; N differs per video
    for video_id, prod_prob_bins in video_pred_prob_dict.items():
        prod_prob_bins_array = np.asarray(prod_prob_bins)  # shape = U x N x (Y+1); U = number of trainers, N is the same across trainers for one video
        prod_prob_bins_array = np.transpose(prod_prob_bins_array, (1, 0, 2))  # shape = N x U x (Y+1)
        prod_prob_bins_index = np.argmax(prod_prob_bins_array, axis=2)  # shape = N x U; best Y index along the last axis
        prod_prob_bins_array = np.max(prod_prob_bins_array, axis=2)  # shape = N x U; each element is a probability
        choice_trainer_index = np.argmax(prod_prob_bins_array, axis=1)  # shape = N; which trainer U is most confident
        pred_labels = prod_prob_bins_index[np.arange(len(prod_prob_bins_index)),
                                           choice_trainer_index]  # shape = N; the chosen Y per node
        pred_bins_array = np.zeros(shape=(pred_labels.shape[0], len(config.AU_SQUEEZE)),
                                   dtype=np.int32)
        for pred_idx, pred_label in enumerate(pred_labels):
            if pred_label != 0:
                pred_bins_array[pred_idx, pred_label - 1] = 1
        video_pred_final.append(pred_bins_array)  # list of N x Y
        # the gt labels need no vote, only an element-wise or
        gt_bins_array = np.asarray(video_gt_bin_dict[video_id])  # shape = U x N x Y; U = number of trainers
        gt_bins_array = np.transpose(gt_bins_array, axes=(1, 0, 2))  # shape = N x U x Y
        video_gt_final.append(np.bitwise_or.reduce(gt_bins_array, axis=1))  # list of N x Y
    video_pred_final = np.concatenate(video_pred_final, axis=0)  # shape = N' x Y; N' = total nodes across videos
    video_gt_final = np.concatenate(video_gt_final, axis=0)  # shape = N' x Y
    box_num = config.BOX_NUM[self.database]  # nodes are ordered by frame; each frame has a fixed number (9/8) of boxes
    pred_labels_batch = video_pred_final.reshape(-1, box_num, len(config.AU_SQUEEZE))  # (V x Frame) x box_num x Y
    gt_labels_batch = video_gt_final.reshape(-1, box_num, len(config.AU_SQUEEZE))  # (V x Frame) x box_num x Y
    pred_labels_batch = np.bitwise_or.reduce(pred_labels_batch, axis=1)  # (V x Frame) x Y
    gt_labels_batch = np.bitwise_or.reduce(gt_labels_batch, axis=1)  # (V x Frame) x Y
    gt_labels_batch = np.transpose(gt_labels_batch, (1, 0))  # Y x N', where N' = (V x Frame)
    pred_labels_batch = np.transpose(pred_labels_batch, (1, 0))  # Y x N', where N' = (V x Frame)
    report = defaultdict(dict)
    for gt_idx, gt_label in enumerate(gt_labels_batch):
        AU = config.AU_SQUEEZE[gt_idx]
        if AU in self.paper_use_AU:
            pred_label = pred_labels_batch[gt_idx]
            # met_E = get_F1_event(gt_label, pred_label)
            F1 = f1_score(y_true=gt_label, y_pred=pred_label)
            accuracy = accuracy_score(gt_label, pred_label)
            met_F = get_F1_frame(gt_label, pred_label)
            # roc = get_ROC(gt_label, pred_label)
            report["f1_frame"][AU] = met_F.f1f
            # report["AUC"][AU] = roc.auc
            report["accuracy"][AU] = accuracy
            summary.add({"f1_frame_avg": F1})
            # summary.add({"AUC_avg": roc.auc})
            summary.add({"accuracy_avg": accuracy})
    observation = {}
    with reporter.scope(observation):
        reporter.report(report, _target)
        reporter.report(summary.compute_mean(), _target)
    print(observation)
    return observation
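The vote in the snippet above picks, per node, the label of the most confident trainer. The same argmax/fancy-indexing chain on toy numbers (random probabilities, purely illustrative):

import numpy as np

# Toy vote: 2 nodes, 3 trainers, probabilities over Y+1 = 4 labels.
probs = np.random.RandomState(0).rand(2, 3, 4)       # N x U x (Y+1)
best_label = np.argmax(probs, axis=2)                # N x U: each trainer's label
best_prob = np.max(probs, axis=2)                    # N x U: that label's probability
winner = np.argmax(best_prob, axis=1)                # N: index of the most confident trainer
final = best_label[np.arange(len(winner)), winner]   # N: the winning label per node
print(final)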
def evaluate(self):
    summary = reporter_module.DictSummary()
    iterator = self._iterators['main']
    enc = self._targets['enc']
    dec = self._targets['dec']
    bound = self._targets['bound']
    reporter = Reporter()
    observer = object()
    reporter.add_observer(self.default_name, observer)
    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    for batch in it:
        x, t, te = self.converter(batch, self.device)
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            if self.dataset == 'msd_bound':
                h, w, d = x.shape[2:]
                hc, wc, dc = self.crop_size
                if self.nested_label:
                    y = cupy.zeros((1, 2 * (self.nb_labels - 1), h, w, d),
                                   dtype='float32')
                else:
                    y = cupy.zeros((1, self.nb_labels, h, w, d),
                                   dtype='float32')
                s = 128  # stride
                ker = 256  # kernel size
                dker = dc  # kernel size for depth
                ds = dker // 2  # stride for depth (the original's dker * 0.5 is a float and would break slicing)
                dsteps = int(math.floor((d - dker) / ds) + 1)
                steps = round((h - ker) / s + 1)
                for i in range(steps):
                    for j in range(steps):
                        for k in range(dsteps):
                            xx = x[:, :, s * i:ker + s * i,
                                   s * j:ker + s * j,
                                   ds * k:dker + ds * k]
                            hhs = enc(xx)
                            yye, bbs = bound(hhs)
                            yy = dec(hhs, bbs)
                            y[:, :, s * i:ker + s * i,
                              s * j:ker + s * j,
                              ds * k:dker + ds * k] += yy.data
                        # for the bottom depth part of the image
                        xx = x[:, :, s * i:ker + s * i,
                               s * j:ker + s * j, -dker:]
                        hhs = enc(xx)
                        yye, bbs = bound(hhs)
                        yy = dec(hhs, bbs)
                        y[:, :, s * i:ker + s * i,
                          s * j:ker + s * j, -dker:] += yy.data
            else:
                hs = enc(x)
                ye, bs = bound(hs)
                y = dec(hs, bs)
            seg_loss = self.compute_loss(y, t)
            accuracy = self.compute_accuracy(y, t)
            dice = self.compute_dice_coef(y, t)
            mean_dice = mean_dice_coefficient(dice)
            weighted_loss = seg_loss
        observation = {}
        with reporter.scope(observation):
            reporter.report(
                {
                    'loss/seg': seg_loss,
                    'loss/total': weighted_loss,
                    'acc': accuracy,
                    'mean_dc': mean_dice
                }, observer)
            xp = cuda.get_array_module(y)
            for i in range(len(dice)):
                if not xp.isnan(dice.data[i]):
                    reporter.report({'dc_{}'.format(i): dice[i]}, observer)
        summary.add(observation)
    return summary.compute_mean()
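The window count above follows the usual strided-window formula, floor((d - dker) / ds) + 1, with a final "-dker:" slab pass to cover any remainder. A quick check of the arithmetic with toy sizes:

import math

d, dker = 96, 32              # toy volume depth and window depth
ds = dker // 2                # 50%-overlap stride
dsteps = int(math.floor((d - dker) / ds) + 1)
print(dsteps)                 # 5 windows, starting at 0, 16, 32, 48, 64
# 64 + 32 == 96, so the last window already touches the bottom here;
# when it does not, the extra "-dker:" pass covers the remainder.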
def evaluate(self):
    summary = reporter_module.DictSummary()
    iterator = self._iterators['main']
    enc = self._targets['enc']
    dec = self._targets['dec']
    reporter = Reporter()
    observer = object()
    reporter.add_observer(self.default_name, observer)
    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    for batch in it:
        x, t = self.converter(batch, self.device)
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            if self.dataset == 'msd_bound':  # evaluation method for BRATS dataset only
                h, w, d = x.shape[2:]
                hc, wc, dc = self.crop_size
                if self.nested_label:
                    y = cupy.zeros((1, 2 * (self.nb_labels - 1), h, w, d),
                                   dtype='float32')
                else:
                    y = cupy.zeros((1, self.nb_labels, h, w, d),
                                   dtype='float32')
                hker = hc  # kernel size
                hstride = int(0.5 * hker)  # stride; the original assigned this to `hs`, which the encoder output below overwrote
                wker = wc
                wstride = int(0.5 * wker)  # the original wrote this back into `wc`
                dker = dc  # kernel size for depth
                for i in range(2):
                    for j in range(2):
                        for k in range(2):
                            xx = x[:, :, -i * hker:min(hker * (i + 1), h),
                                   -j * wker:min(wker * (j + 1), w),
                                   -k * dker:min(dker * (k + 1), d)]
                            hs = enc(xx)
                            yy = dec(hs)
                            y[:, :, -i * hker:min(hker * (i + 1), h),
                              -j * wker:min(wker * (j + 1), w),
                              -k * dker:min(dker * (k + 1), d)] += yy.data
            else:
                hs = enc(x)
                y = dec(hs)
            seg_loss = self.compute_loss(y, t)
            accuracy = self.compute_accuracy(y, t)
            dice = self.compute_dice_coef(y, t)
            mean_dice = mean_dice_coefficient(dice)
        observation = {}
        with reporter.scope(observation):
            reporter.report({
                'loss/seg': seg_loss,
                'acc': accuracy,
                'mean_dc': mean_dice
            }, observer)
            xp = cuda.get_array_module(y)
            for i in range(len(dice)):
                if not xp.isnan(dice.data[i]):
                    reporter.report({'dc_{}'.format(i): dice[i]}, observer)
        summary.add(observation)
    return summary.compute_mean()
import chainer
import chainer.links as L


class CXR16(chainer.Chain):  # class header restored; the original snippet began at __init__
    def __init__(self, out_size=2):
        super(CXR16, self).__init__()
        with self.init_scope():
            self.base = L.VGG16Layers()
            self.fc8 = L.Linear(4096, out_size)

    def __call__(self, x):
        h = self.base(x, layers=['fc7'])['fc7']
        return self.fc8(h)


import numpy as np
from chainer.dataset import concat_examples
from chainer.cuda import to_cpu
from chainer import Reporter, report, report_scope

reporter = Reporter()
observer = object()
reporter.add_observer('my_observer:', observer)

gpu_id = 0
net = L.Classifier(CXR16())

x = []
t = []
x0, t0 = train_dataset[0]  # train_dataset is assumed to be defined elsewhere
x.append(x0)
t.append(t0)
x = np.array(x)
t = np.array(t)
print(x.shape)
print(net.predictor(x))  # raw logits; L.Classifier itself requires labels, so the original net(x) would fail
y = net(x, t)
print("learned result: ", y)
def evaluate(self):
    iterator = self._iterators['main']
    target = self._targets['main']
    it = copy.copy(iterator)
    all_gt_label = []
    all_pred_label = []
    use_idx = sorted(
        filter(lambda idx: config.AU_SQUEEZE[idx] in self.paper_use_AU,
               list(config.AU_SQUEEZE.keys())))
    print(list(config.AU_SQUEEZE[idx] for idx in use_idx))
    npz_pred = []
    npz_gt = []
    npz_pred_score = []
    for idx, batch in enumerate(it):
        batch = self.converter(batch, device=self.device)
        imgs, bbox, labels = batch
        imgs = chainer.Variable(imgs)
        bbox = chainer.Variable(bbox)
        if bbox.shape[1] != config.BOX_NUM[self.database]:
            print("error box num {0} != {1}".format(
                bbox.shape[1], config.BOX_NUM[self.database]))
            continue
        preds, scores = target.predict(imgs, bbox)  # R', class_num
        try:
            preds = preds.reshape(labels.shape[0], labels.shape[1],
                                  labels.shape[2])  # shape = B, F, Y
            scores = scores.reshape(labels.shape[0], labels.shape[1],
                                    labels.shape[2])  # shape = B, F, Y
        except ValueError:
            print(preds.shape, labels.shape)
            print(scores.shape, labels.shape)
            continue
        preds = chainer.cuda.to_cpu(preds)  # B, F, Y, where B is always 1
        labels = chainer.cuda.to_cpu(labels)  # B, F, Y
        scores = chainer.cuda.to_cpu(scores)  # B, F, Y
        npz_pred.extend(preds)
        npz_gt.extend(labels)
        npz_pred_score.extend(scores)
        preds = np.bitwise_or.reduce(preds, axis=1)  # shape = B, Y
        gt_labels = np.bitwise_or.reduce(labels, axis=1)  # shape = B, Y
        all_gt_index = set()
        pos_pred = np.nonzero(preds)
        pos_gt_labels = np.nonzero(gt_labels)
        all_gt_index.update(list(zip(*pos_pred)))
        all_gt_index.update(list(zip(*pos_gt_labels)))
        if len(all_gt_index) > 0:
            rows, cols = zip(*all_gt_index)
            accuracy = np.sum(
                preds[rows, cols] == gt_labels[rows, cols]) / len(all_gt_index)
            print("batch idx:{0} current batch accuracy is :{1}".format(
                idx, accuracy))
        all_gt_label.extend(gt_labels)
        all_pred_label.extend(preds)
    all_gt_label = np.asarray(all_gt_label)  # shape = (N, len(AU_SQUEEZE))
    all_pred_label = np.asarray(all_pred_label)  # shape = (N, len(AU_SQUEEZE))
    AU_gt_label = np.transpose(all_gt_label)  # shape = (len(AU_SQUEEZE), N)
    AU_pred_label = np.transpose(all_pred_label)  # shape = (len(AU_SQUEEZE), N)
    report = defaultdict(dict)
    reporter = Reporter()
    reporter.add_observer("main", target)
    summary = DictSummary()
    for AU_squeeze_idx, pred_label in enumerate(AU_pred_label):
        AU = config.AU_SQUEEZE[AU_squeeze_idx]
        if AU in self.paper_use_AU:
            gt_label = AU_gt_label[AU_squeeze_idx]
            # met_E = get_F1_event(gt_label, pred_label)
            met_F = get_F1_frame(gt_label, pred_label)
            roc = get_ROC(gt_label, pred_label)
            f1 = f1_score(gt_label, pred_label)
            # report["f1_frame"][AU] = met_F.f1f
            report["f1_score"][AU] = f1
            # assert f1 == met_F.f1f
            report["AUC"][AU] = roc.auc
            report["accuracy"][AU] = met_F.accuracy
            # report["f1_event"][AU] = np.median(met_E.f1EventCurve)
            summary.add({"f1_score_avg": f1})
            summary.add({"AUC_avg": roc.auc})
            summary.add({"accuracy_avg": met_F.accuracy})
            # summary.add({"f1_event_avg": np.median(met_E.f1EventCurve)})
    observation = {}
    with reporter.scope(observation):
        reporter.report(report, target)
        reporter.report(summary.compute_mean(), target)
    npz_gt = np.array(npz_gt)  # N, F, 12
    npz_pred = np.array(npz_pred)  # N, F, 12
    npz_pred_score = np.array(npz_pred_score)
    np.savez(self.npz_out_path, gt=npz_gt, pred=npz_pred,
             pred_score=npz_pred_score)
    return observation
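The per-AU scores above come from scikit-learn style binary metrics applied to one AU's frame vector at a time. A self-contained sketch with toy labels:

import numpy as np
from sklearn.metrics import f1_score, accuracy_score

gt = np.array([0, 1, 1, 0, 1], dtype=np.int32)    # one AU over 5 frames (toy)
pred = np.array([0, 1, 0, 0, 1], dtype=np.int32)
print(f1_score(y_true=gt, y_pred=pred))   # 0.8 (precision 1.0, recall 2/3)
print(accuracy_score(gt, pred))           # 0.8 (4 of 5 frames correct)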
def evaluate(self):
    iterator = self._iterators['main']
    _target = self._targets["main"]
    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    reporter = Reporter()
    reporter.add_observer("main", _target)
    summary = DictSummary()
    model = _target
    pred_labels_array = []
    gt_labels_array = []
    unreduce_pred = []
    unreduce_gt = []
    last_seq_id = None
    one_frame_predict_list = []
    one_frame_gt_list = []
    for idx, batch in enumerate(it):
        print("processing :{}".format(idx))
        batch = self.converter(batch, self.device)
        # feature shape = (B, C, W); bboxes shape = B, W, 4; labels shape = B, W, 12
        feature, gt_seg_rgb, gt_seg_flow, seg_info, seg_labels, gt_labels, npz_file_path = batch
        seg_id = os.path.basename(npz_file_path[0])
        seg_id = seg_id[:seg_id.rindex("#")]
        if last_seq_id is None:
            last_seq_id = seg_id
        if last_seq_id != seg_id:
            one_frame_predict_result = np.stack(one_frame_predict_list, axis=2)  # B, W, F, class
            unreduce_pred.extend(
                one_frame_predict_result.reshape(
                    -1, one_frame_predict_result.shape[-2],
                    one_frame_predict_result.shape[-1]))  # list of W, F, class
            one_frame_predict_result = np.bitwise_or.reduce(
                one_frame_predict_result, axis=2)  # B, W, class
            one_frame_predict_result = one_frame_predict_result.reshape(
                [-1, one_frame_predict_result.shape[-1]])  # B*W, class
            pred_labels_array.extend(one_frame_predict_result)
            one_frame_gt_result = np.stack(one_frame_gt_list, axis=2)  # B, W, F, class
            unreduce_gt.extend(
                one_frame_gt_result.reshape(
                    -1, one_frame_gt_result.shape[-2],
                    one_frame_gt_result.shape[-1]))  # list of W, F, class
            one_frame_gt_result = np.bitwise_or.reduce(one_frame_gt_result, axis=2)  # B, W, class
            one_frame_gt_result = one_frame_gt_result.reshape(
                [-1, one_frame_gt_result.shape[-1]])  # B*W, class
            gt_labels_array.extend(one_frame_gt_result)
            one_frame_predict_list.clear()
            one_frame_gt_list.clear()
            last_seq_id = seg_id  # note: the original never updated last_seq_id, so it flushed on every later batch
        if not isinstance(feature, chainer.Variable):
            feature = chainer.Variable(feature.astype('f'))  # feature = (B, C, W)
        predict_labels = model.predict(feature)  # (B, W, class)
        one_frame_predict_list.append(predict_labels)
        gt_labels = chainer.cuda.to_cpu(gt_labels)
        one_frame_gt_list.append(gt_labels)
    # flush the final sequence
    one_frame_predict_result = np.stack(one_frame_predict_list, axis=2)  # B, W, F, class
    unreduce_pred.extend(
        one_frame_predict_result.reshape(
            -1, one_frame_predict_result.shape[-2],
            one_frame_predict_result.shape[-1]))  # list of W, F, class
    one_frame_predict_result = np.bitwise_or.reduce(one_frame_predict_result, axis=2)  # B, W, class
    assert one_frame_predict_result.shape[-1] == self.class_num
    one_frame_predict_result = one_frame_predict_result.reshape(
        [-1, one_frame_predict_result.shape[-1]])  # B*W, class
    pred_labels_array.extend(one_frame_predict_result)
    one_frame_gt_result = np.stack(one_frame_gt_list, axis=2)  # B, W, F, class
    unreduce_gt.extend(
        one_frame_gt_result.reshape(
            -1, one_frame_gt_result.shape[-2],
            one_frame_gt_result.shape[-1]))  # list of W, F, class
    one_frame_gt_result = np.bitwise_or.reduce(one_frame_gt_result, axis=2)  # B, W, class
    one_frame_gt_result = one_frame_gt_result.reshape(
        [-1, one_frame_gt_result.shape[-1]])  # B*W, class
    gt_labels_array.extend(one_frame_gt_result)
    one_frame_predict_list.clear()
    one_frame_gt_list.clear()
    # F differs across sequences, so the unreduced arrays cannot be stacked:
    # unreduce_pred = np.stack(unreduce_pred).astype(np.int32)  # N, W, F, class
    # unreduce_gt = np.stack(unreduce_gt).astype(np.int32)  # N, W, F, class
    # np.savez(self.output_path, predict=unreduce_pred, gt=unreduce_gt)
    gt_labels_array = np.stack(gt_labels_array)  # all_N, 12
    pred_labels_array = np.stack(pred_labels_array)  # shape = all_N, out_size
    gt_labels = np.transpose(gt_labels_array)  # shape = Y x frame
    pred_labels = np.transpose(pred_labels_array)  # shape = Y x frame
    report_dict = dict()
    AU_id_convert_dict = self.AU_convert if self.AU_convert else config.AU_SQUEEZE
    for new_AU_idx, frame_pred in enumerate(pred_labels):
        if AU_id_convert_dict[new_AU_idx] in self.paper_use_AU:
            AU = AU_id_convert_dict[new_AU_idx]
            frame_gt = gt_labels[new_AU_idx]
            F1 = f1_score(y_true=frame_gt, y_pred=frame_pred)
            report_dict[AU] = F1
            summary.add({"f1_frame_avg": F1})
    observation = {}
    with reporter.scope(observation):
        reporter.report(report_dict, model)
        reporter.report(summary.compute_mean(), model)
    return observation
def evaluate(self):
    iterator = self._iterators['main']
    _target = self._targets["main"]
    if hasattr(iterator, 'reset'):
        iterator.reset()
        it = iterator
    else:
        it = copy.copy(iterator)
    reporter = Reporter()
    reporter.add_observer("main", _target)
    summary = DictSummary()
    model = _target
    pred_labels_array = []
    gt_labels_array = []
    unreduce_pred = []
    unreduce_gt = []
    for idx, batch in enumerate(it):
        print("processing :{}".format(idx))
        batch = self.converter(batch, self.device)
        # images shape = B*T, C, H, W; bboxes shape = B*T, F, 4;
        # labels shape = B*T, F, 12
        rgb_images, flow_images, bboxes, labels = batch
        if not isinstance(rgb_images, chainer.Variable):
            rgb_images = chainer.Variable(rgb_images.astype('f'))
            bboxes = chainer.Variable(bboxes.astype('f'))
            flow_images = chainer.Variable(flow_images.astype('f'))
        roi_feature = model.get_roi_feature(rgb_images, flow_images, bboxes,
                                            extract_rgb_flow=False)
        pred_labels = model.predict(roi_feature)  # B, F, 12
        unreduce_pred.extend(pred_labels)  # list of F, D
        pred_labels = np.bitwise_or.reduce(pred_labels, axis=1)  # B, class_number
        unreduce_gt.extend(labels)  # list of F, D
        labels = np.bitwise_or.reduce(labels, axis=1)  # B, class_number
        assert labels.shape == pred_labels.shape
        pred_labels_array.extend(pred_labels)
        gt_labels_array.extend(labels)
    unreduce_pred = np.stack(unreduce_pred).astype(np.int32)
    unreduce_gt = np.stack(unreduce_gt).astype(np.int32)
    np.savez(self.output_path, pred=unreduce_pred, gt=unreduce_gt)
    gt_labels_array = np.stack(gt_labels_array)
    pred_labels_array = np.stack(pred_labels_array)  # shape = all_N, out_size
    gt_labels = np.transpose(gt_labels_array)  # shape = Y x frame
    pred_labels = np.transpose(pred_labels_array)  # shape = Y x frame
    report_dict = dict()
    AU_id_convert_dict = self.AU_convert if self.AU_convert else config.AU_SQUEEZE
    for new_AU_idx, frame_pred in enumerate(pred_labels):
        if AU_id_convert_dict[new_AU_idx] in self.paper_use_AU:
            AU = AU_id_convert_dict[new_AU_idx]
            frame_gt = gt_labels[new_AU_idx]
            F1 = f1_score(y_true=frame_gt, y_pred=frame_pred)
            report_dict[AU] = F1
            summary.add({"f1_frame_avg": F1})
    observation = {}
    with reporter.scope(observation):
        reporter.report(report_dict, model)
        reporter.report(summary.compute_mean(), model)
    return observation
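np.savez above dumps the unreduced predictions for later analysis; the arrays come back via np.load, keyed by the keyword names. A round-trip sketch with toy arrays and a throwaway path:

import numpy as np

pred = np.zeros((4, 2, 12), dtype=np.int32)  # toy shapes: N, F, class
gt = np.ones((4, 2, 12), dtype=np.int32)
np.savez("/tmp/eval_dump.npz", pred=pred, gt=gt)
loaded = np.load("/tmp/eval_dump.npz")
print(loaded["pred"].shape, loaded["gt"].shape)  # (4, 2, 12) (4, 2, 12)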
def evaluate(self):
    iterator = self._iterators['main']
    _target = self._targets["main"]
    if hasattr(iterator, 'reset'):
        iterator.reset_for_train_mode()
        it = iterator
    else:
        it = copy.copy(iterator)
    reporter = Reporter()
    reporter.add_observer("main", _target)  # will fail to run?
    summary = DictSummary()
    trainer_result = defaultdict(list)
    for batch in it:
        batch = convert(batch, self.device)
        for x, crf_pact_structure in zip(*batch):
            sample = crf_pact_structure.sample
            file_path = sample.file_path
            print("evaluate file:{0}".format(file_path))
            video_id = os.path.basename(file_path)
            train_keyword = os.path.basename(os.path.dirname(file_path))  # train_keyword comes from file_path
            if train_keyword not in self.target_dict:
                print("error {} not pre-trained".format(train_keyword))
                continue
            target = self.target_dict[train_keyword]  # choose the right predictor
            # pred_probs is N x (Y'+1)
            pred_labels = target.predict(x, crf_pact_structure, is_bin=False)  # pred_labels is N x 1
            gt_labels = target.get_gt_label_one_graph(np, crf_pact_structure, is_bin=False)  # return N x 1
            trainer_result[train_keyword].append((video_id, pred_labels, gt_labels))
    report_dict = defaultdict(dict)  # key = train_keyword
    for train_keyword, pred_gt_list in trainer_result.items():
        trainer_pred_labels = []
        trainer_gt_labels = []
        for video_id, pred_labels, gt_labels in sorted(pred_gt_list, key=lambda e: e[0]):
            trainer_pred_labels.extend(pred_labels)
            trainer_gt_labels.extend(gt_labels)
        trainer_pred_labels = np.asarray(trainer_pred_labels, dtype=np.int32)
        trainer_gt_labels = np.asarray(trainer_gt_labels, dtype=np.int32)
        assert len(trainer_gt_labels) == len(trainer_pred_labels)
        node_number = len(trainer_gt_labels)
        gt_labels = np.zeros((node_number, len(config.AU_SQUEEZE)), dtype=np.int32)  # frame x Y
        pred_labels = np.zeros((node_number, len(config.AU_SQUEEZE)), dtype=np.int32)
        for node_idx, gt_label in enumerate(trainer_gt_labels):
            if gt_label > 0:
                AU = train_keyword.split("_")[gt_label - 1]
                AU_squeeze_idx = config.AU_SQUEEZE.inv[AU]
                gt_labels[node_idx, AU_squeeze_idx] = 1
            pred_label = trainer_pred_labels[node_idx]
            if pred_label > 0:
                AU = train_keyword.split("_")[pred_label - 1]
                AU_squeeze_idx = config.AU_SQUEEZE.inv[AU]
                pred_labels[node_idx, AU_squeeze_idx] = 1
        gt_labels = gt_labels.reshape(-1, config.BOX_NUM[self.database],
                                      len(config.AU_SQUEEZE))
        pred_labels = pred_labels.reshape(-1, config.BOX_NUM[self.database],
                                          len(config.AU_SQUEEZE))
        gt_labels = np.bitwise_or.reduce(gt_labels, axis=1)  # shape = frame x Y
        pred_labels = np.bitwise_or.reduce(pred_labels, axis=1)  # shape = frame x Y
        gt_labels = np.transpose(gt_labels)  # shape = Y x frame
        pred_labels = np.transpose(pred_labels)  # shape = Y x frame
        for AU_idx, frame_pred in enumerate(pred_labels):
            if config.AU_SQUEEZE[AU_idx] in self.paper_use_AU:
                if config.AU_SQUEEZE[AU_idx] in train_keyword.split("_"):
                    AU = config.AU_SQUEEZE[AU_idx]
                    frame_gt = gt_labels[AU_idx]
                    F1 = f1_score(y_true=frame_gt, y_pred=frame_pred)
                    accuracy = accuracy_score(frame_gt, frame_pred)
                    report_dict[train_keyword][AU] = F1
    merge_dict = {}
    for train_keyword, AU_F1 in report_dict.items():
        for AU, F1 in AU_F1.items():
            if AU in self.paper_use_AU:
                if AU not in merge_dict:
                    merge_dict[AU] = F1
                elif F1 > merge_dict[AU]:
                    merge_dict[AU] = F1
    report_dict["merge_result"] = merge_dict
    observation = {}
    with reporter.scope(observation):
        reporter.report(report_dict, _target)
        reporter.report(summary.compute_mean(), _target)
    print(report_dict)
    return observation