def evaluate_acc(net, data_iter, ctx):
    """Evaluate the SSD net over ``data_iter``.

    Returns a tuple ``(AP, box_metric)`` where ``AP`` comes from
    ``evaluate_MAP`` over the concatenated detections/labels and
    ``box_metric`` is the MAE over masked box-offset predictions.
    """
    data_iter.reset()
    box_metric = metric.MAE()
    collected_outs = []
    collected_labels = []
    for batch in data_iter:
        data = batch.data[0].as_in_context(ctx)
        label = batch.label[0].as_in_context(ctx)
        anchors, box_preds, cls_preds = net(data)
        # MultiBoxTarget matches anchors against ground truth and emits the
        # anchor offsets, a 0/1 mask and per-anchor class targets; negatives
        # are sampled at a 1:3 positive:negative ratio.
        box_offset, box_mask, cls_labels = MultiBoxTarget(
            anchors, label, cls_preds.transpose((0, 2, 1)),
            negative_mining_ratio=3.0)
        box_metric.update([box_offset], [box_preds * box_mask])
        cls_probs = nd.SoftmaxActivation(cls_preds.transpose((0, 2, 1)),
                                         mode='channel')
        # Decode boxes and suppress duplicates with NMS.
        out = MultiBoxDetection(cls_probs, box_preds, anchors,
                                force_suppress=True, clip=False,
                                nms_threshold=0.45)
        collected_outs.append(out)
        collected_labels.append(label)
    outs = nd.concat(*collected_outs, dim=0) if collected_outs else None
    labels = nd.concat(*collected_labels, dim=0) if collected_labels else None
    AP = evaluate_MAP(outs, labels)
    return AP, box_metric
def testClassify(net, fname):
    """Classify the image file ``fname`` with ``net``.

    Shows the (augmented) image via matplotlib and returns a string of the
    form ``'With prob=..., <class name>'`` for the argmax class.
    """
    with open(fname, 'rb') as f:
        img = image.imdecode(f.read())
    data, _ = transformTest(img, -1, test_augs)
    plt.imshow(data.transpose((1, 2, 0)).asnumpy() / 255)
    # Add the batch axis, run the net and convert logits to probabilities.
    out = nd.SoftmaxActivation(net(data.expand_dims(axis=0)))
    pred = int(nd.argmax(out, axis=1).asscalar())
    prob = out[0][pred].asscalar()
    label = train_set.synsets
    return ('With prob=%f, %s'%(prob, label[pred]))
def predict(img_nd, net, num_classes):
    """Run RefineDet inference on a single preprocessed image batch.

    Feeds ``img_nd`` through the backbone, builds ARM/ODM predictions with
    ``multibox_layer``, refines the anchors, applies channel softmax and
    NMS, and returns the detections as a numpy array.
    """
    tic = time.time()
    ssd_layers = net(img_nd)
    (arm_loc_preds, arm_cls_preds, arm_anchor_boxes,
     odm_loc_preds, odm_cls_preds) = multibox_layer(
        ssd_layers, num_classes, sizes, ratios, normalizations)
    # Refine the ARM anchors with the ARM location offsets to get ODM anchors.
    odm_anchor_boxes = refine_anchor_generator(arm_anchor_boxes, arm_loc_preds)
    odm_cls_prob = nd.SoftmaxActivation(odm_cls_preds, mode='channel')
    detections = MultiBoxDetection(odm_cls_prob, odm_loc_preds,
                                   odm_anchor_boxes, force_suppress=True,
                                   clip=False, nms_threshold=.5)
    result = detections.asnumpy()
    print(result.shape)
    print('detect time:', time.time() - tic)
    return result
def predict(img_nd, net):
    """Run SSD inference on a single preprocessed image batch.

    Applies channel softmax to the class predictions, decodes boxes with
    NMS, and returns the detections as a numpy array.
    """
    tic = time.time()
    anchors, box_preds, cls_preds = net(img_nd)
    scores = nd.SoftmaxActivation(cls_preds.transpose((0, 2, 1)),
                                  mode='channel')
    detections = MultiBoxDetection(scores, box_preds, anchors,
                                   force_suppress=True, clip=False,
                                   nms_threshold=0.1)
    result = detections.asnumpy()
    print(result.shape)
    print('detect time:', time.time() - tic)
    return result
def main():
    """Run the trained I3D RGB model on the sample clip and print the top-1 class."""
    kinetics_classes = [line.strip() for line in open(_LABEL_MAP_PATH)]
    # Sample RGB input (3 channels), reshaped to the model's expected layout.
    clip = mx.nd.array(np.load(_SAMPLE_PATHS['rgb']), ctx=ctx)
    clip = clip.reshape((_BATCH_SIZE, _NUM_CHANNELS, _SAMPLE_VIDEO_FRAMES,
                         _IMAGE_SIZE, _IMAGE_SIZE))
    # Build the model and load trained parameters.
    net = i3d.i3d()
    net.load_parameters(os.path.join(_SAVE_DIR, 'first'))
    logits = net(clip)
    # Top-1 class by softmax probability.
    probs = nd.SoftmaxActivation(logits).asnumpy()[0]
    top1 = np.argsort(probs)[::-1][0]
    print(kinetics_classes[top1])
def evaluate_acc(net, data_iter, ctx):
    """Evaluate the RefineDet net over ``data_iter``.

    Runs the ARM/ODM two-step pipeline per batch, accumulates detections
    and labels, and returns ``(AP, box_metric)`` where ``AP`` comes from
    ``evaluate_MAP`` and ``box_metric`` is the MAE over masked ODM box
    offsets.
    """
    data_iter.reset()
    box_metric = metric.MAE()
    outs, labels = None, None
    for i, batch in enumerate(data_iter):
        data = batch.data[0].as_in_context(ctx)
        label = batch.label[0].as_in_context(ctx)
        ssd_layers = net(data)
        arm_loc_preds, arm_cls_preds, arm_anchor_boxes, odm_loc_preds, odm_cls_preds = multibox_layer(ssd_layers,\
            num_classes,sizes,ratios,normalizations)
        # ARM stage: collapse the label to objectness via the custom op,
        # then match anchors to ground truth.
        label_arm = nd.Custom(label, op_type='modify_label')
        arm_tmp = MultiBoxTarget(arm_anchor_boxes,label_arm,arm_cls_preds,overlap_threshold=.5,\
            negative_mining_ratio=3,negative_mining_thresh=.5)
        arm_loc_target = arm_tmp[0]  # box offsets
        arm_loc_target_mask = arm_tmp[1]  # box mask (only 0/1)
        arm_cls_target = arm_tmp[2]  # class target for every anchor
        # ODM stage: refine ARM anchors with the ARM location predictions.
        odm_anchor_boxes = refine_anchor_generator(
            arm_anchor_boxes, arm_loc_preds)  # (batch, h*w*num_anchors[:layers], 4)
        odm_anchor_boxes_bs = nd.split(data=odm_anchor_boxes,
                                       axis=0,
                                       num_outputs=label.shape[0])  # list
        # Compute ODM targets one image at a time, then concat back.
        odm_loc_target = []
        odm_loc_target_mask = []
        odm_cls_target = []
        label_bs = nd.split(data=label, axis=0, num_outputs=label.shape[0])
        odm_cls_preds_bs = nd.split(data=odm_cls_preds,
                                    axis=0,
                                    num_outputs=label.shape[0])
        for j in range(label.shape[0]):
            # NOTE(review): nd.split with num_outputs=1 does not squeeze, so
            # the single-image case keeps its batch axis and the two branches
            # use different expand_dims handling (and mining ratios 2 vs 3).
            if label.shape[0] == 1:
                odm_tmp = MultiBoxTarget(odm_anchor_boxes_bs[j].expand_dims(axis=0),label_bs[j].expand_dims(axis=0),\
                    odm_cls_preds_bs[j].expand_dims(axis=0),overlap_threshold=.5,negative_mining_ratio=2,negative_mining_thresh=.5)
            # multiple images per batch
            else:
                odm_tmp = MultiBoxTarget(odm_anchor_boxes_bs[j],label_bs[j],\
                    odm_cls_preds_bs[j],overlap_threshold=.5,negative_mining_ratio=3,negative_mining_thresh=.5)
            odm_loc_target.append(odm_tmp[0])
            odm_loc_target_mask.append(odm_tmp[1])
            odm_cls_target.append(odm_tmp[2])
        odm_loc_target = nd.concat(*odm_loc_target, dim=0)
        odm_loc_target_mask = nd.concat(*odm_loc_target_mask, dim=0)
        odm_cls_target = nd.concat(*odm_cls_target, dim=0)
        # Negative filtering: drop easy negatives using ARM classification.
        group = nd.Custom(arm_cls_preds, odm_cls_target, odm_loc_target_mask,
                          op_type='negative_filtering')
        odm_cls_target = group[0]  # ODM class targets filtered by ARM scores
        odm_loc_target_mask = group[1]  # filtered-out entries get mask 0
        odm_cls_prob = nd.SoftmaxActivation(odm_cls_preds, mode='channel')
        out = MultiBoxDetection(odm_cls_prob,odm_loc_preds,odm_anchor_boxes,\
            force_suppress=True,clip=False,nms_threshold=.5,nms_topk=400)
        if outs is None:
            outs = out
            labels = label
        else:
            outs = nd.concat(outs, out, dim=0)
            labels = nd.concat(labels, label, dim=0)
        box_metric.update([odm_loc_target],
                          [odm_loc_preds * odm_loc_target_mask])
    AP = evaluate_MAP(outs, labels)
    return AP, box_metric
def mytrain(net,num_classes,train_data,valid_data,ctx,start_epoch, end_epoch, \
            arm_cls_loss=arm_cls_loss,cls_loss=cls_loss,box_loss=box_loss,trainer=None):
    """Train the RefineDet net and log metrics/plots.

    Per batch: forward, build ARM targets (via the ``modify_label`` custom
    op + MultiBoxTarget), refine anchors, build ODM targets per image,
    negative-filter with ARM scores, combine ARM/ODM cls+loc losses
    normalized by positive-anchor counts, backprop and step. Per epoch:
    evaluate train/valid mAP, log to mxboard, and finally plot loss curves
    to ``loss_curve.png``.

    If ``trainer`` is None, an Adam trainer (lr=0.001, clip_gradient=2.0)
    is created over ``net.collect_params()``.
    """
    if trainer is None:
        trainer = gluon.Trainer(net.collect_params(), 'adam', {
            'learning_rate': 0.001,
            'clip_gradient': 2.0
        })
    box_metric = metric.MAE()
    # Collect parameters (kept for gradient logging below).
    params = net.collect_params()
    # Summary writer that logs data and flushes to file every 5 seconds.
    sw = SummaryWriter(logdir='./logs', flush_secs=5)
    global_step = 0
    for e in range(start_epoch, end_epoch):
        train_data.reset()
        valid_data.reset()
        box_metric.reset()
        tic = time.time()
        _loss = [0, 0]
        arm_loss = [0, 0]
        outs, labels = None, None
        for i, batch in enumerate(train_data):
            data = batch.data[0].as_in_context(ctx)
            label = batch.label[0].as_in_context(ctx)
            with autograd.record():
                # 1. run the feature-extraction network
                ssd_layers = net(data)
                arm_loc_preds, arm_cls_preds, arm_anchor_boxes, odm_loc_preds, odm_cls_preds = multibox_layer(ssd_layers,\
                    num_classes,sizes,ratios,normalizations)
                # 2. ARM stage
                ## 2.1 collapse labels to objectness, e.g. [-1, 0, ...]
                label_arm = nd.Custom(label, op_type='modify_label')
                arm_tmp = MultiBoxTarget(arm_anchor_boxes,label_arm,arm_cls_preds,overlap_threshold=.5,\
                    negative_mining_ratio=3,negative_mining_thresh=.5)
                arm_loc_target = arm_tmp[0]  # box offsets
                arm_loc_target_mask = arm_tmp[1]  # box mask (only 0/1)
                arm_cls_target = arm_tmp[2]  # class target for every anchor
                # 3. ODM stage
                ## 3.1 refine anchors using the ARM location predictions
                odm_anchor_boxes = refine_anchor_generator(
                    arm_anchor_boxes,
                    arm_loc_preds)  # (batch, h*w*num_anchors[:layers], 4)
                odm_anchor_boxes_bs = nd.split(
                    data=odm_anchor_boxes, axis=0,
                    num_outputs=label.shape[0])  # list
                ## 3.2 compute ODM targets one image at a time
                odm_loc_target = []
                odm_loc_target_mask = []
                odm_cls_target = []
                label_bs = nd.split(data=label, axis=0,
                                    num_outputs=label.shape[0])
                odm_cls_preds_bs = nd.split(data=odm_cls_preds, axis=0,
                                            num_outputs=label.shape[0])
                for j in range(label.shape[0]):
                    # NOTE(review): nd.split with num_outputs=1 keeps the batch
                    # axis, hence the different expand_dims handling (and
                    # mining ratios 2 vs 3) in the two branches.
                    if label.shape[0] == 1:
                        odm_tmp = MultiBoxTarget(odm_anchor_boxes_bs[j].expand_dims(axis=0),label_bs[j].expand_dims(axis=0),\
                            odm_cls_preds_bs[j].expand_dims(axis=0),overlap_threshold=.5,negative_mining_ratio=2,negative_mining_thresh=.5)
                    # multiple images per batch
                    else:
                        odm_tmp = MultiBoxTarget(odm_anchor_boxes_bs[j],label_bs[j],\
                            odm_cls_preds_bs[j],overlap_threshold=.5,negative_mining_ratio=3,negative_mining_thresh=.5)
                    odm_loc_target.append(odm_tmp[0])
                    odm_loc_target_mask.append(odm_tmp[1])
                    odm_cls_target.append(odm_tmp[2])
                # concat per-image targets back to one batch
                odm_loc_target = nd.concat(*odm_loc_target, dim=0)
                odm_loc_target_mask = nd.concat(*odm_loc_target_mask, dim=0)
                odm_cls_target = nd.concat(*odm_cls_target, dim=0)
                # 4. negative filtering via ARM classification scores
                group = nd.Custom(arm_cls_preds, odm_cls_target,
                                  odm_loc_target_mask,
                                  op_type='negative_filtering')
                odm_cls_target = group[0]  # ODM cls filtered by ARM scores
                odm_loc_target_mask = group[1]  # filtered-out entries get mask 0
                # 5. compute the combined loss
                # TODO: add 1/N_arm, 1/N_odm (number of positive anchors)
                arm_loss_cls = arm_cls_loss(arm_cls_preds.transpose((0, 2, 1)),
                                            arm_cls_target)
                arm_loss_loc = box_loss(arm_loc_preds, arm_loc_target,
                                        arm_loc_target_mask)
                tmp = odm_cls_preds.transpose((0, 2, 1))
                odm_loss_cls = cls_loss(odm_cls_preds.transpose((0, 2, 1)),
                                        odm_cls_target)
                odm_loss_loc = box_loss(odm_loc_preds, odm_loc_target,
                                        odm_loc_target_mask)
                # Normalize each stage's loss by its positive-anchor count
                # (mask sum / 4 coordinates per box).
                loss = 1/(nd.sum(arm_loc_target_mask,axis=1)/4.0) *(arm_loss_cls+arm_loss_loc) + \
                       1/(nd.sum(odm_loc_target_mask,axis=1)/4.0)*(odm_loss_cls+odm_loss_loc)
            sw.add_scalar(tag='loss', value=loss.mean().asscalar(),
                          global_step=global_step)
            global_step += 1
            loss.backward(retain_graph=False)
            trainer.step(data.shape[0])
            _loss[0] += nd.mean(odm_loss_cls).asscalar()
            _loss[1] += nd.mean(odm_loss_loc).asscalar()
            arm_loss[0] += nd.mean(arm_loss_cls).asscalar()
            arm_loss[1] += nd.mean(arm_loss_loc).asscalar()
            arm_cls_prob = nd.SoftmaxActivation(arm_cls_preds, mode='channel')
            odm_cls_prob = nd.SoftmaxActivation(odm_cls_preds, mode='channel')
            out = MultiBoxDetection(odm_cls_prob,odm_loc_preds,odm_anchor_boxes,\
                force_suppress=True,clip=False,nms_threshold=.5,nms_topk=400)
            if outs is None:
                outs = out
                labels = label
            else:
                outs = nd.concat(outs, out, dim=0)
                labels = nd.concat(labels, label, dim=0)
            box_metric.update([odm_loc_target],
                              [odm_loc_preds * odm_loc_target_mask])
        print('-------{} epoch end ------'.format(e))
        train_AP = evaluate_MAP(outs, labels)
        valid_AP, val_box_metric = evaluate_acc(net, valid_data, ctx)
        info["train_ap"].append(train_AP)
        info["valid_ap"].append(valid_AP)
        info["loss"].append(_loss)
        print('odm loss: ', _loss)
        print('arm loss: ', arm_loss)
        if e == 0:
            sw.add_graph(net)
        # Log the gradient histogram of one conv layer to check convergence.
        grads_4_2 = net.collect_params().get('vgg0_conv5_weight').grad()
        sw.add_histogram(tag='vgg0_conv5_weight', values=grads_4_2,
                         global_step=e, bins=1000)
        if (e + 1) % 5 == 0:
            print(
                "epoch: %d time: %.2f cls loss: %.4f,reg loss: %.4f lr: %.5f"
                % (e, time.time() - tic, _loss[0], _loss[1],
                   trainer.learning_rate))
            print("train mae: %.4f AP: %.4f" % (box_metric.get()[1], train_AP))
            print("valid mae: %.4f AP: %.4f" % (val_box_metric.get()[1],
                                                valid_AP))
        sw.add_scalar(tag='train_AP', value=train_AP, global_step=e)
        sw.add_scalar(tag='valid_AP', value=valid_AP, global_step=e)
    sw.close()
    if True:
        info["loss"] = np.array(info["loss"])
        info["cls_loss"] = info["loss"][:, 0]
        info["box_loss"] = info["loss"][:, 1]
        plt.figure(figsize=(12, 4))
        plt.subplot(121)
        plot("train_ap")
        plot("valid_ap")
        plt.legend(loc="upper right")
        plt.subplot(122)
        plot("cls_loss")
        plot("box_loss")
        plt.legend(loc="upper right")
        plt.savefig('loss_curve.png')
def detect_image(img_path):
    """Run SSD detection on one image file and plot the results.

    Builds a 300x300 SSD on GPU 0, loads ``model/ssd.params``, runs the
    image through it, NMS-filters the boxes and draws every detection
    scoring above 0.3 on top of the original image with matplotlib.
    """
    if not os.path.exists(img_path):
        # FIX: bail out early — the original fell through and crashed in
        # cv2.cvtColor because cv2.imread returns None for a missing file.
        print('can not find image: ', img_path)
        return
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (cfg.img_size, cfg.img_size))
    origin_img = img.copy()
    # Normalize to the training distribution and add the batch axis (NCHW).
    img = (img / 255. - cfg.mean) / cfg.std
    img = np.transpose(img, (2, 0, 1))
    img = img[np.newaxis, :]
    img = F.array(img)
    print('input image shape: ', img.shape)
    ctx = mx.gpu(0)
    net = build_ssd("test", 300, ctx)
    net.initialize(mx.init.Xavier(magnitude=2), ctx=ctx)
    net.collect_params().reset_ctx(ctx)
    params = 'model/ssd.params'
    net.load_params(params, ctx=ctx)
    anchors, cls_preds, box_preds = net(img.as_in_context(ctx))
    print('anchors', anchors)
    print('class predictions', cls_preds)
    print('box delta predictions', box_preds)
    # Convert class predictions to probabilities using channel softmax.
    cls_probs = F.SoftmaxActivation(F.transpose(cls_preds, (0, 2, 1)),
                                    mode='channel')
    # Apply offsets to anchor boxes, run non-maximum suppression, etc.
    output = MultiBoxDetection(*[cls_probs, box_preds, anchors],
                               force_suppress=True, clip=True,
                               nms_threshold=0.01)
    output = output.asnumpy()
    pens = dict()
    plt.imshow(origin_img)
    thresh = 0.3
    # Loop-invariant data hoisted out of the per-detection loop.
    voc_class_name = [
        'person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'aeroplane',
        'bicycle', 'boat', 'bus', 'car', 'motorbike', 'train', 'bottle',
        'chair', 'diningtable', 'pottedplant', 'sofa', 'tvmonitor'
    ]
    scales = [origin_img.shape[1], origin_img.shape[0]] * 2
    for det in output[0]:
        cid = int(det[0])
        if cid < 0:
            continue  # class id -1 marks an invalid/suppressed entry
        score = det[1]
        if score < thresh:
            continue
        if cid not in pens:
            # One random color per class.
            pens[cid] = (random.random(), random.random(), random.random())
        # Rescale normalized corners back to original-image pixels.
        xmin, ymin, xmax, ymax = [
            int(p * s) for p, s in zip(det[2:6].tolist(), scales)
        ]
        rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                             fill=False, edgecolor=pens[cid], linewidth=3)
        plt.gca().add_patch(rect)
        text = voc_class_name[cid]
        plt.gca().text(xmin, ymin - 2,
                       '{:s} {:.3f}'.format(text, score),
                       bbox=dict(facecolor=pens[cid], alpha=0.5),
                       fontsize=12, color='white')
    plt.axis('off')
    plt.show()
def _test_model(net, ctx, x):
    """Smoke-test helper: initialize ``net`` on ``ctx``, run one forward
    pass on ``x`` and push the output through a softmax (result discarded)."""
    net.initialize()
    net.collect_params().reset_ctx(ctx)
    raw_output = net(x)
    probabilities = nd.SoftmaxActivation(raw_output)
# Walk the validation directory tree (one subdirectory per class), run net1
# on every image and report running accuracy. Misclassified images are
# printed; per-image softmax outputs are stashed in `results`.
right, count = 0, 0
val_dirs = os.listdir(val_path)
for index, dirs in enumerate(val_dirs):
    # The subdirectory name is the ground-truth class label.
    tempClass = dirs
    dirs = os.path.join(val_path, dirs)
    for image_path in os.listdir(dirs):
        image_path = os.path.join(dirs, image_path)
        with open(image_path, 'rb') as f:
            img = image.imdecode(f.read())
        data = transform_predict(img)
        out1 = net1(data.as_in_context(ctx))
        # Average the softmax over augmented crops.
        out1 = nd.SoftmaxActivation(out1).mean(axis=0)
        probs = out1.asnumpy()
        pred_class = np.argmax(probs)
        results[image_path] = probs
        count += 1
        if sorted_ids[pred_class] == int(tempClass):
            right += 1
        else:
            # Log the miss: path, predicted id, its confidence, true class.
            print(image_path, sorted_ids[pred_class], probs[pred_class],
                  tempClass)
print('%d/%d, %.4f' % (right, count, float(right) / count))
# Load the best checkpoint, run the model over every image listed in
# `test_file`, and pickle the averaged softmax outputs keyed by image path.
net = Model(pretrained_model_name=pretrianed_model_name,
            pretrained=pretrained,
            ctx=ctx)
net.hybridize()
net.collect_params().load(best_model_weight_path)
# Class ids 1..100 ordered by their string representation, matching the
# lexicographic label order used during training.
sorted_ids = list(range(1, 101))
sorted_ids.sort(key=lambda x: str(x))
results = {}
with open(test_file, 'r') as file:
    contents = file.readlines()
for index, content in enumerate(contents):
    print('%d/%d' % (index, len(contents)), end='\r')
    content = content.replace('\n', '')
    image_path = os.path.join(test_path, content)
    with open(image_path, 'rb') as f:
        img = image.imdecode(f.read())
    data = transform_predict(img)
    out = net(data.as_in_context(ctx))
    # Average the softmax over augmented crops.
    out = nd.SoftmaxActivation(out).mean(axis=0)
    results[image_path] = out.asnumpy()
# FIX: the original passed an anonymous open(...) to pickle.dump, leaking the
# handle; a context manager guarantees the file is flushed and closed.
with open('./datasets/%s_pred_test.pickle' % pretrianed_model_name,
          'wb') as pickle_file:
    pickle.dump(results, pickle_file)
def mytrain(net, train_data, valid_data, ctx, start_epoch, end_epoch,
            cls_loss, box_loss, trainer=None):
    """Train the SSD net and plot loss curves.

    Per batch: forward, build targets with MultiBoxTarget (1:3 negative
    mining), combine classification and box losses, backprop and step.
    Per epoch: compute train/valid mAP via ``evaluate_MAP`` /
    ``evaluate_acc`` and append metrics to the global ``info`` dict;
    finally plot AP and loss curves to ``loss_curve.png``.

    If ``trainer`` is None, an SGD trainer (lr=0.1, wd=1e-3) is created
    over ``net.collect_params()``.
    """
    if trainer is None:
        trainer = gluon.Trainer(net.collect_params(), 'sgd', {
            'learning_rate': 0.1,
            'wd': 1e-3
        })
    box_metric = metric.MAE()
    for e in range(start_epoch, end_epoch):
        train_data.reset()
        valid_data.reset()
        box_metric.reset()
        tic = time.time()
        _loss = [0, 0]
        # Step-decay schedule: shrink the learning rate at fixed epochs.
        if e == 100 or e == 120 or e == 150 or e == 180 or e == 200:
            trainer.set_learning_rate(trainer.learning_rate * 0.2)
        outs, labels = None, None
        for i, batch in enumerate(train_data):
            data = batch.data[0].as_in_context(ctx)
            label = batch.label[0].as_in_context(ctx)
            with autograd.record():
                anchors, box_preds, cls_preds = net(data)
                # negative_mining_ratio adds 3x negatives to the mask so they
                # also take part in the loss computation.
                box_offset, box_mask, cls_labels = MultiBoxTarget(
                    anchors, label, cls_preds.transpose(axes=(0, 2, 1)),
                    negative_mining_ratio=3.0)  # , overlap_threshold=0.75)
                loss1 = cls_loss(cls_preds, cls_labels)
                loss2 = box_loss(box_preds, box_offset, box_mask)
                loss = loss1 + loss2
            loss.backward()
            trainer.step(data.shape[0])
            _loss[0] += nd.mean(loss1).asscalar()
            _loss[1] += nd.mean(loss2).asscalar()
            cls_probs = nd.SoftmaxActivation(cls_preds.transpose((0, 2, 1)),
                                             mode='channel')
            out = MultiBoxDetection(cls_probs, box_preds, anchors,
                                    force_suppress=True, clip=False,
                                    nms_threshold=0.45)
            if outs is None:
                outs = out
                labels = label
            else:
                outs = nd.concat(outs, out, dim=0)
                labels = nd.concat(labels, label, dim=0)
            box_metric.update([box_offset], [box_preds * box_mask])
        train_AP = evaluate_MAP(outs, labels)
        valid_AP, val_box_metric = evaluate_acc(net, valid_data, ctx)
        info["train_ap"].append(train_AP)
        info["valid_ap"].append(valid_AP)
        info["loss"].append(_loss)
        if (e + 1) % 10 == 0:
            print("epoch: %d time: %.2f loss: %.4f, %.4f lr: %.5f" %
                  (e, time.time() - tic, _loss[0], _loss[1],
                   trainer.learning_rate))
            print("train mae: %.4f AP: %.4f" % (box_metric.get()[1], train_AP))
            print("valid mae: %.4f AP: %.4f" % (val_box_metric.get()[1],
                                                valid_AP))
    if True:
        info["loss"] = np.array(info["loss"])
        info["cls_loss"] = info["loss"][:, 0]
        info["box_loss"] = info["loss"][:, 1]
        plt.figure(figsize=(12, 4))
        plt.subplot(121)
        plot("train_ap")
        plot("valid_ap")
        plt.legend(loc="upper right")
        plt.subplot(122)
        plot("cls_loss")
        plot("box_loss")
        plt.legend(loc="upper right")
        plt.savefig('loss_curve.png')