    print('==========', datetime.now() - start_time)


if __name__ == "__main__":
    Mean = [123.68, 116.78, 103.94]
    # Mean = [102.9801, 115.9465, 122.7717][::-1]
    config = Config(False, Mean, None, lr=0.00125, weight_decay=0.0001, num_cls=80,
                    img_max=1333, img_min=800,
                    anchor_scales=[[32], [64], [128], [256], [512]],
                    anchor_ratios=[[0.5, 1, 2], [0.5, 1, 2], [0.5, 1, 2], [0.5, 1, 2], [0.5, 1, 2]],
                    fast_n_sample=512,
                    roi_min_size=[4, 8, 16, 32, 64],
                    roi_train_pre_nms=12000, roi_train_post_nms=2000,
                    roi_test_pre_nms=6000, roi_test_post_nms=1000)
    model = Mask_Rcnn
    model_file = '/home/zhai/PycharmProjects/Demo35/cascade_rcnn/train_M_GPU_single_node/models/Mask_Rcnn_cascade_4x_360000_1_0.pth'
    test(model, config, model_file)
path = '/home/zhai/PycharmProjects/Demo35/cascade_rcnn/data_preprocess/'
Bboxes = [path + 'coco_bboxes_2017.pkl']
img_paths = [path + 'coco_imgpaths_2017.pkl']
masks = [path + 'coco_mask_2017.pkl']
files = [img_paths, Bboxes, masks]
config = Config(True, Mean, files, num_cls=80, lr=0.00125, weight_decay=0.0001,
                batch_size_per_GPU=2, gpus=2, img_max=1333, img_min=800,
                anchor_scales=[[32], [64], [128], [256], [512]],
                anchor_ratios=[[0.5, 1, 2], [0.5, 1, 2], [0.5, 1, 2], [0.5, 1, 2], [0.5, 1, 2]],
                fast_n_sample=512, bias_lr_factor=2,
                roi_min_size=[4, 8, 16, 32, 64],
                roi_train_pre_nms=12000, roi_train_post_nms=2000,
                roi_test_pre_nms=6000, roi_test_post_nms=1000)
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
assert num_gpus == config.gpus
print('--------GPUs--------', num_gpus)
step = 0
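# Note on the WORLD_SIZE check above: the variable is normally set by PyTorch's
# distributed launcher rather than by hand. Assuming this file is the multi-GPU
# training entry point (the script name below is a placeholder), a matching
# 2-GPU, single-node launch would look like:
#   python -m torch.distributed.launch --nproc_per_node=2 train_multi_gpu.py
# so the assert simply verifies that the launcher's world size matches config.gpus.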
            break
        if flag:
            break
    torch.save(model.state_dict(), './models/vgg16_cascade_final_1.pth')


if __name__ == "__main__":
    Mean = [123.68, 116.78, 103.94]
    path = '/home/zhai/PycharmProjects/Demo35/cascade_rcnn/data_preprocess/'
    Bboxes = [path + 'Bboxes_07.pkl', path + 'Bboxes_12.pkl']
    img_paths = [path + 'img_paths_07.pkl', path + 'img_paths_12.pkl']
    files = [img_paths, Bboxes]
    config = Config(True, Mean, files, lr=0.001, weight_decay=0.0005, batch_size_per_GPU=1,
                    img_max=1000, img_min=600, bias_lr_factor=2, roi_min_size=16)
    step = 0
    model = Faster_Rcnn
    x = 1
    pre_model_file = '/home/zhai/PycharmProjects/Demo35/py_Faster_tool/pre_model/vgg16_cf.pth'
    model_file = None
    train(model, config, step, x, pre_model_file, model_file=model_file)
    cv2.imshow('a', im)
    cv2.waitKey(2000)
    return im


if __name__ == "__main__":
    from datetime import datetime
    from cascade_rcnn.tool.config import Config

    Mean = [123.68, 116.78, 103.94]
    path = '/home/zhai/PycharmProjects/Demo35/pytorch_Faster_tool/data_preprocess/'
    Bboxes = [path + 'coco_bboxes_2017.pkl']
    img_paths = [path + 'coco_imgpaths_2017.pkl']
    masks = [path + 'coco_mask_2017.pkl']
    files = [img_paths, Bboxes, masks]
    config = Config(True, Mean, files, lr=0.00125, img_max=1000, img_min=600)
    dataset = Read_Data(config)
    # The second DataLoader below overrides this one; both pass a custom collate_fn
    # (lambda x: x keeps the batch as a raw list), presumably because the images
    # vary in size and cannot be stacked by the default collate.
    dataloader = DataLoader(dataset, batch_size=1, num_workers=16, collate_fn=lambda x: x)
    dataloader = DataLoader(dataset, batch_size=2, collate_fn=func, shuffle=True,
                            drop_last=True, pin_memory=False, num_workers=1)
    c = 0
    for i in range(2):
    Res = {}
    start_time = datetime.now()
    for name in names[:m]:
        i += 1
        print(datetime.now(), i)
        im_file = test_dir + name + '.jpg'
        img = cv2.imread(im_file)
        img = torch.tensor(img)
        res = model(img)
        res = res.cpu()
        res = res.detach().numpy()
        Res[name] = res
    print('==========', datetime.now() - start_time)
    joblib.dump(Res, 'Faster_vgg16_cascade_1.pkl')
    GT = joblib.load('../mAP/voc_GT.pkl')
    AP = mAP(Res, GT, 20, iou_thresh=0.5, use_07_metric=True, e=0.01)
    print(AP)
    AP = AP.mean()
    print(AP)


if __name__ == "__main__":
    Mean = [123.68, 116.78, 103.94]
    model = Faster_Rcnn
    config = Config(False, Mean, None, img_max=1000, img_min=600, roi_min_size=16)
    model_file = '/home/zhai/PycharmProjects/Demo35/cascade_rcnn/train_one_GPU/models/vgg16_cascade_90000_1.pth'
    test(model, config, model_file)
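# For reference: use_07_metric=True in the mAP call above selects the VOC-2007
# 11-point interpolated average precision. A minimal sketch of that metric,
# independent of this repo's mAP implementation (recall and precision are assumed
# to be numpy arrays accumulated over detections sorted by descending score):
import numpy as np


def voc07_ap(recall, precision):
    # Average the best achievable precision at each of the 11 recall thresholds
    # 0.0, 0.1, ..., 1.0; thresholds with no point at recall >= t contribute 0.
    ap = 0.0
    for t in np.arange(0.0, 1.1, 0.1):
        p = float(np.max(precision[recall >= t])) if np.any(recall >= t) else 0.0
        ap += p / 11.0
    return ap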
if __name__ == "__main__":
    Mean = [123.68, 116.78, 103.94]
    path = '/home/zhai/PycharmProjects/Demo35/cascade_rcnn/data_preprocess/'
    Bboxes = [path + 'Bboxes_07.pkl', path + 'Bboxes_12.pkl']
    img_paths = [path + 'img_paths_07.pkl', path + 'img_paths_12.pkl']
    files = [img_paths, Bboxes]
    config = Config(True, Mean, files, lr=0.001, weight_decay=0.0005, batch_size_per_GPU=1,
                    img_max=1000, img_min=600, roi_min_size=16,
                    roi_train_pre_nms=12000, roi_train_post_nms=2000,
                    roi_test_pre_nms=6000, roi_test_post_nms=300,
                    bias_lr_factor=2)
    step = 0
    model = Faster_Rcnn
    x = 1
    pre_model_file = '/home/zhai/PycharmProjects/Demo35/py_Faster_tool/pre_model/vgg16_cf.pth'
    model_file = ''
    train(model, config, step, x, pre_model_file, model_file=model_file)