# NOTE: these configs assume `os`, `torchvision.transforms as transforms`, and the repo's
# CocoDetection / RandomFlip / RandomCrop / RandomTranslate / Normalize / Resize dataset
# utilities (plus COCO2017_path for the COCO configs) are imported at the top of each
# file; the imports are omitted in this dump.

# --- Config: resnet50_centernet on the lanmu dataset ---
class Config(object):
    log = './log_ctn_lanmu'  # Path to save log
    checkpoint_path = './checkpoints_lanmu'  # Path to store checkpoint model
    resume = './checkpoints_lanmu/latest.pth'  # load checkpoint model
    evaluate = None  # evaluate model path

    # train_dataset_path = os.path.join('/home/jovyan/data-vol-polefs-1/dataset/taibiao', 'images/train2017')
    # val_dataset_path = os.path.join('/home/jovyan/data-vol-polefs-1/dataset/taibiao', 'images/val2017')
    # dataset_annotations_path = os.path.join('/home/jovyan/data-vol-polefs-1/dataset/taibiao', 'annotations')
    train_dataset_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/dataset/lanmu', 'images/train2017')
    val_dataset_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/dataset/lanmu', 'images/val2017')
    dataset_annotations_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/dataset/lanmu', 'annotations')

    network = "resnet50_centernet"
    pretrained = False

    #*********************************************************************************
    multi_head = False
    # the pretrained centernet model to load
    pre_model_dir = '/home/jovyan/data-vol-1/zhangze/code/pytorch-ImageNet-CIFAR-COCO-VOC-training/detection_experiments/resnet18_centernet_coco_distributed_apex_resize512/checkpoints/best.pth'
    num_classes = [41]
    #*********************************************************************************

    seed = 0
    input_image_size = 512
    use_multi_scale = False
    multi_scale_range = [0.6, 1.4]
    stride = 4

    train_dataset = CocoDetection(image_root_dir=train_dataset_path,
                                  annotation_root_dir=dataset_annotations_path,
                                  set="train2017",
                                  transform=transforms.Compose([
                                      RandomFlip(flip_prob=0.5),
                                      RandomCrop(crop_prob=0.5),
                                      RandomTranslate(translate_prob=0.5),
                                      Normalize(),
                                  ]))
    val_dataset = CocoDetection(image_root_dir=val_dataset_path,
                                annotation_root_dir=dataset_annotations_path,
                                set="val2017",
                                transform=transforms.Compose([
                                    Normalize(),
                                    Resize(resize=input_image_size),
                                ]))

    epochs = 140
    milestones = [90, 120]
    per_node_batch_size = 16
    lr = 5e-4
    num_workers = 4
    print_interval = 100
    apex = True
    sync_bn = False
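# The config above points `pre_model_dir` at a previously trained CenterNet checkpoint.
# The sketch below is an assumption about how such a checkpoint might be loaded into a
# freshly built model before fine-tuning (the repo's own loading code may differ):
# `strict=False` plus shape filtering skips head weights that no longer match the new
# `num_classes`.
import torch

def load_pretrained_weights(model, pre_model_dir):
    # load the checkpoint on CPU, then copy only tensors whose shapes still match
    state_dict = torch.load(pre_model_dir, map_location='cpu')
    if 'model_state_dict' in state_dict:  # hypothetical key; depends on how it was saved
        state_dict = state_dict['model_state_dict']
    filtered = {k: v for k, v in state_dict.items()
                if k in model.state_dict() and v.shape == model.state_dict()[k].shape}
    model.load_state_dict(filtered, strict=False)
    return model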
class Config(object): log = './log' # Path to save log checkpoint_path = './checkpoints' # Path to store checkpoint model resume = './checkpoints/latest.pth' # load checkpoint model evaluate = None # evaluate model path train_dataset_path = os.path.join(COCO2017_path, 'images/train2017') val_dataset_path = os.path.join(COCO2017_path, 'images/val2017') dataset_annotations_path = os.path.join(COCO2017_path, 'annotations') network = "resnet18_centernet" pretrained = False num_classes = 80 seed = 0 input_image_size = 512 use_multi_scale = True multi_scale_range = [0.6, 1.4] stride = 4 train_dataset = CocoDetection(image_root_dir=train_dataset_path, annotation_root_dir=dataset_annotations_path, set="train2017", transform=transforms.Compose([ RandomFlip(flip_prob=0.5), RandomCrop(crop_prob=0.5), RandomTranslate(translate_prob=0.5), Normalize(), ])) val_dataset = CocoDetection(image_root_dir=val_dataset_path, annotation_root_dir=dataset_annotations_path, set="val2017", transform=transforms.Compose([ Normalize(), Resize(resize=input_image_size), ])) epochs = 140 milestones = [90, 120] per_node_batch_size = 24 lr = 5e-4 num_workers = 4 print_interval = 100 apex = True sync_bn = False
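# A minimal sketch (assumed, not taken from the repo's training script) of how a config
# like the one above is typically consumed: wrap `train_dataset` / `val_dataset` in
# DataLoaders driven by `per_node_batch_size` and `num_workers`. A COCO-style detection
# dataset usually needs a custom collate_fn for variable-length annotations, and the
# distributed / sync_bn setup implied by `per_node_batch_size` would add a
# DistributedSampler per process; both are omitted here.
from torch.utils.data import DataLoader

def build_loaders(cfg):
    train_loader = DataLoader(cfg.train_dataset,
                              batch_size=cfg.per_node_batch_size,
                              shuffle=True,
                              num_workers=cfg.num_workers,
                              pin_memory=True)
    val_loader = DataLoader(cfg.val_dataset,
                            batch_size=cfg.per_node_batch_size,
                            shuffle=False,
                            num_workers=cfg.num_workers)
    return train_loader, val_loader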
class Config(object): log = './log' # Path to save log checkpoint_path = './checkpoints' # Path to store checkpoint model resume = './checkpoints/latest.pth' # load checkpoint model evaluate = None # evaluate model path train_dataset_path = os.path.join(COCO2017_path, 'images/train2017') val_dataset_path = os.path.join(COCO2017_path, 'images/val2017') dataset_annotations_path = os.path.join(COCO2017_path, 'annotations') network = "resnet50_retinanet" pretrained = False num_classes = 80 seed = 0 input_image_size = 667 train_dataset = CocoDetection(image_root_dir=train_dataset_path, annotation_root_dir=dataset_annotations_path, set="train2017", transform=transforms.Compose([ RandomFlip(flip_prob=0.5), RandomCrop(crop_prob=0.5), RandomTranslate(translate_prob=0.5), Normalize(), Resize(resize=input_image_size), ])) val_dataset = CocoDetection(image_root_dir=val_dataset_path, annotation_root_dir=dataset_annotations_path, set="val2017", transform=transforms.Compose([ Normalize(), Resize(resize=input_image_size), ])) epochs = 12 batch_size = 16 lr = 1e-4 num_workers = 4 print_interval = 1 apex = True
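# Hedged sketch of how `resume` / `checkpoint_path` in these configs are commonly used:
# if a latest checkpoint exists, restore the model (and optionally the optimizer and
# epoch counter) before continuing training. The checkpoint keys below are assumptions;
# they must match whatever the actual training loop saves.
import os
import torch

def maybe_resume(model, optimizer, cfg):
    start_epoch = 0
    if cfg.resume and os.path.isfile(cfg.resume):
        checkpoint = torch.load(cfg.resume, map_location='cpu')
        model.load_state_dict(checkpoint['model_state_dict'])          # assumed key
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])  # assumed key
        start_epoch = checkpoint.get('epoch', 0)
    return start_epoch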
# --- Config: resnet50_centernet on the obj365 small-sample dataset ---
class Config(object):
    log = './log_obj365'  # Path to save log
    checkpoint_path = './checkpoints_obj365'  # Path to store checkpoint model
    resume = './checkpoints_obj365/latest.pth'  # load checkpoint model
    evaluate = None  # evaluate model path

    train_dataset_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/small_sample/obj365', 'images/train2017')
    val_dataset_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/small_sample/obj365', 'images/val2017')
    dataset_annotations_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/small_sample/obj365', 'annotations')

    network = "resnet50_centernet"
    pretrained = False
    num_classes = [365]

    seed = 0
    input_image_size = 512

    multi_head = False  # must be used together with the head options below
    # use mlp layer after head
    cls_mlp = False
    # load the params to head2
    load_head = False
    # use selayer before head
    selayer = False
    # use ttf head in centernet head
    use_ttf = False
    pre_model_dir = None

    use_multi_scale = False
    multi_scale_range = [0.6, 1.4]
    stride = 4

    train_dataset = CocoDetection(image_root_dir=train_dataset_path,
                                  annotation_root_dir=dataset_annotations_path,
                                  set="train2017",
                                  transform=transforms.Compose([
                                      RandomFlip(flip_prob=0.5),
                                      RandomCrop(crop_prob=0.5),
                                      RandomTranslate(translate_prob=0.5),
                                      Normalize(),
                                  ]))
    val_dataset = CocoDetection(image_root_dir=val_dataset_path,
                                annotation_root_dir=dataset_annotations_path,
                                set="val2017",
                                transform=transforms.Compose([
                                    Normalize(),
                                    Resize(resize=input_image_size),
                                ]))

    epochs = 140
    milestones = [90, 120]
    per_node_batch_size = 8
    lr = 5e-4
    num_workers = 16
    print_interval = 100
    apex = True
    sync_bn = False
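# The `epochs` / `milestones` / `lr` triplet above suggests a step-decay schedule that
# drops the learning rate at epochs 90 and 120. A minimal sketch, assuming an Adam
# optimizer and a decay factor of 0.1 (both are assumptions, not read from the repo):
import torch

def build_optimizer_and_scheduler(model, cfg):
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=cfg.milestones,
                                                     gamma=0.1)
    return optimizer, scheduler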
# --- Config: multi-head resnet50_centernet on the taibiao dataset ---
class Config(object):
    version = 10
    log = './log_ctn_multi_v' + str(version)  # Path to save log
    checkpoint_path = './checkpoints_multi_v' + str(version)  # Path to store checkpoint model
    resume = checkpoint_path + '/latest.pth'  # load checkpoint model
    evaluate = None  # evaluate model path

    train_dataset_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/dataset/taibiao', 'images/train2017')
    val_dataset_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/dataset/taibiao', 'images/val2017')
    dataset_annotations_path = os.path.join(
        '/home/jovyan/data-vol-polefs-1/dataset/taibiao', 'annotations')
    # train_dataset_path = os.path.join('/home/jovyan/data-vol-polefs-1/dataset/lanmu', 'images/train2017')
    # val_dataset_path = os.path.join('/home/jovyan/data-vol-polefs-1/dataset/lanmu', 'images/val2017')
    # dataset_annotations_path = os.path.join('/home/jovyan/data-vol-polefs-1/dataset/lanmu', 'annotations')
    # train_dataset_path = os.path.join('/home/jovyan/data-vol-polefs-2/datasets_shenhe/scripts/03-08', 'crop_images')
    # val_dataset_path = os.path.join('/home/jovyan/data-vol-polefs-2/datasets_shenhe/scripts/03-08', 'train2017')
    # dataset_annotations_path = os.path.join('/home/jovyan/data-vol-polefs-2/datasets_shenhe/scripts/03-08', 'annotations')

    network = "resnet50_centernet"
    pretrained = False

    #*********************************************************************************
    multi_head = True  # must be used together with the head options below
    # use mlp layer after head
    cls_mlp = False
    # load the params to head2
    load_head = False
    # use selayer before head
    selayer = True
    # use ttf head in centernet head
    use_ttf = True
    # pre_model_dir = '/home/jovyan/data-vol-1/zhangze/code/pytorch-ImageNet-CIFAR-COCO-VOC-training/detection_experiments/resnet18_centernet_coco_distributed_apex_resize512/checkpoints/best.pth'
    pre_model_dir = "/home/jovyan/data-vol-1/zhangze/code/multi_task/train/checkpoints_multi_v2/best.pth"
    # head1 / head2 classes
    num_classes = [41, 1]
    #*********************************************************************************

    seed = 0
    input_image_size = 512
    use_multi_scale = False
    multi_scale_range = [0.6, 1.4]
    stride = 4

    train_dataset = CocoDetection(image_root_dir=train_dataset_path,
                                  annotation_root_dir=dataset_annotations_path,
                                  set="train2017",
                                  transform=transforms.Compose([
                                      RandomFlip(flip_prob=0.5),
                                      RandomCrop(crop_prob=0.5),
                                      RandomTranslate(translate_prob=0.5),
                                      Normalize(),
                                  ]))
    val_dataset = CocoDetection(image_root_dir=val_dataset_path,
                                annotation_root_dir=dataset_annotations_path,
                                set="val2017",
                                transform=transforms.Compose([
                                    Normalize(),
                                    Resize(resize=input_image_size),
                                ]))

    epochs = 140
    milestones = [90, 120]
    per_node_batch_size = 16
    lr = 5e-4
    # lr = 1e-5
    num_workers = 4
    print_interval = 100
    apex = True
    sync_bn = False
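# With `multi_head = True` and `num_classes = [41, 1]`, the detector needs one
# classification heatmap branch per entry in the list. The module below is an
# illustrative sketch of that idea only (not the repo's actual head; the selayer /
# ttf / mlp options are ignored): each head shares the same neck features and emits
# its own per-class heatmap.
import torch.nn as nn

class MultiClsHeads(nn.Module):
    def __init__(self, in_channels, num_classes_per_head):
        super().__init__()
        self.heads = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels, num_cls, kernel_size=1))
            for num_cls in num_classes_per_head
        ])

    def forward(self, feat):
        # e.g. returns heatmaps of shape [B, 41, H, W] and [B, 1, H, W]
        return [head(feat) for head in self.heads]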
# --- Config: resnet50_yolof for the small-sample multi-task experiments ---
class Config(object):
    version = 1
    log = './multi_task/log_' + str(version)  # Path to save log
    checkpoint_path = '/home/jovyan/data-vol-polefs-1/small_sample/multi_task/checkpoints/v{}'.format(version)  # Path to store checkpoint model
    resume = '/home/jovyan/data-vol-polefs-1/small_sample/multi_task/checkpoints/v{}/latest.pth'.format(version)  # load checkpoint model
    # best checkpoint of a previous training version, used for warm-starting
    pre_model_dir = '/home/jovyan/data-vol-polefs-1/small_sample/multi_task/checkpoints/v1/best.pth'
    evaluate = None  # evaluate model path

    base_path = '/home/jovyan/data-vol-polefs-1/small_sample/dataset'
    train_dataset_path = os.path.join(base_path, 'images/images')
    val_dataset_path = os.path.join(base_path, 'images/images')
    dataset_annotations_path = os.path.join(
        "/home/jovyan/data-vol-polefs-1/small_sample/multi_task",
        'annotations/v{}'.format(version))

    network = "resnet50_yolof"
    seed = 0
    # resize
    input_image_size = 667
    num_classes = 1

    train_dataset = CocoDetection(image_root_dir=train_dataset_path,
                                  annotation_root_dir=dataset_annotations_path,
                                  set="train",
                                  transform=transforms.Compose([
                                      RandomFlip(flip_prob=0.5),
                                      RandomCrop(crop_prob=0.5),
                                      RandomTranslate(translate_prob=0.5),
                                      Normalize(),
                                      Resize(resize=input_image_size)
                                  ]))
    val_dataset = CocoDetection(image_root_dir=val_dataset_path,
                                annotation_root_dir=dataset_annotations_path,
                                set="val",
                                transform=transforms.Compose([
                                    Normalize(),
                                    Resize(resize=input_image_size),
                                ]))

    #***********************************************************#
    # use the pretrained backbone
    pretrained = True
    # freeze the backbone and neck
    # freeze = False
    # load the previous model to train; if v1, use the coco pretrained weights, else use the latest version model
    # previous = False
    # train multi task head
    multi_task = False
    #***********************************************************#
    # fpn
    # use yolof neck
    use_yolof = True
    # fpn encode channels
    fpn_out_channels = 512
    use_p5 = True
    #***********************************************************#
    # head
    class_num = 1
    # use gn in head
    use_GN_head = False
    prior = 0.01
    cnt_on_reg = True
    # yolof
    yolof_encoder_channels = 512
    # ttf up
    use_ttf = True
    ttf_out_channels = [256, 128]
    # dubble_run = True
    #***********************************************************#
    # training
    epochs = 24
    per_node_batch_size = 4
    lr = 1e-4
    num_workers = 4
    print_interval = 100
    eval_interval = 4
    apex = True
    sync_bn = False
    # down sample strides
    strides = [16]
    # limit in decoder
    limit_range = [[-1, 99999]]
    # limit_range = [[-1, 667]]
    # scales parameter in head
    scales = [1.0]
    #***********************************************************#
    # inference
    score_threshold = 0.05
    nms_iou_threshold = 0.6
    max_detection_num = 1000
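# The inference settings at the end of the config above (`score_threshold`,
# `nms_iou_threshold`, `max_detection_num`) are typically applied as a
# filter -> NMS -> top-k pipeline. A rough sketch using torchvision's NMS
# (the repo's own decoder may differ):
import torch
from torchvision.ops import nms

def postprocess(boxes, scores, cfg):
    # boxes: [N, 4] as (x1, y1, x2, y2); scores: [N]
    keep = scores > cfg.score_threshold
    boxes, scores = boxes[keep], scores[keep]
    keep = nms(boxes, scores, cfg.nms_iou_threshold)
    keep = keep[:cfg.max_detection_num]
    return boxes[keep], scores[keep]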