Code example #1
def prepare_data():
    # Set the training configuration first
    cfg_path = "models/pvanet/lite/train_1K.yml"
    cfg_from_file(cfg_path)
    
    """
     1. PREPARING DATASET
    """


    # First, prepare the dataset for fine-tuning.
    # Each kind of dataset is wrapped by the IMDB class, originally designed by Ross Girshick.

    # Put the COCO data directory (a soft link works as well) under the PVA-NET directory.
    # The COCO IMDB needs two parameters: data split and year.
    #coco_train = coco("train", "2014")
    main_classes = CLASS_SETS["3-car"]
  
    mapper = { "trailer-head": '__background__', 'person': '__background__', 'scooter': '__background__', 'bike':'__background__', "motorcycle": "__background__", "bicycle": "__background__", "truck":"__background__", "bus":"__background__"}  
    vatic_names = ["A1HighwayDay", 'B2HighwayNight', "pickup", "tanktruck", "van", "PU_Van", "Sedans_1", "Sedans_2"]
    vatics = [VaticData(vatic_name, main_classes, CLS_mapper=mapper, train_split="all") for vatic_name in vatic_names] 
    NCTU_VIDEOS = [13, 17, 18, 19, 20, 3, 36, 38, 4, 5 ,6, 7, 8, 9, 10, 11, 12]
    NCTU_vatic_names = ["NCTU_{}.MOV".format(video) for video in NCTU_VIDEOS]
    NCTU_vatics = [VaticData(vatic_name, main_classes, CLS_mapper=mapper, train_split="all") for vatic_name in NCTU_vatic_names]   
   
    imdb_group = IMDBGroup(vatics + NCTU_vatics) 
    #imdb_group = IMDBGroup(vatics)       
    imdb, roidb = combined_roidb(imdb_group)
    # Total number of annotated images across all of the wrapped datasets
    total_len = float(len(imdb_group.gt_roidb()))
    print(total_len)
   
    return roidb
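The mapper in this example folds every non-vehicle class into __background__, so only the classes listed in CLASS_SETS["3-car"] are kept for fine-tuning. The snippet below is only a hypothetical illustration of that remapping idea; remap_labels and the sample labels are invented for this note and are not part of VaticData or the project's API.

# Hypothetical sketch of the CLS_mapper idea: labels that appear as keys
# in the mapper are rewritten, everything else passes through unchanged.
def remap_labels(raw_labels, cls_mapper):
    return [cls_mapper.get(label, label) for label in raw_labels]

sample_labels = ["person", "sedan", "truck", "bus"]   # made-up raw annotations
sample_mapper = {"person": "__background__", "truck": "__background__", "bus": "__background__"}
print(remap_labels(sample_labels, sample_mapper))
# ['__background__', 'sedan', '__background__', '__background__']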
Code example #2
File: rc2.py, Project: jeffshih/autoTrain
def prepare_data():
    # Set the training configuration first
    cfg_path = "models/pvanet/lite/train.yml"
    cfg_from_file(cfg_path)
    """
     1. PREPARING DATASET
    """

    # First, prepare the dataset for fine-tuning.
    # Each kind of dataset is wrapped by the IMDB class, originally designed by Ross Girshick.

    # Put the COCO data directory (a soft link works as well) under the PVA-NET directory.
    # The COCO IMDB needs two parameters: data split and year.
    coco_train = coco("train", "2014")
    coco_val = coco("val", "2014")

    # Fetch the classes of the COCO dataset; this will be useful in the following section.
    classes = coco_val._classes

    # Next, we import the VOC dataset via the pascal_voc wrapper.
    # Since VOC and COCO use different class names, a naming mapper is needed to unify them.
    # (Note: in this excerpt the VOC mapper below is shadowed by the Vatic mapper further down.)
    mapper = {
        "tvmonitor": "tv",
        "sofa": "couch",
        "aeroplane": "airplane",
        "motorbike": "motorcycle",
        "diningtable": "dining table",
        "pottedplant": "potted plant"
    }

    # Finally, let's wrap the datasets from Vatic.
    # A Vatic dataset directory should be located under the ~/data/ directory and named data-*
    # For example: ~/data/data-YuDa, ~/data/data-A1HighwayDay
    vatic_names = [
        "YuDa", "A1HighwayDay", "B2HighwayNight", "airport", "airport2"
    ]
    mapper = {"van":"car", "trailer-head":"truck",\
              "sedan/suv":"car", "scooter":"motorcycle", "bike":"bicycle"}

    vatics = [
        VaticData(vatic_name, classes, CLS_mapper=mapper, train_split="all")
        for vatic_name in vatic_names
    ]

    #Combine all the IMDBs into one single IMDB for training
    datasets = vatics + [coco_train, coco_val]
    imdb_group = IMDBGroup(datasets)
    imdb, roidb = combined_roidb(imdb_group)

    total_len = float(len(imdb_group.gt_roidb()))

    # Show each dataset's share of the combined training set
    for dataset in imdb_group._datasets:
        img_nums = len(dataset.gt_roidb())
        print(dataset.name, img_nums,
              "{0:.2f}%".format(img_nums / total_len * 100))

    return roidb
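Both examples stop at returning the combined roidb. In stock py-faster-rcnn, on which the PVA-NET training code is based, the roidb is then handed to fast_rcnn.train.train_net together with a solver prototxt and pretrained weights. The sketch below assumes that interface is available on the PYTHONPATH and uses placeholder paths; it is not this project's actual training script.

# Sketch only: assumes py-faster-rcnn's fast_rcnn package is importable.
# All paths are placeholders, not files shipped with this project.
from fast_rcnn.train import train_net

roidb = prepare_data()
train_net("path/to/solver.prototxt",                        # placeholder solver definition
          roidb,
          output_dir="path/to/output_dir",                  # placeholder snapshot directory
          pretrained_model="path/to/pretrained.caffemodel", # placeholder initial weights
          max_iters=100000)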