def stanford_dogs():
    # Download the Stanford Dogs dataset from Kaggle and wrap it in a reader.
    reader = core.create_reader('https://www.kaggle.com/jessicali9530/stanford-dogs-dataset')
    images = reader.list_images()
    entries = []
    for img in images:
        # Each image has a matching Pascal VOC annotation file (no .xml suffix).
        xml_fp = 'annotations/Annotation/' + img.parent.name + '/' + img.stem
        for label in object_detection.parse_voc_annotation(reader.open(xml_fp)):
            label.file_path = str(img)
            entries.append(label)
    return Dataset(pd.DataFrame(entries), reader)
def stanford_dogs():
    reader = ad.create_reader(
        ad.download('kaggle:jessicali9530/stanford-dogs-dataset'))
    images = reader.list_images()
    entries = []
    for img in images:
        xml_fp = 'annotations/Annotation/' + img.parent.name + '/' + img.stem
        for label in dataset.parse_voc_annotation(reader.open(xml_fp)):
            label.filepath = str(img)
            entries.append(label)
    return Dataset(pd.DataFrame(entries), reader)
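# A quick sanity check of the constructed dataset, as a sketch: `Dataset.get`
# and `split` are the same calls used by the training scripts below, assuming
# the function is registered under its own name; reading the label table
# through `.df` is an assumption about d8's Dataset, not a documented API.
ds = Dataset.get('stanford_dogs')
train, valid = ds.split(0.8)
print(train.df.head())  # one row per bounding-box label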
def wheat():
    reader = core.create_reader('https://www.kaggle.com/c/global-wheat-detection')
    df = pd.read_csv(reader.open('train.csv'))
    # bbox is stored as the string '[xmin, ymin, width, height]' in pixels.
    bbox = df.bbox.str.split(',', expand=True)
    xmin = bbox[0].str.strip('[ ').astype(float) / df.width
    ymin = bbox[1].str.strip(' ').astype(float) / df.height
    df = pd.DataFrame({
        'file_path': 'train/' + df.image_id + '.jpg',
        'xmin': xmin,
        'ymin': ymin,
        # Convert (width, height) offsets into corner coordinates,
        # normalized to [0, 1] by the image size.
        'xmax': bbox[2].str.strip(' ').astype(float) / df.width + xmin,
        'ymax': bbox[3].str.strip(' ]').astype(float) / df.height + ymin,
        'class_name': df.source})
    return Dataset(df, reader)
def wheat():
    def train_df_fn(reader):
        df = pd.read_csv(reader.open('train.csv'))
        bbox = df.bbox.str.split(',', expand=True)
        xmin = bbox[0].str.strip('[ ').astype(float) / df.width
        ymin = bbox[1].str.strip(' ').astype(float) / df.height
        return pd.DataFrame({
            'filepath': 'train/' + df.image_id + '.jpg',
            'xmin': xmin,
            'ymin': ymin,
            'xmax': bbox[2].str.strip(' ').astype(float) / df.width + xmin,
            'ymax': bbox[3].str.strip(' ]').astype(float) / df.height + ymin,
            'classname': df.source})
    return Dataset.from_df_func('kaggle:global-wheat-detection', train_df_fn)
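# Worked example of the bbox conversion above, with made-up numbers: the
# competition CSV stores each box as the string '[xmin, ymin, width, height]'
# in pixels, which the code turns into corner coordinates relative to the
# image size.
import pandas as pd

row = pd.DataFrame({'bbox': ['[100.0, 200.0, 50.0, 80.0]'],
                    'width': [1024], 'height': [1024]})
parts = row.bbox.str.split(',', expand=True)
xmin = parts[0].str.strip('[ ').astype(float) / row.width          # 100/1024 ~ 0.098
ymin = parts[1].str.strip(' ').astype(float) / row.height          # 200/1024 ~ 0.195
xmax = parts[2].str.strip(' ').astype(float) / row.width + xmin    # 150/1024 ~ 0.146
ymax = parts[3].str.strip(' ]').astype(float) / row.height + ymin  # 280/1024 ~ 0.273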
def make_ml(name):
    # 'hard-hat-workers' -> 'HardHatWorkers', matching the zip naming scheme.
    camel_case = name.replace('-', ' ').title().replace(' ', '')
    url = f'https://arcraftimages.s3-accelerate.amazonaws.com/Datasets/{camel_case}/{camel_case}PascalVOC.zip'
    return Dataset.from_voc(url, 'images', 'annotations')
#@save_cell
def make_ml(name):
    camel_case = name.replace('-', ' ').title().replace(' ', '')
    url = f'https://arcraftimages.s3-accelerate.amazonaws.com/Datasets/{camel_case}/{camel_case}PascalVOC.zip'
    return Dataset.from_voc(url, 'images', 'annotations')

names = ['sheep', 'paper-prototype', 'raccoon', 'boggle-boards', 'plant-doc',
         'hard-hat-workers', 'pistol', 'cars-and-traffic-signs', 'tomato',
         'dice', 'potholes', 'ships', 'mask', 'chess', 'mobile-phones',
         'glasses', 'road-signs', 'fruits', 'bikes', 'headphones', 'fish',
         'drone', 'car-license-plates', 'pets', 'faces', 'helmets', 'clothing',
         'hands', 'soccer-ball']
for name in names:
    Dataset.add(name, make_ml, [name])

#@save_cell
from d8 import core
from d8 import object_detection
import pandas as pd

@Dataset.add
def stanford_dogs():
    reader = core.create_reader('https://www.kaggle.com/jessicali9530/stanford-dogs-dataset')
    images = reader.list_images()
    entries = []
    for img in images:
        xml_fp = 'annotations/Annotation/' + img.parent.name + '/' + img.stem
        for label in object_detection.parse_voc_annotation(reader.open(xml_fp)):
            label.file_path = str(img)
            entries.append(label)
    return Dataset(pd.DataFrame(entries), reader)
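# After the registration loop above, every name in `names` resolves to a
# dataset constructor, so any of them can be materialized on demand
# (presumably downloading and parsing the Pascal VOC zip on first use):
ds = Dataset.get('raccoon')
train, valid = ds.split(0.8)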
import logging

import autogluon.core as ag
from gluoncv.auto.tasks import ObjectDetection
from d8.object_detection import Dataset

# specify hyperparameters; `args` is the script's argparse namespace
config = {
    # ... (other search-space entries not shown)
    'batch_size': ag.Int(3, 6),  # [8, 16, 32, 64]
    'momentum': ag.Real(0.85, 0.95),
    'wd': ag.Real(1e-6, 1e-2, log=True),
    'epochs': 20,
    'num_trials': args.num_trials,
    'search_strategy': 'bayesopt'
}

# specify learning task
task = ObjectDetection(config)

# specify dataset
dataset = Dataset.get(args.dataset)
train_data, valid_data = dataset.split(0.8)

# fit auto estimator
detector = task.fit(train_data, valid_data)

# evaluate auto estimator
eval_map = detector.evaluate(valid_data)
logging.info('evaluation: mAP={}'.format(eval_map[-1][-1]))

# save and load auto estimator
detector.save('ssd_detector.pkl')
detector = ObjectDetection.load('ssd_detector.pkl')
import logging

from gluoncv.auto.estimators import SSDEstimator
from gluoncv.auto.tasks.utils import config_to_nested
from d8.object_detection import Dataset

if __name__ == '__main__':
    # specify hyperparameters
    config = {
        'dataset': 'sheep',
        'gpus': [0, 1, 2, 3, 4, 5, 6, 7],
        'estimator': 'ssd',
        'base_network': 'resnet50_v1',
        'data_shape': 512,
        'batch_size': 64,  # range [8, 16, 32, 64]
        'epochs': 3
    }
    config = config_to_nested(config)
    config.pop('estimator')

    # specify dataset
    dataset = Dataset.get('sheep')
    train_data, valid_data = dataset.split(0.8)

    # specify estimator
    estimator = SSDEstimator(config)

    # fit estimator
    estimator.fit(train_data, valid_data)

    # evaluate estimator
    eval_map = estimator.evaluate(valid_data)
    logging.info('evaluation: mAP={}'.format(eval_map[-1][-1]))
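# A minimal inference sketch: this assumes the fitted SSDEstimator exposes the
# `predict` method of GluonCV's auto estimators, returning one DataFrame row
# per detected box; 'test.jpg' is a placeholder image path.
pred = estimator.predict('test.jpg')
print(pred.head())  # assumed columns: predicted class, score, box coordinates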