import os.path as osp

import numpy as np
import mxnet as mx
from mxnet import autograd, gluon

import gluoncv as gcv
from gluoncv.data import VOCDetectionTiny
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets import ssd


def test_transforms_presets_ssd():
    im_fname = gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
                                  'gluoncv/detection/biking.jpg?raw=true', path='biking.jpg')
    x, orig_img = ssd.load_test(im_fname, short=512)
    x1, orig_img1 = ssd.transform_test(mx.image.imread(im_fname), short=512)
    # load_test and transform_test must agree on the same input image
    np.testing.assert_allclose(x.asnumpy(), x1.asnumpy())
    np.testing.assert_allclose(orig_img, orig_img1)
    # skip the dataloader checks when the VOC data is not available locally
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/voc')):
        return
    train_dataset = VOCDetectionTiny()
    val_dataset = VOCDetectionTiny(splits=[('tiny_motorbike', 'test')])
    width, height = (512, 512)
    net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=False, pretrained_base=False)
    net.initialize()
    num_workers = 0
    batch_size = 4
    # a dummy forward pass in train mode yields the anchors the train transform needs
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(ssd.SSDDefaultValTransform(width, height)),
        batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
    # without anchors the train transform returns (image, label), so it
    # batchifies the same way as the val transform
    train_loader2 = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height)),
        batch_size, True, batchify_fn=val_batchify_fn, last_batch='rollover', num_workers=num_workers)
    # smoke-test a couple of batches from each loader
    for loader in [train_loader, val_loader, train_loader2]:
        for i, batch in enumerate(loader):
            if i > 1:
                break
def test_transforms_presets_ssd():
    # variant of the test above that runs against the full VOC 2007/2012 splits
    im_fname = gcv.utils.download('https://github.com/dmlc/web-data/blob/master/' +
                                  'gluoncv/detection/biking.jpg?raw=true', path='biking.jpg')
    x, orig_img = ssd.load_test(im_fname, short=512)
    x1, orig_img1 = ssd.transform_test(mx.image.imread(im_fname), short=512)
    np.testing.assert_allclose(x.asnumpy(), x1.asnumpy())
    np.testing.assert_allclose(orig_img, orig_img1)
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/voc')):
        return
    train_dataset = gcv.data.VOCDetection(splits=((2007, 'trainval'), (2012, 'trainval')))
    val_dataset = gcv.data.VOCDetection(splits=[(2007, 'test')])
    width, height = (512, 512)
    net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=False, pretrained_base=False)
    net.initialize()
    num_workers = 0
    batch_size = 4
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    batchify_fn = Tuple(Stack(), Stack(), Stack())  # stack image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, True, batchify_fn=batchify_fn, last_batch='rollover', num_workers=num_workers)
    val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(ssd.SSDDefaultValTransform(width, height)),
        batch_size, False, batchify_fn=val_batchify_fn, last_batch='keep', num_workers=num_workers)
    train_loader2 = gluon.data.DataLoader(
        train_dataset.transform(ssd.SSDDefaultTrainTransform(width, height)),
        batch_size, True, batchify_fn=val_batchify_fn, last_batch='rollover', num_workers=num_workers)
    for loader in [train_loader, val_loader, train_loader2]:
        for i, batch in enumerate(loader):
            if i > 1:
                break
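# The two Lambda handlers below reference module-level names that are not
# defined in this file: ssdnet, score_threshold, s3, and getS3Url. A minimal
# sketch of that setup (assumptions: pretrained VOC weights, a 0.5 score
# cut-off, and a hypothetical getS3Url helper that builds a public object
# URL) might look like:
import boto3

ssdnet = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)
score_threshold = 0.5  # assumed detection cut-off, not from the original
s3 = boto3.client('s3')


def getS3Url(bucket, key):
    # hypothetical helper; assumes the bucket allows public reads
    return "https://{0}.s3.amazonaws.com/{1}".format(bucket, key)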
import json

import wget
import matplotlib
matplotlib.use('Agg')  # Lambda has no display, so use a non-interactive backend
import matplotlib.pyplot as plt

from gluoncv.data.transforms.presets.ssd import load_test
from gluoncv.utils.viz import plot_bbox


def detect(event, context):
    # get the url from the JSON request body
    data = json.loads(event['body'])
    if 'url' not in data:
        response = {"statusCode": 500, "body": "Please specify a url"}
        return response
    url = data['url']

    # download the image into Lambda's writable /tmp directory
    urlSplit = url.split('/')
    fileName = urlSplit[-1]
    filePath = wget.download(url, out="/tmp/{0}".format(fileName))

    # run the detector on the image
    x, img = load_test(filePath, short=512)
    classes, scores, bbox = ssdnet(x)
    results = []
    # keep every detection whose score exceeds the given threshold
    for i in range(len(scores[0])):
        score = float(scores[0][i].asnumpy()[0])
        if score > score_threshold:
            results.append({
                "class": ssdnet.classes[int(classes[0][i].asnumpy()[0])],
                "score": score,
                "bbox": bbox[0][i].asnumpy().tolist()
            })

    # plot the bounding boxes on the image, then store the result in S3
    plot_bbox(img, bbox[0], scores[0], classes[0], class_names=ssdnet.classes)
    tmpOutPath = "/tmp/detect_{0}".format(fileName)
    plt.savefig(tmpOutPath)
    s3_key = "images/detect_{0}".format(fileName)
    s3_bucket_name = "gudongfeng.me"
    s3.upload_file(tmpOutPath, s3_bucket_name, s3_key)

    body = {
        "bounding_boxes": results,
        "s3_url": getS3Url(s3_bucket_name, s3_key)
    }
    response = {
        "statusCode": 200,
        "body": json.dumps(body),
        "headers": {
            'Access-Control-Allow-Origin': '*',
            'Access-Control-Allow-Credentials': True,
            'Content-Type': 'application/json'
        },
    }
    return response
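# A sketch of invoking the handler above locally with the API Gateway proxy
# event shape it expects ('body' is a JSON string); the image URL is an
# assumption reusing the sample downloaded in the tests above.
sample_event = {
    'body': json.dumps({
        'url': 'https://github.com/dmlc/web-data/raw/master/gluoncv/detection/biking.jpg'
    })
}
# detect(sample_event, None) downloads the image, runs detection, uploads the
# annotated plot to S3, and returns a JSON proxy response.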
def detect(event, context):
    # get the url (passed directly on the event in this variant)
    url = event.get('url', None)
    if not url:
        response = {
            "statusCode": 500,
            "body": "Please specify a url"
        }
        return response

    # download the image into Lambda's writable /tmp directory
    filename = wget.download(url, out="/tmp/image.jpg")

    # run the detector on the image
    x, _ = load_test(filename, short=512)
    classes, scores, bbox = ssdnet(x)
    results = []
    # keep every detection whose score exceeds the given threshold
    for i in range(len(scores[0])):
        score = float(scores[0][i].asnumpy()[0])
        if score > score_threshold:
            results.append({
                "class": ssdnet.classes[int(classes[0][i].asnumpy()[0])],
                "score": score,
                "bbox": bbox[0][i].asnumpy().tolist()
            })

    body = {
        "bounding_boxes": results
    }
    response = {
        "statusCode": 200,
        "body": body
    }
    return response
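# This variant takes the url directly off the event (a direct Lambda
# invocation rather than an API Gateway proxy call), so the body stays a
# plain dict. A sketch of calling it, with an assumed sample URL:
direct_event = {'url': 'https://github.com/dmlc/web-data/raw/master/gluoncv/detection/biking.jpg'}
# detect(direct_event, None) -> {'statusCode': 200, 'body': {'bounding_boxes': [...]}}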