Example #1
    def __getitem__(self, idx):

        if self.use_src and not self.use_tgt:
            record = self.recs.read_idx(self.idxs[idx])
            header, im = recordio.unpack(record)
            im = image.imdecode(im, self._flag)
            l = header.label
            if self.tforms is not None:
                im = self.tforms(im)

            return im, l
        elif self.use_tgt and not self.use_src:
            record = self.rect.read_idx(self.idxt[idx])
            header, im = recordio.unpack(record)
            im = image.imdecode(im, self._flag)
            l = header.label
            if self.tformt is not None:
                im = self.tformt(im)

            return im, l
        else:
            # online sample pairs generation
            records = self.recs.read_idx(self.idxs[idx])
            hs, ims = recordio.unpack(records)
            ims = image.imdecode(ims, self._flag)
            ls = hs.label
            if self.tforms is not None:
                ims = self.tforms(ims)

            rnd = random.uniform(0, 1)
            if rnd > 1. / (1 + self._ratio):
                # randomly select a different class
                cls_set = set(self.idxt_cls.keys())
                cls_set.remove(ls)
                idx = random.randint(0, len(cls_set) - 1)
                ys = list(cls_set)[idx]
                # randomly select a negative sample
                idx = random.randint(0, len(self.idxt_cls[ys]) - 1)
                idx = self.idxt_cls[ys][idx]
            else:
                idx = random.randint(0, len(self.idxt_cls[ls]) - 1)
                idx = self.idxt_cls[ls][idx]

            recordt = self.rect.read_idx(self.idxt[idx])
            ht, imt = recordio.unpack(recordt)
            imt = image.imdecode(imt, self._flag)
            lt = ht.label

            if self.tformt is not None:
                imt = self.tformt(imt)

            yc = 1 if ls == lt else 0

            return ims, ls, imt, lt, yc
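The else-branch above pairs the source sample with a target sample that is negative (different class) with probability ratio / (1 + ratio). A small stand-alone sanity check of that sampling rule, with an assumed ratio value rather than the dataset's self._ratio:

import random

def draw_is_negative(ratio):
    # Same test as in the dataset: uniform(0, 1) > 1 / (1 + ratio)
    # is true with probability ratio / (1 + ratio).
    return random.uniform(0, 1) > 1.0 / (1 + ratio)

ratio = 3          # assumed value for illustration
trials = 100000
neg_fraction = sum(draw_is_negative(ratio) for _ in range(trials)) / trials
print(neg_fraction)  # ~0.75, i.e. ratio / (1 + ratio)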
Example #2
    def __getitem__(self, idx):
        r1 = self.rec1.read_idx(self.idx1[idx[0]])
        h1, im1 = recordio.unpack(r1)
        im1 = image.imdecode(im1, self._flag)
        l1 = h1.label
        im1 = self.tform1(im1)

        r2 = self.rec2.read_idx(self.idx2[idx[1]])
        h2, im2 = recordio.unpack(r2)
        im2 = image.imdecode(im2, self._flag)

        im2_1 = self.tform2(im2)
        im2_2 = self.tform2(im2)

        return im1, l1, im2_1, im2_2
Example #3
def read(buf, flag=1, to_rgb=True, out=None):
    """
    Read and decode an image to an NDArray.
    Input image NDArray should have dim_order of 'HWC'.

    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    :param buf: str/bytes or numpy.ndarray
        Binary image data as string or numpy ndarray.
    :param flag:  {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    :param to_rgb:  bool, default True
        True for RGB formatted output (MXNet default).
        False for BGR formatted output (OpenCV default).
    :param out:  NDArray, optional
        Output buffer. Use `None` for automatic allocation.
    :return: NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> buf = open("flower.jpg", 'rb').read()
    >>> image.read(buf)
    <NDArray 224x224x3 @cpu(0)>
    """
    return img.imdecode(buf, flag, to_rgb, out)
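The wrapper above simply forwards to mxnet.image.imdecode. A minimal usage sketch, assuming MXNet was built with USE_OPENCV=1 and that a local "flower.jpg" exists:

from mxnet import image as img

with open("flower.jpg", "rb") as f:
    buf = f.read()
color = img.imdecode(buf)          # HWC, RGB, uint8 NDArray
gray = img.imdecode(buf, flag=0)   # single-channel grayscale
print(color.shape, gray.shape)     # e.g. (H, W, 3) and (H, W, 1)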
Example #4
def predict(task, saved_path):
    logging.info('Training Finished. Starting Prediction.\n')
    rank_root = '/data/fashion/data/attribute/datasets_david/rank'
    f_out = open('submission/%s.csv' % (task), 'w+')
    with open(rank_root + '/Tests/question.csv', 'r') as f_in:
        lines = f_in.readlines()
    tokens = [l.rstrip().split(',') for l in lines]
    task_tokens = [t for t in tokens if t[1] == task]
    n = len(task_tokens)
    cnt = 0

    predictor_net = build_model()
    predictor_ctx = mx.gpu(num_gpus[0]) if len(num_gpus) > 0 else mx.cpu()
    predictor_net.load_params(saved_path, ctx=predictor_ctx)
    logging.info("load model from %s" % saved_path)

    for path, task, _ in task_tokens:
        img_path = os.path.join(rank_root, path)
        with open(img_path, 'rb') as f:
            img = image.imdecode(f.read())
        data = transform_predict(img)
        out = predictor_net(data.as_in_context(predictor_ctx))
        out = nd.SoftmaxActivation(out).mean(axis=0)

        pred_out = ';'.join(["%.8f" % (o) for o in out.asnumpy().tolist()])
        line_out = ','.join([path, task, pred_out])
        f_out.write(line_out + '\n')
        cnt += 1
        progressbar(cnt, n)
    f_out.close()
Example #5
def predict(task):
    net = gluon.model_zoo.vision.get_model(model_name)
    with net.name_scope():
        net.output = nn.Dense(task_num_class)
    net.load_params('../../data/%s_%s.params' % (task, model_name),
                    ctx=mx.gpu(1))
    logging.info('Training Finished. Starting Prediction.\n')
    f_out = open('../../data/submission/%s_%s.csv' % (task, model_name), 'w')
    with open('../../data/z_rank/Tests/question.csv', 'r') as f_in:
        lines = f_in.readlines()
    tokens = [l.rstrip().split(',') for l in lines]
    task_tokens = [t for t in tokens if t[1] == task]
    n = len(task_tokens)
    cnt = 0
    for path, task, _ in task_tokens:
        img_path = os.path.join('../../data/z_rank', path)
        with open(img_path, 'rb') as f:
            img = image.imdecode(f.read())
        data = transform_predict(img)
        out = net(data.as_in_context(mx.gpu(1)))
        out = nd.SoftmaxActivation(out).mean(axis=0)

        pred_out = ';'.join(["%.8f" % (o) for o in out.asnumpy().tolist()])
        line_out = ','.join([path, task, pred_out])
        f_out.write(line_out + '\n')
        cnt += 1
        progressbar(cnt, n)
    f_out.close()
Example #6
def predict(task):
    logging.info('Training Finished. Starting Prediction.\n')
    f_out = open('submission/%s.csv' % (task), 'w')
    with open('data2/week-rank/Tests/question.csv', 'r') as f_in:
        lines = f_in.readlines()
    tokens = [l.rstrip().split(',') for l in lines]
    task_tokens = [t for t in tokens if t[1] == task]
    n = len(task_tokens)
    cnt = 0
    for path, task, _ in task_tokens:
        img_path = os.path.join('data2/week-rank', path)
        with open(img_path, 'rb') as f:
            img = image.imdecode(f.read())
        out_all = np.zeros([
            task_list[task],
        ])
        ###### Test-time augmentation (multi-scale test) ######
        for scale in input_scale:
            data = transform_predict(img, scale)
            with ag.predict_mode():
                out = net(data.as_in_context(
                    mx.gpu(0)))  # ten random crops were taken, so this holds the outputs for 10 images
                out = nd.SoftmaxActivation(out).mean(
                    axis=0)  # apply softmax, then average over the ten results
                out_all += out.asnumpy()
        out = out_all / len(input_scale)

        pred_out = ';'.join(["%.8f" % (o) for o in out.tolist()])
        line_out = ','.join([path, task, pred_out])
        f_out.write(line_out + '\n')
        cnt += 1
        #progressbar(cnt, n)
    f_out.close()
Example #7
def lambda_handler(event, context):
    try:
        url = event['img_url']
        response = requests.get(url,
                                headers={'Access-Control-Allow-Origin': '*'})
        img = imdecode(response.content)

        # 3. common to Jupiter
        x, img = data.transforms.presets.ssd.transform_test([img], short=512)
        class_IDs, scores, bounding_boxs = net(x)
        output = utils.viz.plot_bbox(img,
                                     bounding_boxs[0],
                                     scores[0],
                                     class_IDs[0],
                                     class_names=net.classes)
        output.axis('off')
        f = BytesIO()
        output.figure.savefig(f, format='jpeg', bbox_inches='tight')
        s3_client.Bucket('dl-lambda-image-outgoing').put_object(
            Key='front_stairs.jpg', Body=f.getvalue())
        # end common 3

        return base64.b64encode(f.getvalue())
    except Exception as e:
        raise Exception('ProcessingError')
Example #8
def batch_net(dir, suffix, net):
    res_list1 = []
    res_list2 = []
    fw1 = open('../data/dense1920/net_data.csv', 'w', encoding='utf-8')
    fw2 = open('../data/dense1920/net_label.csv', 'w', encoding='utf-8')
    for root, dirs, files in os.walk(dir):
        for file in files:
            filepath = os.path.join(root, file)
            filesuffix = os.path.splitext(filepath)[1][1:]
            if (filesuffix in suffix):
                with open(filepath, 'rb') as f:
                    img = image.imdecode(f.read())
                data, _ = transform(img, -1, test_augs)
                data = data.expand_dims(axis=0)
                out = net(data.as_in_context(mx.gpu()))
                print(out.shape)
                out = out.reshape(
                    (30720))  # dense169 6656 dense201 7890  //122880
                out = out.as_in_context(mx.cpu())
                out = out.asnumpy()
                res = ''
                for item in out:
                    res += str(item) + ' '
                #print(res)
                fw1.write(res + '\n')
                label = root.strip().split('\\')[-1]
                fw2.write(label + '\n')
                print(label)
    fw1.close()
    fw2.close()
    print('net done!!!')
Example #9
def predict(task):
    logging.info('Training Finished. Starting Prediction.\n')
    f_out = open('submission/%s.csv'%(task), 'w')  # write the test results to this file

    # load images from the test set and write the network's predictions to the file
    with open('data/rank/Tests/question.csv', 'r') as f_in:
        lines = f_in.readlines()
    tokens = [l.rstrip().split(',') for l in lines]
    task_tokens = [t for t in tokens if t[1] == task]
    n = len(task_tokens)
    cnt = 0
    for path, task, _ in task_tokens:
        img_path = os.path.join('data/rank', path)
        with open(img_path, 'rb') as f:
            img = image.imdecode(f.read())
        data = transform_predict(img)
        out = net(data.as_in_context(mx.gpu(0)))
        out = nd.SoftmaxActivation(out).mean(axis=0)

        pred_out = ';'.join(["%.8f"%(o) for o in out.asnumpy().tolist()])
        line_out = ','.join([path, task, pred_out])
        f_out.write(line_out + '\n')
        cnt += 1
        progressbar(cnt, n)
    f_out.close()
Example #10
def read(buf, flag=1, to_rgb=True, out=None):
    """Read and decode an image to an NDArray.
    Input image NDArray should have dim_order of 'HWC'.

    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data as string or numpy ndarray.
    flag : {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : bool, default True
        True for RGB formatted output (MXNet default).
        False for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> buf = open("flower.jpg", 'rb').read()
    >>> image.read(buf)
    <NDArray 224x224x3 @cpu(0)>
    """
    return img.imdecode(buf, flag, to_rgb, out)
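To avoid repeating the usage shown after Example #3, here is only the to_rgb switch documented above, again assuming a local "flower.jpg":

from mxnet import image as img

buf = open("flower.jpg", "rb").read()
rgb = img.imdecode(buf)                 # MXNet default: RGB channel order
bgr = img.imdecode(buf, to_rgb=False)   # OpenCV default: BGR channel order
print((rgb[:, :, 0] == bgr[:, :, 2]).asnumpy().all())  # True: red and blue planes are swapped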
Example #11
    def predict_cropped_images(self, dataset_path, model_path, task, gpus, network='densenet201', loss_type='sfe'):

        # with Path(dataset_path, 'Annotations/%s.csv' % task).open('r') as f:
        #     self.task_tokens = [l.rstrip().split(',') for l in f.readlines()]
        # self.task_tokens = [t for t in tokens if t[1] == task]

        results_path = self.output_submission_path.joinpath('%s.csv'%(task))
        f_out = results_path.open('w+')
        ctx = self.get_ctx()[0]

        net = get_symbol(network, task_class_num_list[task], ctx)
        net.load_params(model_path, ctx=ctx)
        logging.info("load model from %s" % model_path)

        for index, task_token in enumerate(self.task_tokens):
            img_path, raw_task = task_token[:2]
            assert raw_task == task, "task not match"
            with Path(dataset_path, img_path).open('rb') as f:
                raw_img = f.read()
            img = image.imdecode(raw_img)
            data = utils.transform_cropped_img(img)
            out = net(data.as_in_context(ctx))
            out = nd.SoftmaxActivation(out).mean(axis=0)
            pred_out = ';'.join(["%.8f"%(o) for o in out.asnumpy().tolist()])
            line_out = ','.join([img_path, task, pred_out])
            f_out.write(line_out + '\n')
            utils.progressbar(index, len(self.task_tokens))
        f_out.close()
        logging.info("end predicting for %s, results saved at %s" % (task, results_path))
Example #12
    def pre_processing(self, img_path):
        """
        Process the image: first resize it to the target height; if the resulting
        width is smaller than the target width, pad it with black pixels, otherwise
        force-resize it to the target width.

        :param img_path: path to the image
        :return:
        """
        data_augment = False
        if self.phase == 'train' and np.random.rand() > 0.5:
            data_augment = True
        if data_augment:
            img_h = 40
            img_w = 340
        else:
            img_h = self.img_h
            img_w = self.img_w
        img = image.imdecode(
            open(img_path, 'rb').read(), 1 if self.img_channel == 3 else 0)
        h, w = img.shape[:2]
        ratio_h = float(img_h) / h
        new_w = int(w * ratio_h)
        if new_w < img_w:
            img = image.imresize(img, w=new_w, h=img_h)
            step = nd.zeros((img_h, img_w - new_w, self.img_channel),
                            dtype=img.dtype)
            img = nd.concat(img, step, dim=1)
        else:
            img = image.imresize(img, w=img_w, h=img_h)

        if data_augment:
            img, _ = image.random_crop(img, (self.img_w, self.img_h))
        return img
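A stand-alone sketch of the resize-then-pad step described in the docstring above; target_h and target_w are assumed values standing in for the class attributes used in the example:

from mxnet import image, nd

def resize_and_pad(img, target_h=32, target_w=320):
    # Scale to the target height, preserving the aspect ratio.
    h, w = img.shape[:2]
    new_w = int(w * float(target_h) / h)
    if new_w < target_w:
        img = image.imresize(img, w=new_w, h=target_h)
        # Pad the right side with black pixels up to the target width.
        pad = nd.zeros((target_h, target_w - new_w, img.shape[2]), dtype=img.dtype)
        img = nd.concat(img, pad, dim=1)
    else:
        # Already wide enough: force-resize to the target width.
        img = image.imresize(img, w=target_w, h=target_h)
    return img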
Example #13
    def read_record(self, rec, idx):
        record = rec.read_idx(idx)
        header, im = recordio.unpack(record)
        im = image.imdecode(im)
        label = header.label

        return im, label
Example #14
    def pre_processing(self, img_path):
        """
        pre_processing

        :param img_path: path of img
        :return:
        """
        data_augment = False
        # if self.phase == 'train' and np.random.rand() > 0.5:
        #     data_augment = True
        if data_augment:
            img_h = 40
            img_w = 340
        else:
            img_h = self.img_h
            img_w = self.img_w

        img = image.imdecode(open(img_path, 'rb').read(), 1 if self.img_channel == 3 else 0)
        h, w = img.shape[:2]
        ratio_h = float(img_h) / h
        new_w = int(w * ratio_h)

        ################
        # img = image.imresize(img, w=self.img_w, h=self.img_h)
        if new_w < img_w:
            img = image.imresize(img, w=new_w, h=img_h)
            step = nd.zeros((img_h, img_w - new_w, self.img_channel), dtype=img.dtype)
            img = nd.concat(img, step, dim=1)
        else:
            img = image.imresize(img, w=img_w, h=img_h)

        # if data_augment:
        #     img, _ = image.random_crop(img, (self.img_w, self.img_h))
        return img
Example #15
def process_image(fname, data_shape):
    with open(fname, 'rb') as f:
        im = image.imdecode(f.read())

    data = image.imresize(im, data_shape, data_shape)
    data = data.astype('float32') - readData.rgb_mean

    return data.transpose((2, 0, 1)).expand_dims(axis=0), im
Example #16
def process_image(fname):
    with open(fname, 'rb') as f:
        im = image.imdecode(f.read())
    # resize to data_shape
    data = image.imresize(im, data_shape, data_shape)
    # minus rgb mean, divide std
    data = (data.astype('float32') - rgb_mean) / rgb_std
    return data.transpose((2,0,1)).expand_dims(axis=0), im
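rgb_mean, rgb_std and data_shape above come from the surrounding script; a self-contained variant using the commonly used ImageNet statistics (0-255 pixel scale, assumed here) might look like this:

from mxnet import image, nd

rgb_mean = nd.array([123.68, 116.28, 103.53])   # assumed ImageNet mean
rgb_std = nd.array([58.395, 57.12, 57.375])     # assumed ImageNet std

def preprocess(fname, data_shape=512):
    with open(fname, 'rb') as f:
        im = image.imdecode(f.read())
    data = image.imresize(im, data_shape, data_shape)
    data = (data.astype('float32') - rgb_mean) / rgb_std   # broadcast over the channel axis
    return data.transpose((2, 0, 1)).expand_dims(axis=0), im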
Example #17
def process_image(fname):
    with open(fname, 'rb') as f:
        im = image.imdecode(f.read())
    # resize to data_shape
    data = image.imresize(im, data_shape, data_shape)
    # minus rgb mean
    data = data.astype('float32') - rgb_mean
    # convert to batch x channel x height x width
    return data.transpose((2,0,1)).expand_dims(axis=0), im
Example #18
def process_image(fname):
    with open(fname, 'rb') as f:
        im = image.imdecode(f.read())
    # resize to data_shape
    data = image.imresize(im, data_shape, data_shape)
    # minus rgb mean
    data = data.astype('float32') - rgb_mean
    # convert to batch x channel x height x width
    return data.transpose((2, 0, 1)).expand_dims(axis=0), im
Example #19
def augument(data_path, label, image_name, save_path, size=224, training = True):
    image_path = os.path.join(data_path, image_name)
    (name, extension) = splitfilename(image_name)
    extension = extension.lower()
    if extension not in IMG_EXTS:
        print('filtered image: %s' % image_name)
        return
    try:
        img = image.imdecode(open(image_path, 'rb').read()).astype('float32')
    except Exception as ex:
        print("error: ", ex)
        return
    if label is not None:
        label_path = os.path.join(save_path, label)
    else:
        label_path = save_path
    mkdir(label_path)

    if training:
        aug1 = image.HorizontalFlipAug(0.5)
        aug2 = image.HorizontalFlipAug(.5)

        img = image.resize_short(img, size=384, interp=2)

        center_crop, _ = image.center_crop(img, size=(size, size))
        new_name = "%s_%s%s" % (name, "0", extension)
        cv.imwrite(os.path.join(label_path, new_name), center_crop.asnumpy())

        random_crop, _ = image.random_crop(img, size=(size, size))
        new_name = "%s_%s%s" % (name, "1", extension)
        cv.imwrite(os.path.join(label_path, new_name), random_crop.asnumpy())

        random_crop, _ = image.random_crop(img, size=(size, size))
        new_name = "%s_%s%s" % (name, "2", extension)
        cv.imwrite(os.path.join(label_path, new_name), random_crop.asnumpy())

        random_crop, _ = image.random_crop(img, size=(size, size))
        new_name = "%s_%s%s" % (name, "3", extension)
        cv.imwrite(os.path.join(label_path, new_name), random_crop.asnumpy())

        img_aug1 = aug1(random_crop).clip(0,255)
        new_name = "%s_%s%s" % (name, "4", extension)
        cv.imwrite(os.path.join(label_path, new_name), img_aug1.asnumpy())

        img_aug2 = aug2(center_crop).clip(0, 255)
        new_name = "%s_%s%s" % (name, "5", extension)
        cv.imwrite(os.path.join(label_path, new_name), img_aug2.asnumpy())

        img_resize = image.imresize(img, w=size, h=size, interp=2)
        new_name = "%s_%s%s" % (name, "6", extension)
        cv.imwrite(os.path.join(label_path, new_name), img_resize.asnumpy())
    else:
        img = image.resize_short(img, size=size)
        img, _ = image.center_crop(img, size=(size, size))
        new_name = "%s%s" % (name, extension)
        cv.imwrite(os.path.join(label_path, new_name), img.asnumpy())
Example #20
    def __getitem__(self, idx):
        # We don't assume up-front that annotation and image fields are in a particular (or 
        # guaranteed) order:
        record = super(AugmentedManifestImageRecordDataset, self).__getitem__(idx)
        # TODO: Support non-object annotation fields (e.g. for classification use cases)
        # Boolean list of whether each field is JSON:
        fieldsjson = [field[0] == b"{"[0] for field in record]  # Binary in Python is weird...
        njsonfields = sum(fieldsjson)
        if njsonfields != 1:
            raise ValueError(
                f"Record had {njsonfields} JSON annotation fields out of {len(record)} total: "
                "Expected exactly one"
            )
        # Take first JSON and first non-JSON field to be the header and the image, respectively:
        label = json.loads(record[fieldsjson.index(True)])
        img = record[fieldsjson.index(False)]

        if self._transform is not None:
            return self._transform(image.imdecode(img, self._flag), label)
        return image.imdecode(img, self._flag), label
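A toy illustration of the JSON-field detection used above; the two-field record is invented for demonstration and is not a real AugmentedManifest record:

import json

record = [b'{"label": 3}', b'\xff\xd8\xff\xe0 fake jpeg bytes']
fieldsjson = [field[0] == b"{"[0] for field in record]   # indexing bytes yields ints -> [True, False]
label = json.loads(record[fieldsjson.index(True)])       # {'label': 3}
img_bytes = record[fieldsjson.index(False)]              # raw image payload
print(fieldsjson, label)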
Example #21
def testClassify(net, fname):
    with open(fname, 'rb') as f:
        img = image.imdecode(f.read())
    data, _ = transformTest(img, -1, test_augs)
    plt.imshow(data.transpose((1, 2, 0)).asnumpy() / 255)
    data = data.expand_dims(axis=0)
    out = net(data)
    out = nd.SoftmaxActivation(out)
    pred = int(nd.argmax(out, axis=1).asscalar())
    prob = out[0][pred].asscalar()
    label = train_set.synsets
    return ('With prob=%f, %s'%(prob, label[pred]))
Example #22
    def __getitem__(self, idx):
        r = self.rec.read_idx(idx)
        h, im = recordio.unpack(r)
        im = image.imdecode(im, self._flag)
        l = h.label

        pseudo_l = self.labels[idx]

        if self.tform is not None:
            im = self.tform(im)

        return im, l, pseudo_l
Example #23
 def __getitem__(self, idx):
     while True:
         record = super().__getitem__(idx)
         header, img = recordio.unpack(record)
         if _check_valid_image(img):
             decoded_img = image.imdecode(img, self._flag)
         else:
             idx = np.random.randint(low=0, high=len(self))
             continue
         if self._transform is not None:
             return self._transform(decoded_img, header.label)
         return decoded_img, header.label
Example #24
    def __getitem__(self, idx):
        if self.use1 and not self.use2:
            record = self.rec1.read_idx(self.idx1[idx])
            h, im = recordio.unpack(record)
            im = image.imdecode(im, self._flag)
            l = h.label
            if self.tform1 is not None:
                im = self.tform1(im)

            return im, l
        elif self.use2 and not self.use1:
            record = self.rec2.read_idx(self.idx2[idx])
            h, im = recordio.unpack(record)
            im = image.imdecode(im, self._flag)
            l = h.label
            if self.tform2 is not None:
                im = self.tform2(im)

            return im, l
        else:
            # online sample pairs generation
            r1 = self.rec1.read_idx(self.idx1[idx[0]])
            h1, im1 = recordio.unpack(r1)
            im1 = image.imdecode(im1, self._flag)
            l1 = h1.label
            if self.tform1 is not None:
                im1 = self.tform1(im1)

            r2 = self.rec2.read_idx(self.idx2[idx[1]])
            h2, im2 = recordio.unpack(r2)
            im2 = image.imdecode(im2, self._flag)
            l2 = h2.label

            if self.tform2 is not None:
                im2 = self.tform2(im2)

            yc = 1 if l1 == l2 else 0

            return im1, l1, im2, l2, yc
Example #25
def classify_hotdog(net, fname):
    with open(fname, 'rb') as f:
        img = image.imdecode(f.read())
    data, _ = transform(img, -1, test_augs)
    plt.imshow(data.transpose((1, 2, 0)).asnumpy() / 255)
    data = data.expand_dims(axis=0)
    out = net(data.as_in_context(ctx))
    out = nd.SoftmaxActivation(out)
    pred = int(nd.argmax(out, axis = 1).asscalar())
    prob = out[0][pred].asscalar()
    label = train_imgs.synsets
    #print('With pred = ', pred, label[pred])
    print('With prob = ', prob, label[pred])
Example #26
def score_image(image_base64_string):
    with open('target_image.jpg', 'wb') as f:
        f.write(base64.b64decode(image_base64_string))
        f.close()
    with open('target_image.jpg', 'rb') as f:
        img = image.imdecode(f.read())
    data, _ = transform(img, -1, test_augs)
    data.transpose((1, 2, 0)).asnumpy()/255
    data = data.expand_dims(axis=0)
    net.forward(batch([data]), is_train=False)
    out = net.get_outputs()[0]
    out = nd.SoftmaxActivation(out)
    return int(out[0][1].asscalar() * 100)
Example #27
def image_processing(im_path):
    try:
        with open(im_path, 'rb') as f:
            im = image.imdecode(f.read())

        for aug_func in augmenter:
            im = aug_func(im)

        im = im.transpose((2, 0, 1)).expand_dims(axis=0)

        return im
    except IOError as e:
        print(e)
        return None
Example #28
def classify(fname):
    train_ds = vision.ImageFolderDataset('train',
                                         flag=1,
                                         transform=transform_train)
    with open(fname, 'rb') as f:
        img = image.imdecode(f.read())
    data = image.imresize(img.astype('float32') / 255, 32, 32)
    data = nd.transpose(data, (2, 0, 1))
    data = data.expand_dims(axis=0)
    net = get_net(mx.cpu(0))
    net.load_params('model.params', mx.cpu(0))
    out = net(data.as_in_context(mx.cpu(0)))
    out = nd.SoftmaxActivation(out)
    pred = int(nd.argmax(out, axis=1).asscalar())
    label = train_ds.synsets
    return label[pred]
Example #29
 def pre_processing(self, img_path):
     """
     Process the image: first resize it to the target height; if the resulting width is
     smaller than the target width, pad it with black pixels, otherwise force-resize it
     to the target width.
     :param img_path: path to the image
     :return:
     """
     img = image.imdecode(
         open(img_path, 'rb').read(), 1 if self.img_channel == 3 else 0)
     h, w = img.shape[:2]
     ratio_h = float(self.img_h) / h
     new_w = int(w * ratio_h)
     img = image.imresize(img, w=new_w, h=self.img_h)
     # if new_w < self.img_w:
     #     step = nd.zeros((self.img_h, self.img_w - new_w, self.img_channel), dtype=img.dtype)
     #     img = nd.concat(img, step, dim=1)
     return img
Example #30
    def imread(self, path, flag=1):
        with open(path, 'rb') as f:
            raw = f.read()
        k = image.imdecode(raw, flag)
        return k

    def view_sample_predictions(self, net, loader, n, ctx):
        data, label = next(iter(loader))
        data = data.as_in_context(ctx=ctx)
        label = label.as_in_context(ctx=ctx)
        output = net(data)
        pred = nd.argmax(output, axis=1)
        batch_size = data.shape[0]
        for i in range(min(n, batch_size)):
            self.view_image(data[i])
            self.view_annotated(label[i])
            self.view_annotated(pred[i])
Example #31
def predict(pic):
    # If using different model, change below
    model_name = 'ResNet50_v2'
    net = get_model(model_name, pretrained=True)
    classes = net.classes
    # Take image and return ndarray
    img = image.imdecode(pic)
    # Default data preprocessing
    img = transform_eval(img)
    pred = net(img)
    topK = 1
    ind = nd.topk(pred, k=topK)[0].astype('int')
    for i in range(topK):
        result = [
            classes[ind[i].asscalar()],
            nd.softmax(pred)[0][ind[i]].asscalar()
        ]
    return result
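A hypothetical invocation of the predict() helper above, assuming a local JPEG file and the GluonCV imports used by the example:

with open('example.jpg', 'rb') as f:    # 'example.jpg' is an assumed local file
    label, prob = predict(f.read())
print(label, prob)                      # a class name and its softmax score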
Example #32
def lambda_handler(event, context):
    try:
        url = event['img_url']
        response = requests.get(url)
        img = imdecode(response.content)
        x, img = data.transforms.presets.yolo.transform_test([img], short=320)
        class_IDs, scores, bounding_boxs = net(x)
        output = utils.viz.plot_bbox(img,
                                     bounding_boxs[0],
                                     scores[0],
                                     class_IDs[0],
                                     class_names=net.classes)
        output.axis('off')
        f = BytesIO()
        output.figure.savefig(f, format='jpeg', bbox_inches='tight')
        return base64.b64encode(f.getvalue())
    except Exception as e:
        raise Exception('ProcessingError')