Example #1
def has_eia():
    if K.backend() != 'mxnet':
        return False

    import mxnet as mx
    try:
        # try to create eia context
        mx.eia()
    except:
        return False

    return True
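A minimal usage sketch (an assumption, not part of the source): has_eia() can gate EIA-only tests, here with a hypothetical pytest skip marker.

import pytest

# Hypothetical usage: skip EIA-specific tests when no accelerator is attached.
# Assumes has_eia() from the example above is importable in this module.
@pytest.mark.skipif(not has_eia(), reason="no EIA context available")
def test_needs_eia():
    import mxnet as mx
    # stage data on the accelerator, mirroring the copyto pattern used below
    x = mx.nd.ones((1, 3)).copyto(mx.eia())
    assert x.shape == (1, 3)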
Example #2
def test_prediction_with_eia():
    import mxnet as mx

    # 1. Download and save ImageNet Pre-Trained VGG-16
    model = VGG16(weights='imagenet', input_shape=(224, 224, 3))
    model.save("imagenet_vgg16.h5")

    # 2. Load the Model in EIA Context
    with K.Context("eia"):
        model = keras.models.load_model("imagenet_vgg16.h5")

    # Verify Model is loaded in EIA context
    assert model._context
    assert model._context[0] == mx.eia()

    # 3. Prepare inputs for prediction
    dummy_image1 = np.random.randint(low=0, high=255, size=(224, 224, 3))
    dummy_image1 = np.expand_dims(dummy_image1, axis=0)
    dummy_image1 = preprocess_input(dummy_image1)
    preds = model.predict(dummy_image1)
    assert len(decode_predictions(preds, top=3)[0]) == 3

    # 4. Test batch prediction
    dummy_image2 = np.random.randint(low=0, high=255, size=(224, 224, 3))
    dummy_image2 = np.expand_dims(dummy_image2, axis=0)
    dummy_image2 = preprocess_input(dummy_image2)

    batch_input = np.concatenate((dummy_image1, dummy_image2), axis=0)
    batch_preds = model.predict_on_batch(batch_input)
    assert len(batch_preds) == 2
    for pred in decode_predictions(batch_preds, top=3):
        assert len(pred[0]) == 3
Example #3
def predict_fn(input_object, model):

    inp, img = input_object

    try:
        if os.environ['USE_EIA'] == "1":
            #use EIA for low cost GPU acceleration
            device = mx.eia()
            x = inp.copyto(device)

        elif os.environ['USE_GPU'] == "1":
            device = mx.gpu()
            x = inp.copyto(device)
        else:
            device = mx.cpu()
            x = inp
    except:
        logger.error("Failed to load data into desired context")
        device = mx.cpu()
        x = inp

    cid, scores, bbox = model(x)

    return {
        "cid": cid.asnumpy().tolist(),
        "scores": scores.asnumpy().tolist(),
        "bbox": bbox.asnumpy().tolist(),
        "img": img.tolist()
    }
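The (inp, img) pair unpacked above has to come from somewhere; below is a rough sketch of a companion input_fn, assuming GluonCV's YOLO preprocessing. The function name, the request handling, and the short=512 value are assumptions, not taken from the source.

def input_fn(request_body, request_content_type):
    # Hypothetical companion handler (an assumption): decode the image payload
    # and produce the (inp, img) pair consumed by predict_fn above.
    import mxnet as mx
    from gluoncv.data.transforms.presets.yolo import transform_test

    raw = mx.image.imdecode(request_body)      # bytes -> HWC NDArray
    inp, img = transform_test(raw, short=512)  # batched network input + resized image
    return inp, img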
Example #4
def predict_fn(input_object, model):

    try:
        if os.environ['USE_EIA'] == "1":
            device = mx.eia()
            img, cid, scores, bbox = copy_to_device(input_object, device)
        elif os.environ['USE_GPU'] == "1":
            device = mx.gpu()
            img, cid, scores, bbox = copy_to_device(input_object, device)
        else:
            device = mx.cpu()
            img, cid, scores, bbox = input_object
    except:
        device = mx.cpu()
        img, cid, scores, bbox = input_object
        logger.error("Failed to load data into desired context")

    pose_input, upscale_bbox = detector_to_simple_pose(img, cid, scores, bbox)
    predicted_heatmap = model(pose_input.as_in_context(device))
    keypoints, confidence = heatmap_to_coord(predicted_heatmap, upscale_bbox)

    c = cid[0].asnumpy().reshape(cid[0].shape[0] * cid[0].shape[1])
    s = scores[0].asnumpy().reshape(scores[0].shape[0] * scores[0].shape[1])
    bb = bbox[0].asnumpy().reshape(bbox[0].shape[0] * bbox[0].shape[1])

    kp = keypoints.asnumpy().reshape(keypoints.shape[0] * keypoints.shape[1] *
                                     keypoints.shape[2])
    cfd = confidence.asnumpy().reshape(
        confidence.shape[0] * confidence.shape[1] * confidence.shape[2])

    return np.concatenate((c, s, bb, kp, cfd))
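Example #4 presumably relies on the GluonCV pose utilities; the imports below are an assumption about where those helpers come from, not part of the source.

# Assumed imports for the example above (not shown in the source).
import numpy as np
import mxnet as mx
from gluoncv.data.transforms.pose import detector_to_simple_pose, heatmap_to_coord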
Example #5
def load_sym_model(sym_f, param_f, model_dir):

    try:
        if os.environ['USE_EIA'] == "1":
            device = [mx.eia()] #use EIA for low cost GPU acceleration
        else:
            # use CPU with MKL-DNN acceleration
            device = [mx.cpu()]
    except:
        device = [mx.cpu()]

    sym_file = os.path.join(model_dir, sym_f)
    param_file = os.path.join(model_dir, param_f)
    
    return gluon.nn.SymbolBlock.imports(sym_file, ['data'], param_file, ctx=device)
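A possible call site, with placeholder file names (an assumption, not taken from the source):

# Hypothetical usage; the symbol/params file names are placeholders.
net = load_sym_model('model-symbol.json', 'model-0000.params', '/opt/ml/model')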
Example #6
def predict_fn(input_object, model):

    try:
        if os.environ['USE_EIA'] == "1":
            # use EIA for low cost GPU acceleration
            input_object = input_object.copyto(mx.eia())
    except:
        logger.error("Failed to load data into EIA")

    # run the detector; cid/score/bbox come from the model output
    # (assumed here, following the pattern of Example #3)
    cid, score, bbox = model(input_object)

    c = cid[0].asnumpy().reshape(cid[0].shape[0] * cid[0].shape[1])
    s = score[0].asnumpy().reshape(score[0].shape[0] * score[0].shape[1])
    bb = bbox[0].asnumpy().reshape(bbox[0].shape[0] * bbox[0].shape[1])

    return np.concatenate((c, s, bb))
Example #7
    def default_model_fn(self, model_dir, preferred_batch_size=1):
        """Function responsible for loading the model. This implementation is designed to work with
        the default save function provided for MXNet training.

        Args:
            model_dir (str): The directory where model files are stored
            preferred_batch_size (int): preferred batch size of the model's data shape.
                Defaults to 1.

        Returns:
            mxnet.mod.Module: the loaded model.

        """
        for f in DEFAULT_MODEL_FILENAMES.values():
            path = os.path.join(model_dir, f)
            if not os.path.exists(path):
                raise ValueError(
                    'Failed to load model with default model_fn: missing file {}. '
                    'Expected files: {}'.format(
                        f, list(DEFAULT_MODEL_FILENAMES.values())))

        shapes_file = os.path.join(model_dir,
                                   DEFAULT_MODEL_FILENAMES['shapes'])
        preferred_batch_size = preferred_batch_size or os.environ.get(
            PREFERRED_BATCH_SIZE_PARAM)
        data_names, data_shapes = read_data_shapes(shapes_file,
                                                   preferred_batch_size)

        sym, args, aux = mx.model.load_checkpoint(
            os.path.join(model_dir, DEFAULT_MODEL_NAME), 0)

        ctx = mx.eia() if os.environ.get(
            INFERENCE_ACCELERATOR_PRESENT_ENV
        ) == 'true' else get_default_context()

        mod = mx.mod.Module(symbol=sym,
                            context=ctx,
                            data_names=data_names,
                            label_names=None)
        mod.bind(for_training=False, data_shapes=data_shapes)
        mod.set_params(args, aux, allow_missing=True)

        return mod
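default_model_fn relies on a read_data_shapes helper that is not shown here. Below is a rough sketch of what it could look like, assuming the shapes file is a JSON list of {"name": ..., "shape": [...]} entries (an assumption, not from the source).

def read_data_shapes(path, preferred_batch_size=1):
    # Sketch of the helper used above, under the assumption described in the lead-in.
    import json

    with open(path, 'r') as f:
        signatures = json.load(f)

    data_names, data_shapes = [], []
    for s in signatures:
        shape = list(s['shape'])
        if preferred_batch_size:
            shape[0] = int(preferred_batch_size)  # override the batch dimension
        data_names.append(s['name'])
        data_shapes.append((s['name'], tuple(shape)))

    return data_names, data_shapes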
Example #8
def model_fn(model_dir):

    try:
        if os.environ['USE_EIA'] == "1":
            device = mx.eia()  #use EIA for low cost GPU acceleration
        elif os.environ['USE_GPU'] == "1":
            device = mx.gpu()
        else:
            # use CPU with MKL-DNN acceleration
            device = mx.cpu()
    except:
        logger.error("Failed to set desired device context.")
        device = mx.cpu()

    detector = model_zoo.get_model('yolo3_mobilenet1.0_coco',
                                   pretrained=True,
                                   ctx=device)
    detector.reset_class(["person"], reuse_weights=['person'])
    detector.hybridize()

    return detector
Example #9
def load_imperative_model(model_dir):
    
    try:
        if os.environ['USE_EIA'] == "1":
            device = [mx.eia()] #use EIA for low cost GPU acceleration
        else:
            # use CPU with MKL-DNN acceleration
            device = [mx.cpu()]
    except:
        device = [mx.cpu()]
        
    kwargs = {'ctx': device,
              'num_joints': 17,
              'pretrained': False,
              'pretrained_base': False,
              'pretrained_ctx': device}
    
    base, w = get_model_info(model_dir)
    net = model_zoo.get_model('simple_pose_resnet18_v1b', **kwargs)
    net.load_parameters(os.path.join(model_dir,w))
    
    return net
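get_model_info is referenced above but not shown; here is a hypothetical sketch, assuming it simply locates the .params file inside model_dir (an assumption, not from the source).

def get_model_info(model_dir):
    # Hypothetical helper: return the base name and the weights file found in model_dir.
    import os
    params = [f for f in os.listdir(model_dir) if f.endswith('.params')]
    if not params:
        raise FileNotFoundError("no .params file found in {}".format(model_dir))
    w = params[0]
    return os.path.splitext(w)[0], w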
Example #10
def model_fn(model_dir):
    """
    Load the gluon model. Called once when hosting service starts.
    :param: model_dir The directory where model files are stored.
    :return: a model (in this case a Gluon network)
    """
    try:
        ctx = mx.gpu(0)
    except Exception as e:
        if os.environ.get('SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT') == 'true':
            ctx = mx.eia()
        else:
            ctx = mx.cpu(0)
    print("Running Model on {} context".format(ctx))

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        net = gluon.nn.SymbolBlock.imports(
            "{}/model-symbol.json".format(model_dir), ['data'],
            "{}/model.params".format(model_dir),
            ctx=ctx)
    return net
Example #11
def transform_fn(net,
                 data,
                 input_content_type='application/x-npy',
                 output_content_type='application/json'):
    """
    Transform a request using the Gluon model. Called once per request.
    :param net: The Gluon model.
    :param data: The request payload.
    :param input_content_type: The request content type.
    :param output_content_type: The (desired) response content type.
    :return: response payload and content type.
    """
    # we can use content types to vary input/output handling, but
    # here we use numpy for input and json for output
    import numpy as np
    import io
    import base64

    try:
        ctx = mx.gpu(0)
    except Exception as e:
        if os.environ.get('SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT') == 'true':
            ctx = mx.eia()
        else:
            ctx = mx.cpu(0)

    with warnings.catch_warnings():
        #    img = np.load(io.BytesIO(data))
        #    img = np.load(io.BytesIO(base64.b64decode(data)))
        img = json.loads(data)

    ndarray = mx.nd.array(img, ctx)
    output = net(ndarray).asnumpy()
    result = np.squeeze(output)
    result_exp = np.exp(result - np.max(result))
    result = result_exp / np.sum(result_exp)
    response_body = json.dumps(result.tolist())
    return response_body, output_content_type
Example #12
def model_fn(model_dir):
    """
    Load the gluon model. Called once when hosting service starts.
    :param: model_dir The directory where model files are stored.
    :return: a model (in this case a Gluon network)
    """
    
    if os.environ.get('SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT') == 'true':
        ctx = mx.eia()
    elif mx.context.num_gpus() > 0:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu()

    print("Placing Model on {} context".format(ctx))
    prefix = f"{model_dir}/model"
    net = load_model(prefix, ctx)
    return net
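The load_model helper used above is not shown in the source; below is one plausible sketch, assuming the model was exported with HybridBlock.export() as <prefix>-symbol.json / <prefix>-0000.params (an assumption).

def load_model(prefix, ctx):
    # Hypothetical helper: import an exported Gluon model into the given context.
    import warnings
    from mxnet import gluon

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return gluon.nn.SymbolBlock.imports(
            "{}-symbol.json".format(prefix), ['data'],
            "{}-0000.params".format(prefix), ctx=ctx)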
Example #13
def model_fn(model_dir):

    try:
        if os.environ['USE_EIA'] == "1":
            device = mx.eia()  #use EIA for low cost GPU acceleration
        elif os.environ['USE_GPU'] == "1":
            device = mx.gpu()
        else:
            # use CPU with MKL-DNN acceleration
            device = mx.cpu()
    except:
        logger.error("Failed to set desired device context.")
        device = mx.cpu()

    try:
        if os.environ['MX_MODE'] == "imperative":
            pose_model = load_imperative_model(model_dir, device)
        elif os.environ['MX_MODE'] == "symbolic":
            pose_model = load_sym_model(os.environ["SYM_FILE_NAME"],
                                        os.environ["PARAM_FILE_NAME"],
                                        model_dir, device)
        else:
            pose_model = load_sym_model(os.environ["SYM_FILE_NAME"],
                                        os.environ["PARAM_FILE_NAME"],
                                        model_dir, device)


#        pose_model = get_model('simple_pose_resnet18_v1b',
#                               num_joints=17, pretrained=True,
#                               ctx=device, pretrained_ctx=device)

        return pose_model
    except:
        raise RuntimeError("Failed load: {} {}. Mode: {} Ctx: {}.".format(
            os.environ["SYM_FILE_NAME"], os.environ["PARAM_FILE_NAME"],
            os.environ['MX_MODE'], device))
Example #14
def transform_fn(net, data, input_content_type='application/x-npy', output_content_type='application/json'):
    """
    Transform a request using the Gluon model. Called once per request.
    :param net: The Gluon model.
    :param data: The request payload.
    :param input_content_type: The request content type.
    :param output_content_type: The (desired) response content type.
    :return: response payload and content type.
    """
    # we can use content types to vary input/output handling, but
    # here we use numpy for input and json for output
    import numpy as np
    import io
    import base64
        
    with warnings.catch_warnings():
        img = json.loads(data)
        
    if os.environ.get('SAGEMAKER_INFERENCE_ACCELERATOR_PRESENT') == 'true':
        ctx = mx.eia()
    elif mx.context.num_gpus() > 0:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu()
    ndarray = mx.nd.array(img, ctx)
            
    class_dict = {0:"not_hot_dog", 1:"hot_dog"}
    output = net.predict(ndarray).asnumpy()
    result = np.squeeze(output)
    result_exp = np.exp(result - np.max(result))
    result = result_exp / np.sum(result_exp)
    result_class = np.argmax(result)
    response_body = json.dumps({"predicted_class":class_dict[result_class], "confidence":str(result[result_class])})
    return response_body, output_content_type
Example #15
# [mx.test_utils.download(path+'resnet/50-layers/resnet-50-0000.params'),
#  mx.test_utils.download(path+'resnet/50-layers/resnet-50-symbol.json'),
#  mx.test_utils.download(path+'synset.txt')]

path = 'http://data.dmlc.ml/models/imagenet/squeezenet/'

[mx.test_utils.download(path+'squeezenet_v1.1-0000.params'),
 mx.test_utils.download(path+'squeezenet_v1.1-symbol.json')]

ctx = mx.eia()

with open('synset.txt', 'r') as f:
    labels = [l.rstrip() for l in f]

sym, args, aux = mx.model.load_checkpoint('resnet-50', 0)

img = mx.image.imread(image)
Example #16
def _context():
    if os.environ.get(INFERENCE_ACCELERATOR_PRESENT_ENV) == 'true':
        return mx.eia()

    # TODO mxnet ctx - better default, allow user control
    return mx.cpu()
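A minimal usage sketch for _context() (an assumption; the file names and the SymbolBlock import are placeholders, not from the source):

# Hypothetical usage of _context(): load an exported model onto EIA when the
# accelerator is present, otherwise onto the CPU.
from mxnet import gluon

ctx = _context()
net = gluon.nn.SymbolBlock.imports('model-symbol.json', ['data'],
                                   'model-0000.params', ctx=ctx)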
Example #17
def main():
    parser = argparse.ArgumentParser(description="Script to train a U-Net on the images passed as parameters.")
    parser.add_argument("--train", metavar="train_images",
                        help="Path to directory containing the training images (relative to project root)",
                        default="/opt/ml/input/data/train")

    parser.add_argument("--val", metavar="val_images",
                        help="Path to directory containing the validation images",
                        default="/opt/ml/input/data/validation")
    
    parser.add_argument("--masks", metavar="masks",
                        help="Path to directory containing the ground truth masks",
                        default="/opt/ml/input/data/masks")

    parser.add_argument("--train-masks",
                        help="Used to define a different directory for training set masks",
                        default=None, dest="train_masks")

    parser.add_argument("--checkpoints", metavar="checkpoints_dir",
                        help="Path to directory where the parameters of the best models will be stored",
                        default="/opt/ml/model/")

    parser.add_argument("--epochs",
                        help="Number of epochs to train for (default is 50)",
                        type=int, default=50)

    parser.add_argument("--batch-size",
                        help="Specify batch size (default is 8)",
                        type=int, default=8)

    parser.add_argument("--multispectral",
                        help="Enable multispectral mode",
                        default="rgb")

    parser.add_argument("--use-cpu",
                        help="Train on CPU instead of GPU (very slow, not recommended)",
                        dest="cpu", action="store_true")

    parser.add_argument("--gpu-count",
                        help="Number of GPUs to use",
                        type=int, default=1, dest="gpus")

    parser.add_argument("--use-eia",
                        help="Train on Elastic Inference",
                        dest="eia", action="store_true")
        
    parser.add_argument("--drop-bands",
                        help="Indexes of bands not to be used, separed by commas",
                        default=None, dest="drop")

    parser.add_argument("--show",
                        help="Plots the learning curve once training is completed",
                        dest="show", action="store_true")

    parser.add_argument("--data-aug",
                        help="If selected, training data is augmented online before being fed to the network",
                        dest="aug", action="store_true")

    args = parser.parse_args()

    if args.eia:
        ctx = [mx.eia()]
    elif args.cpu:
        ctx = [mx.cpu()]
    else:
        ctx = [mx.gpu(i) for i in range(args.gpus)]
    
    if args.multispectral == "rgb":
        multispectral = False
    else:
        multispectral = True
        
    batch_size = args.batch_size

    root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    train_dir = os.path.join(root, args.train)
    val_dir = os.path.join(root, args.val)
    mask_dir = os.path.join(root, args.masks)
    checkpoint_dir = os.path.join(root, args.checkpoints)
    tmp = os.path.join(os.path.dirname(__file__), 'tmp')

    train_masks = os.path.join(root, args.train_masks) if args.train_masks is not None else mask_dir

    drop_bands = [int(band) for band in args.drop.split(',')] if args.drop is not None else None

    # Create train and validation DataLoaders from our Datasets
    if args.aug:
        print ("Using AugmentorDataset")
        dataset_class = AugmentorDataset
    else:
        dataset_class = ImageWithMaskDataset

    train_ds = dataset_class(train_dir, train_masks, multisp=multispectral,
                             transform_fn=lambda b, m, ms: transform(b, m, ms, drop=drop_bands))
    train_iter = gluon.data.DataLoader(train_ds, batch_size, shuffle=True)

    val_ds = ImageWithMaskDataset(val_dir, mask_dir, multisp=multispectral,
                                  transform_fn=lambda b, m, ms: transform(b, m, ms, drop=drop_bands))
    val_iter = gluon.data.DataLoader(val_ds, batch_size, shuffle=True)

    if len(train_ds) == 0:
        raise ValueError('The train directory {} does not contain any valid images'.format(train_dir))
    if len(val_ds) == 0:
        raise ValueError('The validation directory {} does not contain any valid images'.format(val_dir))

    data = {'train': train_iter, 'val': val_iter}

    # Instantiate a U-Net and train it
    net = unet.Unet()
    net.collect_params().initialize(mx.init.Xavier(), ctx=ctx)
    net.hybridize()
    loss = DiceCoeffLoss()
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': 1e-4, 'beta1': 0.9, 'beta2': 0.99})

    epochs = args.epochs

    results = train(net, data, loss, trainer, ctx, epochs, tmp)

    if args.show:
        import matplotlib.pyplot as plt
        plt.plot(range(len(results['val'])), results['val'])
        plt.title('Model learning curve')
        plt.xlabel('Epoch')
        plt.ylabel('IoU score')
        plt.show()

    # Find best scoring model
    best = results['val'].index(max(results['val']))
    best_params = os.path.join(tmp, results['names'][best])

    # Copy it to <checkpoints> folder
    os.makedirs(checkpoint_dir, exist_ok=True)
    if multispectral:
        bands = 'ALL_BANDS' if drop_bands is None else ('ALL_BANDS-' + '-'.join([str(b) for b in drop_bands]))
    else:
        bands = 'RGB'
    save_filename = os.path.join(checkpoint_dir, 'unet_{}.params'.format(bands))
    shutil.copyfile(best_params, save_filename)
    np.save(os.path.join(checkpoint_dir, 'unet_{}_learning_curve_data.npy'.format(bands)), results)
    shutil.rmtree(tmp)

    print('Best model on validation set saved in: {}'.format(save_filename))
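A hedged local smoke-test invocation of the training script; the file name train.py and the data paths are placeholders, not from the source.

# Hypothetical local run; paths and script name are placeholders.
import subprocess

subprocess.run([
    "python", "train.py",
    "--train", "data/train",
    "--val", "data/val",
    "--masks", "data/masks",
    "--epochs", "1",
    "--batch-size", "2",
    "--use-cpu",
], check=True)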