def add_model_inputs(model, batch_size, db, db_type):
    """Attach the data-loading ops for this model and return its input blobs.

    Reads uint8 image batches plus labels from the given database, casts the
    pixels to float, rescales them to roughly [0, 1], and blocks gradient flow
    through the input. Returns the (images, labels) blob pair.
    """
    # Pull raw uint8 image batches and their labels out of the DB.
    raw_images, input_labels = brew.db_input(
        model,
        blobs_out=["input_images_uint8", "input_labels"],
        batch_size=batch_size,
        db=db,
        db_type=db_type,
    )

    # uint8 pixels -> float, then rescale from [0, 255] toward [0, 1].
    input_images = model.Cast(
        raw_images, "input_images", to=core.DataType.FLOAT)
    input_images = model.Scale(
        input_images, input_images, scale=float(1. / 256))

    # Inputs never need gradients; cut the backward graph at the data blob.
    input_images = model.StopGradient(input_images, input_images)

    return input_images, input_labels
    def add_input(self, model, batch_size, db, db_type, device_opts):
        """Wire an lmdb-backed input pipeline into *model* on the given device.

        Validates that *db* is a directory containing lmdb files (exiting the
        process with an error otherwise), adds DB-read / cast / scale /
        stop-gradient ops, and returns (data, label, dataset_size).
        """
        with core.DeviceScope(device_opts):
            # Fail fast with a clear message when the DB path is unusable.
            if not os.path.isdir(db):
                logging.error("Data loading failure. Directory '" +
                              os.path.abspath(db) + "' does not exist.")
                sys.exit(1)
            has_data = os.path.isfile(os.path.join(db, 'data.mdb'))
            has_lock = os.path.isfile(os.path.join(db, 'lock.mdb'))
            if not (has_data and has_lock):
                logging.error("Data loading failure. Directory '" +
                              os.path.abspath(db) +
                              "' does not contain lmdb files.")
                sys.exit(1)

            # Read raw uint8 batches and their labels from the database.
            data_uint8, label = brew.db_input(
                model,
                blobs_out=["data_uint8", "label"],
                batch_size=batch_size,
                db=db,
                db_type=db_type,
            )

            # uint8 pixels -> float, rescaled from [0, 255] toward [0, 1].
            data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
            data = model.Scale(data, data, scale=float(1. / 256))

            # Inputs need no gradients; stop backprop at the data blob.
            data = model.StopGradient(data, data)

            # Record count read straight from the lmdb metadata.
            dataset_size = int(lmdb.open(db).stat()['entries'])

            return data, label, dataset_size
# Beispiel #3
# 0
def AddInput(model, batch_size, db, db_type):
    """Load data with brew or model.TensorProtosDBInput"""
    # Method 1: let brew add the DB input op for us.
    data_uint8, label = brew.db_input(
        model,
        blobs_out=["data_uint8", "label"],
        batch_size=batch_size,
        db=db,
        db_type=db_type,
    )

    # # Method 2 (equivalent): add the TensorProtosDBInput op directly.
    # data_uint8, label = model.TensorProtosDBInput(
    #     [],
    #     ["data_uint8", "label"],
    #     batch_size=batch_size,
    #     db=db,
    #     db_type=db_type
    # )

    # uint8 pixels -> float, then rescale from [0, 255] toward [0, 1].
    data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
    data = model.Scale(data, data, scale=float(1. / 256))

    # Inputs need no gradients; stop backprop at the data blob.
    data = model.StopGradient(data, data)
    return data, label
# Beispiel #4
# 0
def AddInput(model, db, db_type, date_name, label_name):
    """Add a DB input op with a fixed batch size of 100.

    Returns the (data, label) blob pair named by *date_name* / *label_name*.
    NOTE(review): `date_name` is presumably a typo for `data_name`, but it is
    part of the public signature and therefore kept as-is.
    """
    blobs = brew.db_input(
        model,
        blobs_out=[date_name, label_name],
        batch_size=100,
        db=db,
        db_type=db_type,
    )
    data, label = blobs
    return data, label
# Beispiel #5
# 0
def AddInput(model, batch_size, db, db_type):
    """Add DB-read and cast ops; returns the (data, label) blob pair.

    Pixel scaling is intentionally omitted here — per the original note it was
    already applied when the data was written (download_data.py).
    """
    raw, label = brew.db_input(
        model,
        blobs_out=["data_uint8", "label"],
        batch_size=batch_size,
        db=db,
        db_type=db_type,
    )
    # uint8 pixels -> float; no Scale op (see docstring).
    data = model.Cast(raw, "data", to=core.DataType.FLOAT)
    # data = model.Scale(data, data, scale=float(1./256))  # scaled in download_data.py
    # Inputs need no gradients; stop backprop at the data blob.
    data = model.StopGradient(data, data)
    return data, label
# Beispiel #6
# 0
def add_input(model, batch_size, db, db_type):
    """Add a DB input op for pre-scaled float data; returns (data, label).

    The scalogram images were already written to disk as floats in [0, 1],
    so no Cast or Scale ops are needed here — only a StopGradient to keep
    the backward pass out of the input blob.
    """
    data, label = brew.db_input(
        model=model,
        blobs_out=["data", "label"],
        batch_size=batch_size,
        db=db,
        db_type=db_type,
    )
    data = model.StopGradient(data, data)
    return data, label
# Beispiel #7
# 0
def AddInput(model, batch_size, db, db_type):
    """Add the standard DB -> cast -> scale -> stop-gradient input pipeline.

    Returns the (data, label) blob pair produced by the pipeline.
    """
    # Read raw uint8 batches and labels out of the database.
    raw, label = brew.db_input(
        model,
        blobs_out=["data_uint8", "label"],
        batch_size=batch_size,
        db=db,
        db_type=db_type,
    )
    # uint8 pixels -> float, then rescale from [0, 255] toward [0, 1].
    data = model.Cast(raw, "data", to=core.DataType.FLOAT)
    data = model.Scale(data, data, scale=float(1./256))
    # Inputs need no gradients; stop backprop at the data blob.
    data = model.StopGradient(data, data)
    return data, label
# Beispiel #8
# 0
def AddInput(model, batch_size, db, db_type):
    """Build the input pipeline: DB read, float cast, scale, stop-gradient.

    Returns the (data, label) blob pair.
    """
    # Fetch uint8 image batches plus labels from the database.
    data_uint8, label = brew.db_input(
        model,
        blobs_out=["data_uint8", "label"],
        batch_size=batch_size,
        db=db,
        db_type=db_type,
    )
    # Convert pixels to float and rescale [0, 255] toward [0, 1].
    data = model.Cast(data_uint8, "data", to=core.DataType.FLOAT)
    data = model.Scale(data, data, scale=float(1. / 256))
    # No gradient is needed through the input blob.
    data = model.StopGradient(data, data)
    return data, label
def ScaffoldModelInput(model, lmdbPath, batchSize):
    """Hook an lmdb reader up to *model*; returns the (data, label) blobs.

    Per the original notes, the dataset was written pre-processed: pixels are
    already float and already scaled to [0, 1], so no Cast/Scale ops (and no
    StopGradient) are added here.
    """
    data, label = brew.db_input(
        model,
        ['data', 'label'],
        batch_size=batchSize,
        db=lmdbPath,
        db_type='lmdb',
    )
    return data, label
def AddInput(model, db, db_type, batch_size, mirror=0):
    # load the data
    data_f32, label = brew.db_input(
        model,
        blobs_out=["data_f32", "label"],
        batch_size=batch_size,
        db=db,
        db_type=db_type,
    )
    data_org = model.Cast(data_f32, "data_org", to=core.DataType.FLOAT)
    # mirroring tensor data randomly
    if mirror and np.random.uniform() > 0.49:
        data_org = workspace.FetchBlob("data_org")
        data = np.flip(data_org, axis=2)
        workspace.FeedBlob("data", data)
    else:
        data = model.Copy(data_org, "data")
        # prevent back-propagation: optional performance improvement; may not be observable at small scale
    data = model.StopGradient(data, data)