Example #1
    def _createDense(self, dtype=core.DataType.FLOAT):
        perfect_model = np.array([2, 6, 5, 0, 1]).astype(np.float32)
        np.random.seed(123)  # make test deterministic
        numpy_dtype = np.float32 if dtype == core.DataType.FLOAT else np.float16
        initializer = Initializer if dtype == core.DataType.FLOAT else pFP16Initializer
        # Binary input features; labels are the exact linear response, so a
        # single FC layer can recover perfect_model.
        data = np.random.randint(
            2, size=(20, perfect_model.size)).astype(numpy_dtype)
        label = np.dot(data, perfect_model)[:, np.newaxis]

        model = ModelHelper(name="test", arg_scope={'order': 'NCHW'})
        out = brew.fc(
            model, 'data', 'fc', perfect_model.size, 1,
            ('ConstantFill', {}), ('ConstantFill', {}), axis=0,
            WeightInitializer=initializer, BiasInitializer=initializer)
        if dtype == core.DataType.FLOAT16:
            # Cast the fp16 output back to fp32 before computing the loss.
            out = model.HalfToFloat(out, out + "_fp32")
        sq = model.SquaredL2Distance([out, 'label'])
        loss = model.AveragedLoss(sq, "avg_loss")
        grad_map = model.AddGradientOperators([loss])
        self.assertIsInstance(grad_map['fc_w'], core.BlobReference)
        return (model, perfect_model, data, label)
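For context, a test built on this helper would typically attach an optimizer, prime the workspace, and run the train net in a loop until the FC weights approach perfect_model. A minimal sketch, assuming numpy as np and from caffe2.python import optimizer, workspace are in scope; the test name, learning rate, and iteration count are illustrative, not taken from the original suite:

    def testDenseSgd(self):  # hypothetical test name, for illustration only
        model, perfect_model, data, label = self._createDense()
        # Plain SGD; any builder from caffe2.python.optimizer would do here.
        optimizer.build_sgd(model, base_learning_rate=0.1)

        # brew.fc above uses axis=0, so the net consumes one example per run.
        workspace.FeedBlob('data', data[0])
        workspace.FeedBlob('label', label[0])
        workspace.RunNetOnce(model.param_init_net)
        workspace.CreateNet(model.net)

        for _ in range(2000):
            idx = np.random.randint(data.shape[0])
            workspace.FeedBlob('data', data[idx])
            workspace.FeedBlob('label', label[idx])
            workspace.RunNet(model.net.Proto().name)

        # The learned weights should end up close to perfect_model.
        np.testing.assert_allclose(
            perfect_model[np.newaxis, :],
            workspace.FetchBlob('fc_w'), atol=1e-2)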
Example #2
def GenerateLossOps(model: ModelHelper, model_id: str, output_blob: str,
                    label_blob: str, loss_blob: str) -> None:
    """
    Adds loss operators to the net. The loss is the squared L2 distance
    between the output and the label, averaged over all items in the minibatch.

    :param model: ModelHelper object to add loss operators to.
    :param model_id: String identifier.
    :param output_blob: Blob containing output of net.
    :param label_blob: Blob containing labels.
    :param loss_blob: Blob in which to store loss.
    """
    dist = model.SquaredL2Distance([label_blob, output_blob],
                                   model_id + "dist")
    model.AveragedLoss(dist, loss_blob)
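A hedged usage sketch of GenerateLossOps (the model, blob names, and dimensions below are hypothetical, chosen only to show the calling convention):

# Hypothetical caller; assumes the numpy/caffe2 imports used in the examples above.
model = ModelHelper(name="regressor")
output_blob = brew.fc(model, 'input', 'prediction', dim_in=10, dim_out=1)
GenerateLossOps(model, "regressor_", output_blob, 'label', 'loss')
model.AddGradientOperators(['loss'])  # backprop through the loss just added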
# Print the predict and init net to see what protobuf was created for this model
print("************* Predict Net *************")
print(regression_model.net.Proto())
print("\n************* Init Net *************")
print(regression_model.param_init_net.Proto())

# #### Add the training operators and prime the workspace
#
# In this **very important** step, we specify the loss function, set up the SGD training algorithm, prime the workspace, and initialize the model's weights and biases.

# In[5]:

# The loss function is computed by a squared L2 distance,
#   and then averaged over all items.
dist = regression_model.SquaredL2Distance(['Y_gt', y_pred], "dist")
loss = regression_model.AveragedLoss(dist, "loss")
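# For intuition, the same quantity in plain numpy would look roughly like
#   np.mean(0.5 * np.sum((Y_gt - Y_pred) ** 2, axis=1))
# (a sketch: Caffe2's SquaredL2Distance includes a 1/2 factor per item,
# and AveragedLoss takes the mean over the batch).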

# Add the gradient operators and set up the SGD algorithm
regression_model.AddGradientOperators([loss])
optimizer.build_sgd(regression_model, base_learning_rate=learning_rate)

# Prime the workspace with some data
workspace.FeedBlob("Y_gt", Y_gt.astype(np.float32))
workspace.FeedBlob("X", X.astype(np.float32))

# Run the init net to prepare the workspace, then create the net
workspace.RunNetOnce(regression_model.param_init_net)
workspace.CreateNet(regression_model.net)

# Inject our desired initial weights and bias