Example #1
    def sample_visible_given_hidden(self,
                                    hidden_batch: np.ndarray,
                                    bias_visible_tf: tf.Variable,
                                    weights_tf: tf.Variable,
                                    binary: bool = True) -> np.ndarray:
        """Sample visible units given the hidden units.

        Args:
            hidden_batch (np.ndarray): Batch of hidden data in shape (batch_size, no_hidden)
            bias_visible_tf (tf.Variable): Tensorflow variable for visible bias
            weights_tf (tf.Variable): Tensorflow variable for weights
            binary (bool, optional): True to binarize, False for raw probabilities. Defaults to True.

        Returns:
            np.ndarray: Samples in shape (batch_size, no_visible)
        """

        bias_visible = np.transpose(bias_visible_tf.numpy())
        weights = weights_tf.numpy()

        def ef(i_batch):
            return self.activation_visible(hidden_batch=hidden_batch,
                                           bias_visible=bias_visible,
                                           weights=weights,
                                           i_batch=i_batch)

        no_visible = bias_visible_tf.shape[1]

        return self._sample_x_given_y(y_batch=hidden_batch,
                                      no_x=no_visible,
                                      ef=ef,
                                      binary=binary)
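Neither activation_visible nor _sample_x_given_y is shown in this snippet. As a point of reference, here is a minimal NumPy sketch of the standard RBM conditional they presumably implement; the function name and the shape assumptions (bias_visible of shape (no_visible,), weights of shape (no_visible, no_hidden)) are illustrative, not taken from the source.

import numpy as np

def sample_visible_sketch(hidden_batch, bias_visible, weights, binary=True):
    # p(v_j = 1 | h) = sigmoid(b_j + sum_i h_i * W_ji)
    logits = bias_visible + hidden_batch @ weights.T  # (batch_size, no_visible)
    probs = 1.0 / (1.0 + np.exp(-logits))
    if binary:
        # Bernoulli-sample each visible unit from its activation probability
        return (np.random.rand(*probs.shape) < probs).astype(float)
    return probs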
Example #2
    def _do(self, input_tensor_variable: tf.Variable,
            params_tensor_variable: tf.Variable) -> Tuple[tf.Tensor, Callable]:
        """
        Forward pass with both input and parameter variables

        This intermediate function is necessary for the custom gradient to work in
        TensorFlow, which is why the grad() function is returned as well.

        Parameters
        ----------
        input_tensor_variable
            the tf.Variable which holds the values of the input
        params_tensor_variable
            the tf.Variable which holds the values of the parameters

        Returns
        -------
        result
            The result of the forward pass
        grad
            The gradient function TensorFlow will call during backpropagation
        """
        if params_tensor_variable.shape != self._params_len:
            raise TequilaMLException(
                'Received parameters of len {} when Objective takes {} parameters.'.
                format(len(params_tensor_variable.numpy()), self._params_len))
        params_tensor_variable = tf.stack(params_tensor_variable)

        if input_tensor_variable.shape != self._input_len:
            raise TequilaMLException(
                'Received input of len {} when Objective takes {} inputs.'.
                format(len(input_tensor_variable.numpy()), self._input_len))
        input_tensor_variable = tf.stack(input_tensor_variable)

        def grad(upstream):
            input_gradient_values, parameter_gradient_values = self.get_grads_values(
            )
            # Convert to tensor
            in_Tensor = tf.convert_to_tensor(input_gradient_values,
                                             dtype=self._cast_type)
            par_Tensor = tf.convert_to_tensor(parameter_gradient_values,
                                              dtype=self._cast_type)

            # Multiply with the upstream
            in_Upstream = tf.dtypes.cast(upstream, self._cast_type) * in_Tensor
            par_Upstream = tf.dtypes.cast(upstream,
                                          self._cast_type) * par_Tensor

            # Transpose and sum
            return tf.reduce_sum(tf.transpose(in_Upstream),
                                 axis=0), tf.reduce_sum(
                                     tf.transpose(par_Upstream), axis=0)

        return self.realForward(inputs=input_tensor_variable,
                                params=params_tensor_variable), grad
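The snippet returns (result, grad) because it is meant to be wrapped by tf.custom_gradient. A minimal, self-contained sketch of that pattern, using an illustrative function unrelated to the library above:

import tensorflow as tf

@tf.custom_gradient
def scaled_square(x):
    result = tf.square(x)

    def grad(upstream):
        # TensorFlow calls grad() with the upstream gradient; we return
        # upstream * d(result)/dx, just as the inner grad() above does.
        return upstream * 2.0 * x

    return result, grad

x = tf.constant([1.0, 2.0, 3.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.reduce_sum(scaled_square(x))
print(tape.gradient(y, x))  # -> [2. 4. 6.]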
Example #3
    def model_selection(self, checkpoint: tf.train.Checkpoint,
                        global_step: tf.Variable) -> Optional[Path]:
        """
        Perform model selection.

        Args:
            checkpoint (:py:class:`tf.train.Checkpoint`): Checkpoint object that contains
                the model status.
            global_step (:py:class:`tf.Variable`): current training step

        Returns:
            :py:class:`pathlib.Path`: path of the saved checkpoint if model
            selection was performed, otherwise ``None``.

        """
        current_value = self.result()
        previous_value = float(
            self.json_read(self.best_model_sel_file)[self.sanitized_name])
        # Model selection is done ONLY if an operator was passed at __init__
        if (self._model_selection_operator and self._model_selection_operator(
                current_value, previous_value)
                and not np.isclose(current_value, previous_value)):
            print(
                f"{self.sanitized_name}: validation value: {previous_value} → {current_value}",
            )
            self.json_write(
                self.best_model_sel_file,
                {
                    self.sanitized_name: str(current_value),
                    "step": int(global_step.numpy()),
                },
            )
            manager = tf.train.CheckpointManager(checkpoint,
                                                 self.best_folder / "ckpts",
                                                 max_to_keep=1)
            return Path(manager.save())
        return None
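A hedged usage sketch: `metric` stands for an instance of the class that owns model_selection and is hypothetical here, as are the placeholder model and optimizer; only the Checkpoint and Variable plumbing follows documented TensorFlow APIs.

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])  # placeholder model
optimizer = tf.keras.optimizers.Adam()
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
global_step = tf.Variable(0, dtype=tf.int64)

best_path = metric.model_selection(checkpoint, global_step)  # `metric` is hypothetical
if best_path is not None:
    checkpoint.restore(str(best_path))  # roll back to the best model later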
Example #4
    def model_selection(self, checkpoint: tf.train.Checkpoint,
                        global_step: tf.Variable) -> None:
        """
        Perform model selection.

        Args:
            checkpoint (:py:class:`tf.train.Checkpoint`): Checkpoint object that contains
                the model status.
            global_step (:py:class:`tf.Variable`): current training step

        """
        current_value = self.result()
        previous_value = float(
            self.json_read(self.best_model_sel_file)[self._name])
        # Model selection is done ONLY if an operator was passed at __init__
        if self._model_selection_operator and self._model_selection_operator(
                current_value, previous_value):
            tf.print(
                f"{self.name}: validation value: {previous_value} → {current_value}"
            )
            Metric.json_write(
                self.best_model_sel_file,
                {
                    self._name: str(current_value),
                    "step": int(global_step.numpy())
                },
            )
            manager = tf.train.CheckpointManager(checkpoint,
                                                 os.path.join(
                                                     self.best_folder,
                                                     "ckpts"),
                                                 max_to_keep=1)
            manager.save()
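Because this variant returns nothing, a caller would recover the best checkpoint from disk instead. A sketch under the assumption that checkpoints live in a best/ckpts folder as above; the folder name and placeholder model are illustrative.

import os
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])  # placeholder model
checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(
    checkpoint, os.path.join("best", "ckpts"), max_to_keep=1)  # assumed layout
if manager.latest_checkpoint:
    checkpoint.restore(manager.latest_checkpoint)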
Example #5
from tensorflow import Variable


def defining_variables():
    # Define the 1-dimensional variable A1
    A1 = Variable([1, 2, 3, 4])
    print('\n A1: ', A1)

    # Convert A1 to a numpy array and assign it to B1
    B1 = A1.numpy()
    print('\n B1: ', B1)
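A short follow-on sketch: unlike plain tensors, a tf.Variable supports in-place updates through its documented assign methods, which is the main reason to use one.

from tensorflow import Variable

V = Variable([1, 2, 3, 4])
V.assign([5, 6, 7, 8])       # overwrite the stored values in place
V.assign_add([1, 1, 1, 1])   # element-wise in-place addition
print(V.numpy())             # -> [6 7 8 9]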
Example #6
def train_epoch(model,
                optimizer,
                dataset,
                epoch_index: int,
                batch_index: tf.Variable,
                log_freq: int = 250,
                writer=None):

    to_fine_tune = list(model.trainable_variables)

    epoch_metrics = make_metric_dict(
        ["Localization", "Confidence", "WeightedTotal"])
    era_metrics = make_metric_dict(
        ["Localization", "Confidence", "WeightedTotal"])

    for (_, met) in era_metrics.items():
        met.reset_states()

    epoch_samples = 0
    era_samples = 0
    _log("Started new training epoch")

    batch_start = batch_index.numpy()
    for batch in dataset:
        batch_index.assign_add(1)
        epoch_samples += len(batch["image"])
        era_samples += len(batch["image"])

        keys = [
            "cls_targets", "cls_weights", "reg_targets", "reg_weights",
            "matched"
        ]
        images, shapes = model.preprocess(batch["image"])
        model.provide_groundtruth_direct(**{k: batch[k] for k in keys})
        with tf.GradientTape() as tape:
            prediction_dict = model.predict(images, shapes)
            loss_dict = model.loss(prediction_dict)
        gradients = tape.gradient(loss_dict["WeightedTotal"], to_fine_tune)
        optimizer.apply_gradients(zip(gradients, to_fine_tune))
        update_metric_dict(epoch_metrics, loss_dict)
        update_metric_dict(era_metrics, loss_dict)

        if (batch_index - batch_start) % log_freq == 0:
            _log(f"Completed {batch_index - batch_start} batches")
            if writer:
                l_dict = metric2scalar_dict(
                    era_metrics,
                    prefix=f"Loss/Train/Last_{log_freq}_Batches",
                    v_func=lambda v: v / era_samples,
                    reset_states=True)
                write_scalars(writer, l_dict, step=batch_index)

    if writer:
        l_dict = metric2scalar_dict(epoch_metrics,
                                    prefix=f"Loss/Train/Epoch",
                                    v_func=lambda v: v / epoch_samples,
                                    reset_states=True)
        write_scalars(writer, l_dict, step=epoch_index)
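A hedged call sketch: model, optimizer, and dataset are placeholder names for whatever objects train_epoch expects; the point is the tf.Variable counter, which persists across epochs so the logging step stays monotonic.

import tensorflow as tf

batch_index = tf.Variable(0, trainable=False, dtype=tf.int64)  # survives epochs
writer = tf.summary.create_file_writer("logs/train")

for epoch in range(10):
    # model, optimizer, dataset are hypothetical stand-ins
    train_epoch(model, optimizer, dataset,
                epoch_index=epoch,
                batch_index=batch_index,
                log_freq=250,
                writer=writer)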
Example #7
def var_to_var(var_from: tf.Variable, var_to: tf.Variable, epsilon: float):
    """Expands a variable to another variable.

  Assume the shape of `var_from` is (a, b, ..., y, z); the shape of `var_to`
  can then be (a, ..., z * 2), (a * 2, ..., z * 2), or (a * 2, ..., z).

  If the shape of `var_to` is (a, ..., 2 * z):
    For any x, tf.matmul(x, var_to) ~= expand_vector(tf.matmul(x, var_from)) / 2
    Note that noise will be added to the left-hand side if epsilon != 0.
  If the shape of `var_to` is (2 * a, ..., z):
    For any x, tf.matmul(expand_vector(x), var_to) == tf.matmul(x, var_from)
  If the shape of `var_to` is (2 * a, ..., 2 * z):
    For any x, tf.matmul(expand_vector(x), var_to) ==
        expand_vector(tf.matmul(expand_vector(x), var_from))

  Args:
    var_from: input variable to expand.
    var_to: output variable.
    epsilon: the noise ratio that will be added, when splitting `var_from`.
  """
    shape_from = var_from.shape
    shape_to = var_to.shape

    if shape_from == shape_to:
        var_to.assign(var_from)

    elif len(shape_from) == 1 and len(shape_to) == 1:
        var_to.assign(expand_vector(var_from.numpy()))

    elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] == shape_to[-1]:
        var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon, axis=0))

    elif shape_from[0] == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
        var_to.assign(expand_1_axis(var_from.numpy(), epsilon=epsilon,
                                    axis=-1))

    elif shape_from[0] * 2 == shape_to[0] and shape_from[-1] * 2 == shape_to[-1]:
        var_to.assign(expand_2_axes(var_from.numpy(), epsilon=epsilon))

    else:
        raise ValueError("Shape not supported, {}, {}".format(
            shape_from, shape_to))
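A hedged usage sketch of the doubled-last-axis branch; it assumes expand_1_axis (and the other expand_* helpers) are importable from the same module as var_to_var, since they are not shown here.

import tensorflow as tf

var_from = tf.Variable(tf.random.normal([4, 8]))
var_to = tf.Variable(tf.zeros([4, 16]))  # last axis doubled

# Dispatches to the expand_1_axis(..., axis=-1) branch; epsilon scales the
# noise added when var_from's columns are split.
var_to_var(var_from, var_to, epsilon=0.1)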
Example #8
    def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol):
        """Tests that qnodes return multiple expectation values."""
        a, b, c = Variable(0.5), Variable(0.54), Variable(0.3)

        @qml.qnode(qubit_device_2_wires, interface='tf')
        def circuit(x, y, z):
            qml.RX(x, wires=[0])
            qml.RZ(y, wires=[0])
            qml.CNOT(wires=[0, 1])
            qml.RY(y, wires=[0])
            qml.RX(z, wires=[0])
            return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1))

        res = circuit(a, b, c)

        out_state = np.kron(Rotx(c.numpy()), I) @ np.kron(Roty(b.numpy()), I) @ CNOT \
            @ np.kron(Rotz(b.numpy()), I) @ np.kron(Rotx(a.numpy()), I) @ np.array([1, 0, 0, 0])

        ex0 = np.vdot(out_state, np.kron(Y, I) @ out_state)
        ex1 = np.vdot(out_state, np.kron(I, Z) @ out_state)
        ex = np.array([ex0, ex1])

        assert np.allclose(ex, res.numpy(), atol=tol, rtol=0)
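With interface='tf' the QNode returns TensorFlow tensors, so the expectation values are differentiable with an ordinary tape. A sketch reusing circuit and the variables a, b, c from the test above:

with tf.GradientTape() as tape:
    res = circuit(a, b, c)
    loss = tf.reduce_sum(res)
grads = tape.gradient(loss, [a, b, c])  # one gradient per input Variable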
Example #9
import pandas as pd
import tensorflow as tf
from tensorflow import Variable, float32, keras

# Read in the dataset.
data = pd.read_csv('car.csv', compression='gzip')
data = data.drop(columns=['Origin', 'Model_year'])

# define the trainable variables
intercept = Variable(10.0, dtype=float32)
slope = Variable(0.1, dtype=float32)


# define loss function
def lossfunc(intercept, slope, feature, target):
    predictions = intercept + slope * feature
    return keras.losses.mse(target, predictions)


# Initialize the Adam optimizer
optim = keras.optimizers.Adam(0.001)

# Run the linear model
for batch in pd.read_csv('car.csv', compression='gzip', chunksize=150):
    feature_batch = tf.cast(batch['Horsepower'], float32)
    target_batch = tf.cast(batch['MPG'], float32)
    optim.minimize(
        lambda: lossfunc(intercept, slope, feature_batch, target_batch),
        var_list=[intercept, slope])
    print(intercept.numpy(), slope.numpy())

# Print the final trainable variables and a sample of the data
print(intercept.numpy(), slope.numpy())
print(data.head())
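The optimizer.minimize call above hides the explicit tape pattern; here is a sketch of the equivalent manual step using the same variables, in case finer control over the gradients is needed.

for batch in pd.read_csv('car.csv', compression='gzip', chunksize=150):
    feature_batch = tf.cast(batch['Horsepower'], float32)
    target_batch = tf.cast(batch['MPG'], float32)
    with tf.GradientTape() as tape:
        loss = lossfunc(intercept, slope, feature_batch, target_batch)
    # Differentiate the loss and apply the update explicitly
    grads = tape.gradient(loss, [intercept, slope])
    optim.apply_gradients(zip(grads, [intercept, slope]))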