import tensorflow as tf
# Assumed import path for the `tpu_embedding` helpers referenced below.
from tensorflow.python.tpu import tpu_embedding


def _get_slot_variable_names(scope_name, var_name, optimization_parameters):
    """Returns embedding slot variable names that are consistent with CPU runs."""
    # Only prepend the scope when one is given, so unscoped variables do not
    # pick up a leading '/'.
    if scope_name:
        scope_name = scope_name + '/'
    if isinstance(optimization_parameters,
                  tf.compat.v1.tpu.experimental.AdagradParameters):
        # Adagrad keeps a single accumulator slot per variable.
        return tpu_embedding.AdagradSlotVariableName('{}{}/Adagrad'.format(
            scope_name, var_name))
    elif isinstance(optimization_parameters,
                    tf.compat.v1.tpu.experimental.AdamParameters):
        # Adam keeps first- and second-moment slots ('m' and 'v').
        return tpu_embedding.AdamSlotVariableNames(
            '{}{}/Adam/m'.format(scope_name, var_name),
            '{}{}/Adam/v'.format(scope_name, var_name))
    elif isinstance(optimization_parameters,
                    tf.compat.v1.tpu.experimental.FtrlParameters):
        return tpu_embedding.FtrlSlotVariableName(
            '{}{}/Ftrl'.format(scope_name, var_name),  # accumulator
            '{}{}/Ftrl_1'.format(scope_name, var_name))  # linear
    elif isinstance(
            optimization_parameters,
            tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters):
        # Plain SGD keeps no optimizer state, so there are no slot variables.
        return None
    else:
        raise ValueError(
            'Inferring slot variable names for optimization_parameters {} '
            'is not supported.'.format(optimization_parameters))
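

# Usage sketch (illustrative, not part of the original module): the optimizer
# constructors come from the source above, but the learning rates and the
# scope/variable names below are assumptions chosen for demonstration.
if __name__ == '__main__':
    adagrad = tf.compat.v1.tpu.experimental.AdagradParameters(
        learning_rate=0.1)
    # With scope 'dnn' and variable 'input_layer/embedding', the Adagrad
    # accumulator resolves to 'dnn/input_layer/embedding/Adagrad'.
    print(_get_slot_variable_names('dnn', 'input_layer/embedding', adagrad))

    # SGD keeps no optimizer state, so no slot names are returned (None).
    sgd = tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters(
        learning_rate=0.1)
    print(_get_slot_variable_names('', 'input_layer/embedding', sgd))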