Example #1
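# Excerpt: the _set_constant_mask helper from create_constant_op_pruning
# (see Example #8); it closes over op, ks_group, is_start_step, op_var_tens,
# mask, and masked from the enclosing function.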
def _set_constant_mask():
    # Assign the mask tensor to be 1 for all nonzero values of
    # op_var_tens, 0 otherwise; on the end step, revert the mask to all 1s
    with tf_compat.name_scope(
            PruningScope.model(
                op,
                ks_group,
                additional=PruningScope.OPS_UPDATE,
                trailing_slash=True,
            )):
        new_mask = tf_compat.cond(
            is_start_step,
            lambda: tf_compat.cast(tf_compat.not_equal(op_var_tens, 0.0),
                                   dtype=op_var_tens.dtype),
            lambda: tf_compat.ones(op_var_tens.shape,
                                   dtype=op_var_tens.dtype),
        )
        weight_var = get_tensor_var(op_var_tens)
        return tf_compat.group(
            tf_compat.assign(mask,
                             new_mask,
                             name=PruningScope.OP_MASK_ASSIGN),
            tf_compat.assign(weight_var,
                             masked,
                             name=PruningScope.OP_WEIGHT_UPDATE),
        )
Example #2
def multi_step_lr_schedule(
    global_step: tf_compat.Tensor,
    start_step: int,
    milestone_steps: List[int],
    init_lr: float,
    gamma: float,
    name: str = "multi_step_lr_schedule",
):
    """
    Create a multi step learning rate schedule in the current graph.
    Multiplies init_lr by gamma after each milestone has passed.
    Ex: lr = init_lr * (gamma ** NUM_UPDATES)

    :param global_step: the global step used for training
    :param start_step: the step to start the schedule on
    :param milestone_steps: a list of steps at which to decrease the learning rate;
        each is the number of steps that must pass after start_step before lr decreases
    :param init_lr: the learning rate to start the schedule with
    :param gamma: the decay weight to multiply the learning rate by after each
        milestone is passed
    :param name: the name scope to create the graph under
    :return: the calculated learning rate tensor
    """
    with tf_compat.name_scope(name):
        global_step = tf_compat.cast(global_step, tf_compat.int64)
        milestone_steps = tf_compat.constant(
            [mile + start_step for mile in milestone_steps],
            dtype=tf_compat.int64,
            name="milestone_steps",
        )
        start_step = tf_compat.constant(start_step,
                                        dtype=tf_compat.int64,
                                        name="start_step")
        init_lr = tf_compat.constant(init_lr,
                                     dtype=tf_compat.float32,
                                     name="init_lr")
        gamma = tf_compat.constant(gamma,
                                   dtype=tf_compat.float32,
                                   name="gamma")
        before = tf_compat.less(global_step, start_step, name="before")

        def _calc_lr():
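            # number of milestones already passed by global_step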
            less = tf_compat.cast(
                tf_compat.greater_equal(global_step, milestone_steps),
                tf_compat.int64)
            updates = tf_compat.reduce_sum(less)
            mult_g = tf_compat.pow(gamma,
                                   tf_compat.cast(updates, tf_compat.float32))

            return tf_compat.multiply(init_lr, mult_g)

        learning_rate = tf_compat.cond(before,
                                       lambda: init_lr,
                                       _calc_lr,
                                       name="learning_rate")

    return learning_rate
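
A minimal usage sketch (assuming tf_compat resolves to tf.compat.v1 in graph
mode; the milestones and step values below are illustrative):

import tensorflow.compat.v1 as tf_compat

tf_compat.disable_eager_execution()
global_step = tf_compat.placeholder(tf_compat.int64, shape=[])
lr = multi_step_lr_schedule(
    global_step, start_step=0, milestone_steps=[10, 20],
    init_lr=0.1, gamma=0.1,
)
with tf_compat.Session() as sess:
    for step in (0, 5, 10, 15, 20, 25):
        # prints 0.1 before step 10, 0.01 from step 10, 0.001 from step 20
        print(step, sess.run(lr, feed_dict={global_step: step}))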
Example #3
def create_op_pruning_no_update(
    op: tf_compat.Operation,
    op_input: tf_compat.Tensor,
    ks_group: str,
    leave_enabled: bool = True,
    is_after_end_step: tf_compat.Tensor = None,
) -> PruningOpVars:
    """
    Creates the necessary variables and operators to gradually
    apply sparsity to an operator's variable without returning a
    PruningOpVars.update value.

    :param op: the operation to prune to the given sparsity
    :param op_input: the parameter within the op to create a mask for
    :param ks_group: the group identifier the scope should be created under
    :param leave_enabled: True to continue masking the weights after end_epoch,
        False to stop masking
    :param is_after_end_step: only should be provided if leave_enabled is False;
        tensor that is true if the current global step is after end_epoch
    :return: a named tuple containing the op, the op's input, a None update op,
        the mask variable, and the masked tensor
    """
    if tf_contrib_err:
        raise tf_contrib_err

    op_sgv = graph_editor.sgv(op)

    # create the necessary variables first
    with tf_compat.variable_scope(PruningScope.model(op, ks_group),
                                  reuse=tf_compat.AUTO_REUSE):
        mask = tf_compat.get_variable(
            PruningScope.VAR_MASK,
            op_input.get_shape(),
            initializer=tf_compat.ones_initializer(),
            trainable=False,
            dtype=op_input.dtype,
        )
    tf_compat.add_to_collection(
        PruningScope.collection_name(ks_group, PruningScope.VAR_MASK), mask)

    # create the masked operation and assign as the new input to the op
    with tf_compat.name_scope(
            PruningScope.model(op, ks_group, trailing_slash=True)):
        masked = tf_compat.multiply(mask, op_input, PruningScope.OP_MASKED_VAR)
        op_inp_tens = (masked if leave_enabled else tf_compat.cond(
            is_after_end_step, lambda: op_input, lambda: masked))
        op_swapped_inputs = [
            inp if inp != op_input else op_inp_tens for inp in op_sgv.inputs
        ]
        graph_editor.swap_inputs(op, op_swapped_inputs)
    tf_compat.add_to_collection(
        PruningScope.collection_name(ks_group, PruningScope.OP_MASKED_VAR),
        masked)
    return PruningOpVars(op, op_input, None, mask, masked)
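
The core of the mechanism is the elementwise mask multiply that replaces the
op's input; a toy sketch of just that idea in TF 1.x-style graph mode
(hypothetical values, no graph editing):

import tensorflow.compat.v1 as tf_compat

tf_compat.disable_eager_execution()
weights = tf_compat.constant([[0.5, -0.2], [0.0, 0.9]])
mask = tf_compat.get_variable(
    "toy_mask", shape=[2, 2], trainable=False,
    initializer=tf_compat.ones_initializer(),
)
# masked feeds the op in place of the raw weights; later assignments to
# mask zero out pruned connections without modifying the weights themselves
masked = tf_compat.multiply(mask, weights)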
Example #4
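        # Excerpt: the _calc_lr helper from step_lr_schedule (see Example #10);
        # it closes over global_step, start_step, after, max_updates,
        # step_size, gamma, and init_lr from the enclosing function.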
        def _calc_lr():
            steps = tf_compat.subtract(global_step, start_step)
            updates = tf_compat.cond(
                after,
                lambda: max_updates,
                lambda: tf_compat.cast(
                    tf_compat.floor(tf_compat.divide(steps, step_size)),
                    tf_compat.int64,
                ),
            )
            mult_g = tf_compat.pow(gamma,
                                   tf_compat.cast(updates, tf_compat.float32))

            return tf_compat.multiply(init_lr, mult_g)
Example #5
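    # Excerpt: the cent_crop closure from a center-crop image transform;
    # padding, name, and resize come from the enclosing function's scope.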
    def cent_crop(img: tf_compat.Tensor):
        with tf_compat.name_scope(name):
            orig_shape = tf_compat.shape(img)
            min_size = tf_compat.cond(
                tf_compat.greater_equal(orig_shape[0], orig_shape[1]),
                lambda: orig_shape[1],
                lambda: orig_shape[0],
            )

            if padding > 0:
                orig_shape_list = img.get_shape().as_list()
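                # resize to the padded size (note: the return value is unused here)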
                resize((orig_shape_list[0] + 2 * padding,
                        orig_shape_list[1] + 2 * padding))

            padding_height = tf_compat.add(
                tf_compat.cast(
                    tf_compat.round(
                        tf_compat.div(
                            tf_compat.cast(
                                tf_compat.subtract(orig_shape[0], min_size),
                                tf_compat.float32,
                            ),
                            2.0,
                        )),
                    tf_compat.int32,
                ),
                padding,
            )
            padding_width = tf_compat.add(
                tf_compat.cast(
                    tf_compat.round(
                        tf_compat.div(
                            tf_compat.cast(
                                tf_compat.subtract(orig_shape[1], min_size),
                                tf_compat.float32,
                            ),
                            2.0,
                        )),
                    tf_compat.int32,
                ),
                padding,
            )
            img = tf_compat.image.crop_to_bounding_box(img, padding_height,
                                                       padding_width, min_size,
                                                       min_size)

            return img
Example #6
def preprocess_for_train(image: tf_compat.Tensor):
    """
    The default preprocessing function for the train set, as defined in the
    ResNet paper for the CIFAR datasets

    :param image: the image tensor

    :return: the preprocessed image
    """
    with tf_compat.name_scope("train_preprocess"):
        image = tf_compat.cast(image, dtype=tf_compat.float32)
        rand_choice = tf_compat.random_uniform(shape=[],
                                               minval=0,
                                               maxval=2,
                                               dtype=tf_compat.int32)
        padding = _PADDING
        image = tf_compat.cond(
            tf_compat.equal(rand_choice, 0),
            lambda: tf_compat.pad(image, [[padding, padding],
                                          [padding, padding], [0, 0]]),
            lambda: tf_compat.image.random_flip_left_right(image),
        )
        distorted_image = tf_compat.image.random_crop(image, [32, 32, 3])
        return distorted_image
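
A minimal usage sketch in a tf.data input pipeline (assuming tf_compat
resolves to tf.compat.v1 and images is an illustrative [N, 32, 32, 3]
CIFAR tensor):

dataset = tf_compat.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(preprocess_for_train).shuffle(1024).batch(128)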
Example #7
def create_ks_schedule_ops(
    global_step: tf_compat.Variable,
    begin_step: int,
    end_step: int,
    update_step_freq: int,
    init_sparsity: float,
    final_sparsity: float,
    exponent: float,
    ks_group: str,
) -> Tuple[tf_compat.Tensor, tf_compat.Tensor]:
    """
    Create a gradual schedule for model pruning (kernel sparsity).
    Creates a sparsity tensor that goes from init_sparsity to final_sparsity,
    starting at begin_step and ending at end_step.
    Uses the global_step to map those.
    Additionally creates an update_ready tensor that is True if an update
    to the sparsity tensor should be run, False otherwise.

    :param global_step: the global optimizer step for the training graph
    :param begin_step: the global step to begin pruning at
    :param end_step: the global step to end pruning at
    :param update_step_freq: the number of global steps between each weight update
    :param init_sparsity: the starting value for sparsity of a
        weight tensor to be enforced
    :param final_sparsity: the end value for sparsity for a weight tensor to be enforced
    :param exponent: the exponent to use for interpolating between
        init_sparsity and final_sparsity; higher values lead to larger sparsity
        steps at the beginning vs the end, i.e. linear (1) vs cubic (3)
    :param ks_group: the group identifier the scope should be created under
    :return: a tuple containing the signal for update_ready and the target sparsity
    """

    # create the scheduling ops first and the sparsity ops
    with tf_compat.name_scope(
            PruningScope.general(ks_group,
                                 additional=PruningScope.OPS_SCHEDULE,
                                 trailing_slash=True)):
        sched_before = tf_compat.less(global_step, begin_step)
        sched_start = tf_compat.equal(global_step, begin_step)
        sched_end = tf_compat.equal(global_step, end_step)
        sched_active = tf_compat.logical_and(
            tf_compat.greater(global_step, begin_step),
            tf_compat.less(global_step, end_step),
        )
        sched_active_inclusive = tf_compat.logical_or(
            sched_active, tf_compat.logical_or(sched_start, sched_end))
        sched_update = tf_compat.cond(
            tf_compat.less_equal(update_step_freq, 0),
            lambda: tf_compat.constant(True),
            lambda: tf_compat.equal(
                tf_compat.mod(
                    (global_step - begin_step), update_step_freq), 0),
        )
        sched_update_ready = tf_compat.logical_or(
            tf_compat.logical_or(sched_start, sched_end), sched_update)

        percentage = tf_compat.minimum(
            1.0,
            tf_compat.maximum(
                0.0,
                tf_compat_div(
                    tf_compat.cast(global_step - begin_step,
                                   tf_compat.float32),
                    end_step - begin_step,
                ),
            ),
        )
        exp_percentage = 1 - tf_compat.pow(1 - percentage, exponent)
        calc_sparsity = (tf_compat.multiply(final_sparsity - init_sparsity,
                                            exp_percentage) + init_sparsity)

    # create the update ready tensor and sparsity tensor
    with tf_compat.name_scope(
            PruningScope.general(ks_group, trailing_slash=True)):
        update_ready = tf_compat.logical_and(
            sched_active_inclusive,
            sched_update_ready,
            name=PruningScope.OP_UPDATE_READY,
        )
        sparsity = tf_compat.case(
            [
                (sched_before, lambda: tf_compat.constant(0.0)),
                (sched_start, lambda: tf_compat.constant(init_sparsity)),
                (sched_active, lambda: calc_sparsity),
            ],
            default=lambda: tf_compat.constant(final_sparsity),
            name=PruningScope.OP_SPARSITY,
        )

    # add return state to collections
    tf_compat.add_to_collection(
        PruningScope.collection_name(ks_group, PruningScope.OP_UPDATE_READY),
        update_ready,
    )
    tf_compat.add_to_collection(
        PruningScope.collection_name(ks_group, PruningScope.OP_SPARSITY),
        sparsity)

    return update_ready, sparsity
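
The sparsity interpolation above reduces to a small closed-form expression;
a plain-Python sketch of the same math (illustrative, not part of the library):

def sparsity_at(step, begin_step, end_step, init_sparsity, final_sparsity,
                exponent):
    # clamp progress to [0, 1], then apply the polynomial easing
    pct = min(1.0, max(0.0, (step - begin_step) / (end_step - begin_step)))
    exp_pct = 1 - (1 - pct) ** exponent
    return (final_sparsity - init_sparsity) * exp_pct + init_sparsity

# cubic schedule from 5% to 85% sparsity over steps 0..100:
# sparsity_at(50, 0, 100, 0.05, 0.85, 3.0) == 0.75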
Example #8
def create_constant_op_pruning(
    op: tf_compat.Operation,
    op_input: tf_compat.Tensor,
    is_start_step: tf_compat.Tensor,
    is_end_step: tf_compat.Tensor,
    ks_group: str,
) -> PruningOpVars:
    """
    Creates PruningOpVars with a constant mask for the given operation.
    On the start step, sets the mask to 1 wherever the operation input
    is nonzero and 0 elsewhere.
    At the end step, reverts the mask to all 1s and updates the weight.

    :param op: the operation to prune to the given sparsity
    :param op_input: the input tensor to op to create a constant mask for
    :param is_start_step: True only if we are at the start step.
    :param is_end_step: True only if we are at the end step.
    :param ks_group: the group identifier the scope should be created under
    :return: a named tuple containing the assignment op, mask variable,
        threshold tensor, and masked tensor
    """
    initial_vars = create_op_pruning_no_update(op, op_input, ks_group)
    op = initial_vars.op
    op_var_tens = initial_vars.op_input
    mask = initial_vars.mask
    masked = initial_vars.masked

    is_start_or_end_step = tf_compat.logical_or(is_start_step, is_end_step)

    def _set_constant_mask():
        # Assign the mask tensor to be 1 for all nonzero values of
        # op_var_tens, 0 otherwise; on the end step, revert the mask to all 1s
        with tf_compat.name_scope(
                PruningScope.model(
                    op,
                    ks_group,
                    additional=PruningScope.OPS_UPDATE,
                    trailing_slash=True,
                )):
            new_mask = tf_compat.cond(
                is_start_step,
                lambda: tf_compat.cast(tf_compat.not_equal(op_var_tens, 0.0),
                                       dtype=op_var_tens.dtype),
                lambda: tf_compat.ones(op_var_tens.shape,
                                       dtype=op_var_tens.dtype),
            )
            weight_var = get_tensor_var(op_var_tens)
            return tf_compat.group(
                tf_compat.assign(mask,
                                 new_mask,
                                 name=PruningScope.OP_MASK_ASSIGN),
                tf_compat.assign(weight_var,
                                 masked,
                                 name=PruningScope.OP_WEIGHT_UPDATE),
            )

    def _no_op():
        with tf_compat.name_scope(
                PruningScope.model(
                    op,
                    ks_group,
                    additional=PruningScope.OPS_UPDATE,
                    trailing_slash=True,
                )):
            # return no op wrapped in group to match update type
            return tf_compat.group(
                tf_compat.constant(0.0,
                                   dtype=op_var_tens.dtype,
                                   name=PruningScope.OP_MASK_UPDATE_NO_OP))

    with tf_compat.name_scope(
            PruningScope.model(
                op,
                ks_group,
                additional=PruningScope.OPS_UPDATE,
                trailing_slash=True,
            )):
        mask_update = tf_compat.cond(
            is_start_or_end_step,
            _set_constant_mask,
            _no_op,
            name=PruningScope.OP_MASK_UPDATE,
        )
    return PruningOpVars(op, op_var_tens, mask_update, mask, masked)
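
A minimal wiring sketch for the step signals this function expects (assuming
a global_step tensor and integer begin_step/end_step; names are illustrative):

is_start_step = tf_compat.equal(global_step, begin_step)
is_end_step = tf_compat.equal(global_step, end_step)
pruning_vars = create_constant_op_pruning(
    op, op_input, is_start_step, is_end_step, ks_group="pruning",
)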
Example #9
def create_op_pruning(
    op: tf_compat.Operation,
    op_input: tf_compat.Tensor,
    sparsity: tf_compat.Tensor,
    update_ready: tf_compat.Tensor,
    leave_enabled: bool,
    is_after_end_step: tf_compat.Tensor,
    ks_group: str,
    mask_creator: PruningMaskCreator,
) -> PruningOpVars:
    """
    Creates the necessary variables and operators to gradually
    apply sparsity to an operator's variable.

    Handles setting a mask on an operator to the given sparsity.
    Sets the mask based on pruning away the lowest absolute magnitude weights.

    :param op: the operation to prune to the given sparsity
    :param op_input: the variable of the parameter within op to prune
    :param sparsity: the target sparsity to use for assigning the masks
    :param update_ready: a tensor that, when True, triggers updating the mask
        from sparsity; when False, the mask is left unchanged
    :param leave_enabled: True to continue masking the weights after end_epoch,
        False to stop masking
    :param is_after_end_step: tensor that is true if the current global step
        is after end_epoch
    :param ks_group: the group identifier the scope should be created under
    :param mask_creator: object to define sparsity mask creation
    :return: a named tuple containing the assignment op, mask variable,
        threshold tensor, and masked tensor
    """
    initial_vars = create_op_pruning_no_update(op, op_input, ks_group,
                                               leave_enabled,
                                               is_after_end_step)
    op = initial_vars.op
    op_var_tens = initial_vars.op_input
    mask = initial_vars.mask
    masked = initial_vars.masked

    def _update():
        # create the update ops using the target sparsity tensor
        with tf_compat.name_scope(
                PruningScope.model(
                    op,
                    ks_group,
                    additional=PruningScope.OPS_UPDATE,
                    trailing_slash=True,
                )):
            new_mask = mask_creator.create_sparsity_mask(op_var_tens, sparsity)
            weight_var = get_tensor_var(op_var_tens)
            return tf_compat.group(
                tf_compat.assign(mask,
                                 new_mask,
                                 name=PruningScope.OP_MASK_ASSIGN),
                tf_compat.assign(
                    weight_var,
                    tf_compat.multiply(new_mask, op_var_tens),
                    name=PruningScope.OP_WEIGHT_UPDATE,
                ),
            )

    def _no_update():
        with tf_compat.name_scope(
                PruningScope.model(
                    op,
                    ks_group,
                    additional=PruningScope.OPS_UPDATE,
                    trailing_slash=True,
                )):
            # return no op wrapped in group to match update type
            return tf_compat.group(
                tf_compat.constant(0.0,
                                   dtype=op_var_tens.dtype,
                                   name=PruningScope.OP_MASK_UPDATE_NO_OP))

    with tf_compat.name_scope(
            PruningScope.model(
                op,
                ks_group,
                additional=PruningScope.OPS_UPDATE,
                trailing_slash=True,
            )):
        mask_update = tf_compat.cond(update_ready,
                                     _update,
                                     _no_update,
                                     name=PruningScope.OP_MASK_UPDATE)

    # add return state to collections
    tf_compat.add_to_collection(
        PruningScope.collection_name(ks_group, PruningScope.OP_MASK_UPDATE),
        mask_update)

    return PruningOpVars(op, op_var_tens, mask_update, mask, masked)
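
A minimal wiring sketch combining this with the schedule ops from Example #7
(assuming global_step, op, op_input, is_after_end_step, and a mask_creator
instance are already defined; all values are illustrative):

update_ready, sparsity = create_ks_schedule_ops(
    global_step, begin_step=0, end_step=1000, update_step_freq=100,
    init_sparsity=0.05, final_sparsity=0.85, exponent=3.0, ks_group="pruning",
)
pruning_vars = create_op_pruning(
    op, op_input, sparsity, update_ready, leave_enabled=True,
    is_after_end_step=is_after_end_step, ks_group="pruning",
    mask_creator=mask_creator,
)
# run pruning_vars.update (the mask_update op) alongside each training step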
Example #10
def step_lr_schedule(
    global_step: tf_compat.Tensor,
    start_step: int,
    end_step: int,
    step_size: int,
    init_lr: float,
    gamma: float,
    name: str = "exponential_lr_schedule",
) -> tf_compat.Tensor:
    """
    Create an exponential learning rate schedule in the current graph.
    Multiplies init_lr by gamma after each step_size interval has passed.
    Ex: lr = init_lr * (gamma ** NUM_UPDATES)

    :param global_step: the global step used for training
    :param start_step: the step to start the exponential schedule on
    :param end_step: the step to end the exponential schedule on;
        can be set to -1, in which case the LR continues updating indefinitely
    :param step_size: the number of steps between each gamma update to the init_lr
    :param init_lr: the learning rate to start the schedule with
    :param gamma: the decay weight to decrease init_lr by after every step_size interval
    :param name: the name scope to create the graph under
    :return: the calculated learning rate tensor
    """
    with tf_compat.name_scope(name):
        global_step = tf_compat.cast(global_step, tf_compat.int64)
        max_updates = tf_compat.constant(
            (end_step - start_step) // step_size if end_step > 0 else -1,
            dtype=tf_compat.int64,
            name="max_updates",
        )
        start_step = tf_compat.constant(start_step,
                                        dtype=tf_compat.int64,
                                        name="start_step")
        end_step = tf_compat.constant(end_step,
                                      dtype=tf_compat.int64,
                                      name="end_step")
        init_lr = tf_compat.constant(init_lr,
                                     dtype=tf_compat.float32,
                                     name="init_lr")
        step_size = tf_compat.constant(step_size,
                                       dtype=tf_compat.int64,
                                       name="step_size")
        gamma = tf_compat.constant(gamma,
                                   dtype=tf_compat.float32,
                                   name="gamma")
        before = tf_compat.less(global_step, start_step, name="before")
        after = tf_compat.logical_and(
            tf_compat.greater_equal(global_step, end_step, name="after"),
            tf_compat.not_equal(end_step,
                                tf_compat.constant(-1, tf_compat.int64)),
        )

        def _calc_lr():
            steps = tf_compat.subtract(global_step, start_step)
            updates = tf_compat.cond(
                after,
                lambda: max_updates,
                lambda: tf_compat.cast(
                    tf_compat.floor(tf_compat.divide(steps, step_size)),
                    tf_compat.int64,
                ),
            )
            mult_g = tf_compat.pow(gamma,
                                   tf_compat.cast(updates, tf_compat.float32))

            return tf_compat.multiply(init_lr, mult_g)

        learning_rate = tf_compat.cond(before,
                                       lambda: init_lr,
                                       _calc_lr,
                                       name="learning_rate")

    return learning_rate
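
A minimal usage sketch (assuming tf_compat resolves to tf.compat.v1 in graph
mode; all values are illustrative):

import tensorflow.compat.v1 as tf_compat

global_step = tf_compat.train.get_or_create_global_step()
lr = step_lr_schedule(
    global_step, start_step=0, end_step=1000, step_size=100,
    init_lr=0.1, gamma=0.9,
)
optimizer = tf_compat.train.GradientDescentOptimizer(lr)
# lr decays by 0.9 every 100 steps: 0.1, 0.09, 0.081, ..., fixed after step 1000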